/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "debug.h"
#include "langhooks.h"
#include "splay-tree.h"
#include "cfglayout.h"
#include "gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
#include "df.h"
#include "libfuncs.h"
#include "opts.h"
#include "params.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;
/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The assembler name of the current function.  */

static const char *alpha_fnname;
/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};
static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
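
/* Worked example of the fractional tuning above (illustration only,
   assuming GCC's usual definition COSTS_N_INSNS (N) == (N) * 4): with
   int_shift == COSTS_N_INSNS (1) + 1 == 5, a lone shift still beats a
   true two-insn sequence (cost 8), but a pair of such shifts (cost 10)
   loses to it -- exactly the "only if there's just one of them" bias
   the comment describes.  */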
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *);
static bool vms_valid_pointer_mode (enum machine_mode);
#else
#define vms_patch_builtins()  gcc_unreachable()
#endif
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, 0 },
    { "21164",	PROCESSOR_EV5, 0 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX },
    { "21164a",	PROCESSOR_EV5, MASK_BWX },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int i;
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Default to full IEEE compliance mode for Go language.  */
  if (strcmp (lang_hooks.name, "GNU Go") == 0
      && !(target_flags_explicit & MASK_IEEE))
    target_flags |= MASK_IEEE;

  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table [i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table [i].flags;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }
  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_tune = cpu_table [i].processor;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mtune switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }
  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
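
/* Illustration (values hypothetical): a ZAP/ZAPNOT can only clear or
   keep whole bytes, so a mask qualifies exactly when each of its bytes
   is 0x00 or 0xff.  For example:

     zap_mask (0xffffffff) == 1   -- low four bytes all ones
     zap_mask (-1)         == 1   -- all bytes ones
     zap_mask (0xffff0001) == 0   -- low byte is 0x01  */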
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc (REGNO (tmp));
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (TARGET_ABI_OSF
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && crtl->outgoing_args_size == 0
	  && crtl->args.pretend_args_size == 0);
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST                 \
       && GET_CODE (XEXP (X, 0)) == PLUS     \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
	  ? STRICT_REG_OK_FOR_BASE_P (x)
	  : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && CONST_INT_P (ofs))
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
	   && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }
  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }
  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;
	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);
	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}
      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (can_create_pseudo_p ())
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
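
/* Worked example of the split above (value hypothetical): for
   addend == 0x12348765, low == ((0x8765 ^ 0x8000) - 0x8000) == -0x789b,
   i.e. the 16-bit field sign-extended; subtracting it leaves 0x12350000
   for HIGH.  Since ldah shifts its 16-bit immediate left by 16 and lda
   sign-extends its immediate, high + low reconstructs the addend
   exactly: 0x12350000 + (-0x789b) == 0x12348765.  */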
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  enum machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
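
/* Worked example of the overflow check above (values hypothetical):
   for val == 0x7fffffff, low == -1 and val - low == 0x80000000, which
   sign-extends to high == -0x80000000; then high + low == -0x80000001
   != val, so we refuse the split.  For val == 0x12348765 it succeeds
   with high == 0x12350000 and low == -0x789b, putting the ldah-sized
   part in the base reg and the lda-sized part in the mem.  */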
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
		 bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;
    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
			      (enum rtx_code) outer_code, opno, speed)
		    + rtx_cost (XEXP (x, 1),
				(enum rtx_code) outer_code, opno, speed)
		    + COSTS_N_INSNS (1));
	  return true;
	}
      return false;
    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
	      <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}
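
/* Worked example (values hypothetical): for a QImode reference at
   reg+5 with only byte alignment known, offset == (5 & 3) == 1, so
   *PALIGNED_MEM becomes the SImode word at reg+4 and *PBITNUM is 8;
   the byte sits eight bits into that aligned longword, where the
   extract/insert/mask insn patterns can reach it.  */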
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
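
/* Worked example (values hypothetical): only the low three bits of
   ADDR + OFS matter for the byte position within a quadword, so for
   ADDR == (plus r9 (const_int 13)) and OFS == 6 we emit r9 + ((13 + 6)
   & 7) == r9 + 3, and (r9 + 3) & 7 == (r9 + 19) & 7 as required, while
   keeping the added immediate small.  */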
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_INT_P (x)
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
	return NO_REGS;
      if (rclass == ALL_REGS)
	return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
	{
	  if (in_p)
	    {
	      if (!aligned_memory_operand (x, mode))
		sri->icode = direct_optab_handler (reload_in_optab, mode);
	    }
	  else
	    sri->icode = direct_optab_handler (reload_out_optab, mode);
	  return NO_REGS;
	}
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
	  && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (!MEM_P (x))
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
    else
      gcc_unreachable ();
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;
  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (!can_create_pseudo_p ())
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
				   target, 0, OPTAB_WIDEN);
	    }
	}
      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}
      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp && c < 0)
	      {
		new_const = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }
      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
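
/* Worked example of the ZAPNOT trick above (value hypothetical): for
   c == 0x000000ff00ff0000, widening every zero byte to 0xff gives
   new_const == -1, a one-insn load; c | ~new_const == c is itself a
   zap mask (every byte 0x00 or 0xff), so the AND folds to a single
   zapnot, two insns total.  */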
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
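
/* Worked example of the decomposition above (value hypothetical): for
   C1 == 0x1234567887654321, the sign-extended pieces are d1 == 0x4321,
   d2 == -0x789b0000, d3 == 0x5679 and d4 == 0x12340000.  The emitted
   sequence builds 0x12345679 with ldah/lda, shifts it left 32, then
   adds d2 (ldah) and d1 (lda) back in: five insns for the full word.  */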
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the low 64 bits.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (CONST_INT_P (x))
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
   we are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
	x = XEXP (XEXP (x, 0), 0);
      else
	return true;

      if (GET_CODE (x) != SYMBOL_REF)
	return true;

      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      /* FALLTHRU */

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      if (mode == QImode)
		seq = gen_reload_inqi_aligned (operands[0], operands[1]);
	      else
		seq = gen_reload_inhi_aligned (operands[0], operands[1]);
	      emit_insn (seq);
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (REG_P (subtarget))
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      if (mode == QImode)
		seq = gen_aligned_loadqi (subtarget, aligned_mem,
					  bitnum, scratch);
	      else
		seq = gen_aligned_loadhi (subtarget, aligned_mem,
					  bitnum, scratch);
	      emit_insn (seq);

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, subtarget, ua;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (REG_P (subtarget))
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  ua = get_unaligned_address (operands[1]);
	  if (mode == QImode)
	    seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
	  else
	    seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}

      return true;
    }
  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
	{
	  rtx aligned_mem, bitnum;
	  rtx temp1 = gen_reg_rtx (SImode);
	  rtx temp2 = gen_reg_rtx (SImode);

	  get_aligned_mem (operands[0], &aligned_mem, &bitnum);

	  emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
					temp1, temp2));
	}
      else
	{
	  rtx temp1 = gen_reg_rtx (DImode);
	  rtx temp2 = gen_reg_rtx (DImode);
	  rtx temp3 = gen_reg_rtx (DImode);
	  rtx ua = get_unaligned_address (operands[0]);

	  if (mode == QImode)
	    seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
	  else
	    seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

	  alpha_set_memflags (seq, operands[0]);
	  emit_insn (seq);
	}
      return true;
    }

  return false;
}
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
	tmp = operands[0];
      else
	tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
	emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else, because I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

	srl     $16,1,$1
	and     $16,1,$2
	cmplt   $16,0,$3
	or      $1,$2,$2
	cmovge  $16,$16,$2
	itoft	$3,$f10
	itoft	$2,$f11
	cvtqs   $f11,$f11
	addss   $f11,$f11,$f0
	fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

	LC0: .long 0,0x5f800000

	itoft	$16,$f11
	lda	$2,LC0
	cmplt	$16,0,$1
	cpyse	$f11,$f31,$f10
	cpyse	$f31,$f11,$f11
	s4addq	$1,$2,$1
	lds	$f12,0($1)
	cvtqt	$f10,$f10
	cvtqt	$f11,$f11
	addt	$f12,$f10,$f10
	addt	$f10,$f11,$f0

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */
void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
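
/* A C sketch of the negative-path trick above (hypothetical helper,
   shown for illustration only):

     float example_floatuns (unsigned long x)
     {
       if ((long) x >= 0)
	 return (float) (long) x;      // signed conversion suffices
       // Halve the value, folding the lost low bit back in as a sticky
       // bit so the single rounding of the doubled result is correct.
       long half = (long) ((x >> 1) | (x & 1));
       float f = (float) half;
       return f + f;                   // scale back up
     }  */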
/* Generate the comparison for a conditional branch.  */

void
alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
{
  enum rtx_code cmp_code, branch_code;
  enum machine_mode branch_mode = VOIDmode;
  enum rtx_code code = GET_CODE (operands[0]);
  rtx op0 = operands[1], op1 = operands[2];
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }
2333 /* The general case: fold the comparison code to the types of compares
2334 that we have, choosing the branch as necessary. */
2337 case EQ: case LE: case LT: case LEU: case LTU:
2339 /* We have these compares. */
2340 cmp_code = code, branch_code = NE;
2345 /* These must be reversed. */
2346 cmp_code = reverse_condition (code), branch_code = EQ;
2349 case GE: case GT: case GEU: case GTU:
2350 /* For FP, we swap them, for INT, we reverse them. */
2351 if (cmp_mode == DFmode)
2353 cmp_code = swap_condition (code);
2355 tem = op0, op0 = op1, op1 = tem;
2359 cmp_code = reverse_condition (code);
2368 if (cmp_mode == DFmode)
2370 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2372 /* When we are not as concerned about non-finite values, and we
2373 are comparing against zero, we can branch directly. */
2374 if (op1 == CONST0_RTX (DFmode))
2375 cmp_code = UNKNOWN, branch_code = code;
2376 else if (op0 == CONST0_RTX (DFmode))
2378 /* Undo the swap we probably did just above. */
2379 tem = op0, op0 = op1, op1 = tem;
2380 branch_code = swap_condition (cmp_code);
2386 /* ??? We mark the branch mode to be CCmode to prevent the
2387 compare and branch from being combined, since the compare
2388 insn follows IEEE rules that the branch does not. */
2389 branch_mode = CCmode;
2394 /* The following optimizations are only for signed compares. */
2395 if (code != LEU && code != LTU && code != GEU && code != GTU)
2397 /* Whee. Compare and branch against 0 directly. */
2398 if (op1 == const0_rtx)
2399 cmp_code = UNKNOWN, branch_code = code;
2401 /* If the constant doesn't fit into an immediate, but can
2402 be generated by lda/ldah, we adjust the argument and
2403 compare against zero, so we can use beq/bne directly. */
2404 /* ??? Don't do this when comparing against symbols, otherwise
2405 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2406 be declared false out of hand (at least for non-weak). */
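/* Illustrative instance (hypothetical values): x == 0x4000 cannot use
   cmpeq's 8-bit immediate, but -0x4000 satisfies lda's 16-bit 'K'
   range, so we emit t = x + -0x4000 and branch on t == 0.  */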
2407 else if (CONST_INT_P (op1)
2408 && (code == EQ || code == NE)
2409 && !(symbolic_operand (op0, VOIDmode)
2410 || (REG_P (op0) && REG_POINTER (op0))))
2412 rtx n_op1 = GEN_INT (-INTVAL (op1));
2414 if (! satisfies_constraint_I (op1)
2415 && (satisfies_constraint_K (n_op1)
2416 || satisfies_constraint_L (n_op1)))
2417 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2421 if (!reg_or_0_operand (op0, DImode))
2422 op0 = force_reg (DImode, op0);
2423 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2424 op1 = force_reg (DImode, op1);
2427 /* Emit an initial compare instruction, if necessary. */
2429 if (cmp_code != UNKNOWN)
2431 tem = gen_reg_rtx (cmp_mode);
2432 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2435 /* Emit the branch instruction. */
2436 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2437 gen_rtx_IF_THEN_ELSE (VOIDmode,
2438 gen_rtx_fmt_ee (branch_code,
2440 CONST0_RTX (cmp_mode)),
2441 gen_rtx_LABEL_REF (VOIDmode,
2444 emit_jump_insn (tem);
2447 /* Certain simplifications can be done to make invalid setcc operations
2448 valid. Return the final comparison, or NULL if we can't make it work. */
2451 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2453 enum rtx_code cmp_code;
2454 enum rtx_code code = GET_CODE (operands[1]);
2455 rtx op0 = operands[2], op1 = operands[3];
2458 if (cmp_mode == TFmode)
2460 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2465 if (cmp_mode == DFmode && !TARGET_FIX)
2468 /* The general case: fold the comparison code to the types of compares
2469 that we have, choosing the branch as necessary. */
2474 case EQ: case LE: case LT: case LEU: case LTU:
2476 /* We have these compares. */
2477 if (cmp_mode == DFmode)
2478 cmp_code = code, code = NE;
2482 if (cmp_mode == DImode && op1 == const0_rtx)
2487 cmp_code = reverse_condition (code);
2491 case GE: case GT: case GEU: case GTU:
2492 /* These normally need swapping, but for integer zero we have
2493 special patterns that recognize swapped operands. */
2494 if (cmp_mode == DImode && op1 == const0_rtx)
2496 code = swap_condition (code);
2497 if (cmp_mode == DFmode)
2498 cmp_code = code, code = NE;
2499 tmp = op0, op0 = op1, op1 = tmp;
2506 if (cmp_mode == DImode)
2508 if (!register_operand (op0, DImode))
2509 op0 = force_reg (DImode, op0);
2510 if (!reg_or_8bit_operand (op1, DImode))
2511 op1 = force_reg (DImode, op1);
2514 /* Emit an initial compare instruction, if necessary. */
2515 if (cmp_code != UNKNOWN)
2517 tmp = gen_reg_rtx (cmp_mode);
2518 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2519 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2521 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2525 /* Emit the setcc instruction. */
2526 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2527 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2532 /* Rewrite a comparison against zero CMP of the form
2533 (CODE (cc0) (const_int 0)) so it can be written validly in
2534 a conditional move (if_then_else CMP ...).
2535 If both of the operands that set cc0 are nonzero we must emit
2536 an insn to perform the compare (it can't be done within
2537 the conditional move). */
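/* For instance (illustrative), a DImode "x < y ? a : b" is emitted as
   t = (x < y) via cmplt, then a cmov keyed on t != 0.  */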
2540 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2542 enum rtx_code code = GET_CODE (cmp);
2543 enum rtx_code cmov_code = NE;
2544 rtx op0 = XEXP (cmp, 0);
2545 rtx op1 = XEXP (cmp, 1);
2546 enum machine_mode cmp_mode
2547 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2548 enum machine_mode cmov_mode = VOIDmode;
2549 int local_fast_math = flag_unsafe_math_optimizations;
2552 if (cmp_mode == TFmode)
2554 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2559 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2561 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2563 enum rtx_code cmp_code;
2568 /* If we have fp<->int register move instructions, do a cmov by
2569 performing the comparison in fp registers, and move the
2570 zero/nonzero value to integer registers, where we can then
2571 use a normal cmov, or vice-versa. */
2575 case EQ: case LE: case LT: case LEU: case LTU:
2577 /* We have these compares. */
2578 cmp_code = code, code = NE;
2583 /* These must be reversed. */
2584 cmp_code = reverse_condition (code), code = EQ;
2587 case GE: case GT: case GEU: case GTU:
2588 /* These normally need swapping, but for integer zero we have
2589 special patterns that recognize swapped operands. */
2590 if (cmp_mode == DImode && op1 == const0_rtx)
2591 cmp_code = code, code = NE;
2594 cmp_code = swap_condition (code);
2596 tem = op0, op0 = op1, op1 = tem;
2604 if (cmp_mode == DImode)
2606 if (!reg_or_0_operand (op0, DImode))
2607 op0 = force_reg (DImode, op0);
2608 if (!reg_or_8bit_operand (op1, DImode))
2609 op1 = force_reg (DImode, op1);
2612 tem = gen_reg_rtx (cmp_mode);
2613 emit_insn (gen_rtx_SET (VOIDmode, tem,
2614 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2617 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2618 op0 = gen_lowpart (cmp_mode, tem);
2619 op1 = CONST0_RTX (cmp_mode);
2620 local_fast_math = 1;
2623 if (cmp_mode == DImode)
2625 if (!reg_or_0_operand (op0, DImode))
2626 op0 = force_reg (DImode, op0);
2627 if (!reg_or_8bit_operand (op1, DImode))
2628 op1 = force_reg (DImode, op1);
2631 /* We may be able to use a conditional move directly.
2632 This avoids emitting spurious compares. */
2633 if (signed_comparison_operator (cmp, VOIDmode)
2634 && (cmp_mode == DImode || local_fast_math)
2635 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2636 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2638 /* We can't put the comparison inside the conditional move;
2639 emit a compare instruction and put that inside the
2640 conditional move. Make sure we emit only comparisons we have;
2641 swap or reverse as necessary. */
2643 if (!can_create_pseudo_p ())
2648 case EQ: case LE: case LT: case LEU: case LTU:
2650 /* We have these compares: */
2655 /* These must be reversed. */
2656 code = reverse_condition (code);
2660 case GE: case GT: case GEU: case GTU:
2661 /* These must be swapped. */
2662 if (op1 != CONST0_RTX (cmp_mode))
2664 code = swap_condition (code);
2665 tem = op0, op0 = op1, op1 = tem;
2673 if (cmp_mode == DImode)
2675 if (!reg_or_0_operand (op0, DImode))
2676 op0 = force_reg (DImode, op0);
2677 if (!reg_or_8bit_operand (op1, DImode))
2678 op1 = force_reg (DImode, op1);
2681 /* ??? We mark the branch mode to be CCmode to prevent the compare
2682 and cmov from being combined, since the compare insn follows IEEE
2683 rules that the cmov does not. */
2684 if (cmp_mode == DFmode && !local_fast_math)
2687 tem = gen_reg_rtx (cmp_mode);
2688 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2689 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2692 /* Simplify a conditional move of two constants into a setcc with
2693 arithmetic. This is done with a splitter since combine would
2694 just undo the work if done during code generation. It also catches
2695 cases we wouldn't have before cse. */
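/* A hedged C model of the cases handled below (names hypothetical;
   C is the 0/1 setcc result, T and F the two constants):

     static long
     cmov_const_model (long c, long t, long f)
     {
       long diff = t - f;
       if (f == 0 && t > 0 && (t & (t - 1)) == 0)
         return c << __builtin_ctzl (t);        // setcc; shift
       if (f == 0 && t == -1)
         return -c;                             // setcc; negate
       return c * diff + f;                     // diff 1/4/8: add, s4add, s8add
     }
*/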
2698 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2699 rtx t_rtx, rtx f_rtx)
2701 HOST_WIDE_INT t, f, diff;
2702 enum machine_mode mode;
2703 rtx target, subtarget, tmp;
2705 mode = GET_MODE (dest);
2710 if (((code == NE || code == EQ) && diff < 0)
2711 || (code == GE || code == GT))
2713 code = reverse_condition (code);
2714 diff = t, t = f, f = diff;
2718 subtarget = target = dest;
2721 target = gen_lowpart (DImode, dest);
2722 if (can_create_pseudo_p ())
2723 subtarget = gen_reg_rtx (DImode);
2727 /* Below, we must be careful to use copy_rtx on target and subtarget
2728 in intermediate insns, as they may be a subreg rtx, which may not
2731 if (f == 0 && exact_log2 (diff) > 0
2732 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2733 viable over a longer latency cmove. On EV5, the E0 slot is a
2734 scarce resource, and on EV4 shift has the same latency as a cmove. */
2735 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2737 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2738 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2740 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2741 GEN_INT (exact_log2 (t)));
2742 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2744 else if (f == 0 && t == -1)
2746 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2747 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2749 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2751 else if (diff == 1 || diff == 4 || diff == 8)
2755 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2756 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2759 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2762 add_op = GEN_INT (f);
2763 if (sext_add_operand (add_op, mode))
2765 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2767 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2768 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2780 /* Look up the X_floating library function name for the
2783 struct GTY(()) xfloating_op
2785 const enum rtx_code code;
2786 const char *const GTY((skip)) osf_func;
2787 const char *const GTY((skip)) vms_func;
2791 static GTY(()) struct xfloating_op xfloating_ops[] =
2793 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2794 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2795 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2796 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2797 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2798 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2799 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2800 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2801 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2802 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2803 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2804 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2805 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2806 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2807 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2810 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2812 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2813 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2817 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2819 struct xfloating_op *ops = xfloating_ops;
2820 long n = ARRAY_SIZE (xfloating_ops);
2823 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2825 /* How irritating. Nothing to key off for the main table. */
2826 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2829 n = ARRAY_SIZE (vax_cvt_ops);
2832 for (i = 0; i < n; ++i, ++ops)
2833 if (ops->code == code)
2835 rtx func = ops->libcall;
2838 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2839 ? ops->vms_func : ops->osf_func);
2840 ops->libcall = func;
2848 /* Most X_floating operations take the rounding mode as an argument.
2849 Compute that here. */
2852 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2853 enum alpha_fp_rounding_mode round)
2859 case ALPHA_FPRM_NORM:
2862 case ALPHA_FPRM_MINF:
2865 case ALPHA_FPRM_CHOP:
2868 case ALPHA_FPRM_DYN:
2874 /* XXX For reference, round to +inf is mode = 3. */
2877 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2883 /* Emit an X_floating library function call.
2885 Note that these functions do not follow normal calling conventions:
2886 TFmode arguments are passed in two integer registers (as opposed to
2887 indirect); TFmode return values appear in R16+R17.
2889 FUNC is the function to call.
2890 TARGET is where the output belongs.
2891 OPERANDS are the inputs.
2892 NOPERANDS is the count of inputs.
2893 EQUIV is the expression equivalent for the function.
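
   Concretely (illustrative; register numbers inferred from the code
   below): a call to _OtsAddX takes its two TFmode operands in $16:$17
   and $18:$19, the rounding-mode integer in $20, and returns its
   TFmode result in $16:$17 rather than the usual $0.  */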
2897 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2898 int noperands, rtx equiv)
2900 rtx usage = NULL_RTX, tmp, reg;
2905 for (i = 0; i < noperands; ++i)
2907 switch (GET_MODE (operands[i]))
2910 reg = gen_rtx_REG (TFmode, regno);
2915 reg = gen_rtx_REG (DFmode, regno + 32);
2920 gcc_assert (CONST_INT_P (operands[i]));
2923 reg = gen_rtx_REG (DImode, regno);
2931 emit_move_insn (reg, operands[i]);
2932 use_reg (&usage, reg);
2935 switch (GET_MODE (target))
2938 reg = gen_rtx_REG (TFmode, 16);
2941 reg = gen_rtx_REG (DFmode, 32);
2944 reg = gen_rtx_REG (DImode, 0);
2950 tmp = gen_rtx_MEM (QImode, func);
2951 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
2952 const0_rtx, const0_rtx));
2953 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
2954 RTL_CONST_CALL_P (tmp) = 1;
2959 emit_libcall_block (tmp, target, reg, equiv);
2962 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
2965 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
2969 rtx out_operands[3];
2971 func = alpha_lookup_xfloating_lib_func (code);
2972 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
2974 out_operands[0] = operands[1];
2975 out_operands[1] = operands[2];
2976 out_operands[2] = GEN_INT (mode);
2977 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
2978 gen_rtx_fmt_ee (code, TFmode, operands[1],
2982 /* Emit an X_floating library function call for a comparison. */
2985 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
2987 enum rtx_code cmp_code, res_code;
2988 rtx func, out, operands[2], note;
2990 /* X_floating library comparison functions return
2991    -1  unordered
2992     0  false
2993     1  true
2994 Convert the compare against the raw return value. */
3022 func = alpha_lookup_xfloating_lib_func (cmp_code);
3026 out = gen_reg_rtx (DImode);
3028 /* What's actually returned is -1,0,1, not a proper boolean value,
3029 so use an EXPR_LIST as with a generic libcall instead of a
3030 comparison type expression. */
3031 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3032 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3033 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3034 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3039 /* Emit an X_floating library function call for a conversion. */
3042 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3044 int noperands = 1, mode;
3045 rtx out_operands[2];
3047 enum rtx_code code = orig_code;
3049 if (code == UNSIGNED_FIX)
3052 func = alpha_lookup_xfloating_lib_func (code);
3054 out_operands[0] = operands[1];
3059 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3060 out_operands[1] = GEN_INT (mode);
3063 case FLOAT_TRUNCATE:
3064 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3065 out_operands[1] = GEN_INT (mode);
3072 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3073 gen_rtx_fmt_e (orig_code,
3074 GET_MODE (operands[0]),
3078 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3079 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3080 guarantee that the sequence
3081   set (OP[0] OP[2])
3082   set (OP[1] OP[3])
3083 is valid. Naturally, output operand ordering is little-endian.
3084 This is used by *movtf_internal and *movti_internal. */
3087 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3090 switch (GET_CODE (operands[1]))
3093 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3094 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3098 operands[3] = adjust_address (operands[1], DImode, 8);
3099 operands[2] = adjust_address (operands[1], DImode, 0);
3104 gcc_assert (operands[1] == CONST0_RTX (mode));
3105 operands[2] = operands[3] = const0_rtx;
3112 switch (GET_CODE (operands[0]))
3115 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3116 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3120 operands[1] = adjust_address (operands[0], DImode, 8);
3121 operands[0] = adjust_address (operands[0], DImode, 0);
3128 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3131 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3132 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3136 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3137 op2 is a register containing the sign bit, operation is the
3138 logical operation to be performed. */
3141 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3143 rtx high_bit = operands[2];
3147 alpha_split_tmode_pair (operands, TFmode, false);
3149 /* Detect three flavors of operand overlap. */
3151 if (rtx_equal_p (operands[0], operands[2]))
3153 else if (rtx_equal_p (operands[1], operands[2]))
3155 if (rtx_equal_p (operands[0], high_bit))
3162 emit_move_insn (operands[0], operands[2]);
3164 /* ??? If the destination overlaps both source tf and high_bit, then
3165 assume source tf is dead in its entirety and use the other half
3166 for a scratch register. Otherwise "scratch" is just the proper
3167 destination register. */
3168 scratch = operands[move < 2 ? 1 : 3];
3170 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3174 emit_move_insn (operands[0], operands[2]);
3176 emit_move_insn (operands[1], scratch);
3180 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3184 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3185 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3186 lda r3,X(r11) lda r3,X+2(r11)
3187 extwl r1,r3,r1 extql r1,r3,r1
3188 extwh r2,r3,r2 extqh r2,r3,r2
3189 or r1,r2,r1 or r1,r2,r1
3192 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3193 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3194 lda r3,X(r11) lda r3,X(r11)
3195 extll r1,r3,r1 extll r1,r3,r1
3196 extlh r2,r3,r2 extlh r2,r3,r2
3197 or r1,r2,r1 addl r1,r2,r1
3199 quad: ldq_u r1,X(r11)
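   (the rest of the quad sequence is analogous).  As a hedged C model of
   the quad case (illustrative, assuming a 64-bit unsigned long):

     static unsigned long
     unaligned_load_q_model (const unsigned char *p)
     {
       unsigned long a = *(const unsigned long *) ((unsigned long) p & -8);
       unsigned long b = *(const unsigned long *) (((unsigned long) p + 7) & -8);
       int sh = ((unsigned long) p & 7) * 8;
       // extql shifts the low quad down; extqh shifts the high quad up.
       // At offset zero extqh is a noop rather than zero -- the edge case
       // noted in the word-copy routine further below.
       return sh ? (a >> sh) | (b << (64 - sh)) : a;
     }
*/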
3208 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3209 HOST_WIDE_INT ofs, int sign)
3211 rtx meml, memh, addr, extl, exth, tmp, mema;
3212 enum machine_mode mode;
3214 if (TARGET_BWX && size == 2)
3216 meml = adjust_address (mem, QImode, ofs);
3217 memh = adjust_address (mem, QImode, ofs+1);
3218 extl = gen_reg_rtx (DImode);
3219 exth = gen_reg_rtx (DImode);
3220 emit_insn (gen_zero_extendqidi2 (extl, meml));
3221 emit_insn (gen_zero_extendqidi2 (exth, memh));
3222 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3223 NULL, 1, OPTAB_LIB_WIDEN);
3224 addr = expand_simple_binop (DImode, IOR, extl, exth,
3225 NULL, 1, OPTAB_LIB_WIDEN);
3227 if (sign && GET_MODE (tgt) != HImode)
3229 addr = gen_lowpart (HImode, addr);
3230 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3234 if (GET_MODE (tgt) != DImode)
3235 addr = gen_lowpart (GET_MODE (tgt), addr);
3236 emit_move_insn (tgt, addr);
3241 meml = gen_reg_rtx (DImode);
3242 memh = gen_reg_rtx (DImode);
3243 addr = gen_reg_rtx (DImode);
3244 extl = gen_reg_rtx (DImode);
3245 exth = gen_reg_rtx (DImode);
3247 mema = XEXP (mem, 0);
3248 if (GET_CODE (mema) == LO_SUM)
3249 mema = force_reg (Pmode, mema);
3251 /* AND addresses cannot be in any alias set, since they may implicitly
3252 alias surrounding code. Ideally we'd have some alias set that
3253 covered all types except those with alignment 8 or higher. */
3255 tmp = change_address (mem, DImode,
3256 gen_rtx_AND (DImode,
3257 plus_constant (mema, ofs),
3259 set_mem_alias_set (tmp, 0);
3260 emit_move_insn (meml, tmp);
3262 tmp = change_address (mem, DImode,
3263 gen_rtx_AND (DImode,
3264 plus_constant (mema, ofs + size - 1),
3266 set_mem_alias_set (tmp, 0);
3267 emit_move_insn (memh, tmp);
3269 if (sign && size == 2)
3271 emit_move_insn (addr, plus_constant (mema, ofs+2));
3273 emit_insn (gen_extql (extl, meml, addr));
3274 emit_insn (gen_extqh (exth, memh, addr));
3276 /* We must use tgt here for the target. The alpha-vms port fails if we use
3277 addr for the target, because addr is marked as a pointer and combine
3278 knows that pointers are always sign-extended 32-bit values. */
3279 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3280 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3281 addr, 1, OPTAB_WIDEN);
3285 emit_move_insn (addr, plus_constant (mema, ofs));
3286 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3290 emit_insn (gen_extwh (exth, memh, addr));
3294 emit_insn (gen_extlh (exth, memh, addr));
3298 emit_insn (gen_extqh (exth, memh, addr));
3305 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3306 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3311 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3314 /* Similarly, use ins and msk instructions to perform unaligned stores. */
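/* A hedged C model of the quad-sized store below (illustrative,
   assuming a 64-bit unsigned long):

     static void
     unaligned_store_q_model (unsigned char *p, unsigned long v)
     {
       unsigned long *lo = (unsigned long *) ((unsigned long) p & -8);
       unsigned long *hi = (unsigned long *) (((unsigned long) p + 7) & -8);
       int sh = ((unsigned long) p & 7) * 8;
       unsigned long insl = v << sh;                   // insql
       unsigned long insh = sh ? v >> (64 - sh) : 0;   // insqh (noop at 0)
       unsigned long keepl = ~(~0UL << sh);            // mskql keeps bytes below P
       unsigned long keeph = ~0UL << sh;               // mskqh keeps bytes past the end
       // Store high before low: when P is aligned, LO == HI and this
       // order leaves V in memory (see the comment near the end below).
       *hi = (*hi & keeph) | insh;
       *lo = (*lo & keepl) | insl;
     }
*/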
3317 alpha_expand_unaligned_store (rtx dst, rtx src,
3318 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3320 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3322 if (TARGET_BWX && size == 2)
3324 if (src != const0_rtx)
3326 dstl = gen_lowpart (QImode, src);
3327 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3328 NULL, 1, OPTAB_LIB_WIDEN);
3329 dsth = gen_lowpart (QImode, dsth);
3332 dstl = dsth = const0_rtx;
3334 meml = adjust_address (dst, QImode, ofs);
3335 memh = adjust_address (dst, QImode, ofs+1);
3337 emit_move_insn (meml, dstl);
3338 emit_move_insn (memh, dsth);
3342 dstl = gen_reg_rtx (DImode);
3343 dsth = gen_reg_rtx (DImode);
3344 insl = gen_reg_rtx (DImode);
3345 insh = gen_reg_rtx (DImode);
3347 dsta = XEXP (dst, 0);
3348 if (GET_CODE (dsta) == LO_SUM)
3349 dsta = force_reg (Pmode, dsta);
3351 /* AND addresses cannot be in any alias set, since they may implicitly
3352 alias surrounding code. Ideally we'd have some alias set that
3353 covered all types except those with alignment 8 or higher. */
3355 meml = change_address (dst, DImode,
3356 gen_rtx_AND (DImode,
3357 plus_constant (dsta, ofs),
3359 set_mem_alias_set (meml, 0);
3361 memh = change_address (dst, DImode,
3362 gen_rtx_AND (DImode,
3363 plus_constant (dsta, ofs + size - 1),
3365 set_mem_alias_set (memh, 0);
3367 emit_move_insn (dsth, memh);
3368 emit_move_insn (dstl, meml);
3370 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3372 if (src != CONST0_RTX (GET_MODE (src)))
3374 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3375 GEN_INT (size*8), addr));
3380 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3383 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3386 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3393 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3398 emit_insn (gen_mskwl (dstl, dstl, addr));
3401 emit_insn (gen_mskll (dstl, dstl, addr));
3404 emit_insn (gen_mskql (dstl, dstl, addr));
3410 if (src != CONST0_RTX (GET_MODE (src)))
3412 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3413 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3416 /* Must store high before low for the degenerate case of an aligned address. */
3417 emit_move_insn (memh, dsth);
3418 emit_move_insn (meml, dstl);
3421 /* The block move code tries to maximize speed by separating loads and
3422 stores at the expense of register pressure: we load all of the data
3423 before we store it back out. There are two secondary effects worth
3424 mentioning, that this speeds copying to/from aligned and unaligned
3425 buffers, and that it makes the code significantly easier to write. */
3427 #define MAX_MOVE_WORDS 8
3429 /* Load an integral number of consecutive unaligned quadwords. */
3432 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3433 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3435 rtx const im8 = GEN_INT (-8);
3436 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3437 rtx sreg, areg, tmp, smema;
3440 smema = XEXP (smem, 0);
3441 if (GET_CODE (smema) == LO_SUM)
3442 smema = force_reg (Pmode, smema);
3444 /* Generate all the tmp registers we need. */
3445 for (i = 0; i < words; ++i)
3447 data_regs[i] = out_regs[i];
3448 ext_tmps[i] = gen_reg_rtx (DImode);
3450 data_regs[words] = gen_reg_rtx (DImode);
3453 smem = adjust_address (smem, GET_MODE (smem), ofs);
3455 /* Load up all of the source data. */
3456 for (i = 0; i < words; ++i)
3458 tmp = change_address (smem, DImode,
3459 gen_rtx_AND (DImode,
3460 plus_constant (smema, 8*i),
3462 set_mem_alias_set (tmp, 0);
3463 emit_move_insn (data_regs[i], tmp);
3466 tmp = change_address (smem, DImode,
3467 gen_rtx_AND (DImode,
3468 plus_constant (smema, 8*words - 1),
3470 set_mem_alias_set (tmp, 0);
3471 emit_move_insn (data_regs[words], tmp);
3473 /* Extract the half-word fragments. Unfortunately DEC decided to make
3474 extxh with offset zero a noop instead of zeroing the register, so
3475 we must take care of that edge condition ourselves with cmov. */
3477 sreg = copy_addr_to_reg (smema);
3478 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3480 for (i = 0; i < words; ++i)
3482 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3483 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3484 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3485 gen_rtx_IF_THEN_ELSE (DImode,
3486 gen_rtx_EQ (DImode, areg,
3488 const0_rtx, ext_tmps[i])));
3491 /* Merge the half-words into whole words. */
3492 for (i = 0; i < words; ++i)
3494 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3495 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3499 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3500 may be NULL to store zeros. */
3503 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3504 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3506 rtx const im8 = GEN_INT (-8);
3507 rtx ins_tmps[MAX_MOVE_WORDS];
3508 rtx st_tmp_1, st_tmp_2, dreg;
3509 rtx st_addr_1, st_addr_2, dmema;
3512 dmema = XEXP (dmem, 0);
3513 if (GET_CODE (dmema) == LO_SUM)
3514 dmema = force_reg (Pmode, dmema);
3516 /* Generate all the tmp registers we need. */
3517 if (data_regs != NULL)
3518 for (i = 0; i < words; ++i)
3519 ins_tmps[i] = gen_reg_rtx(DImode);
3520 st_tmp_1 = gen_reg_rtx(DImode);
3521 st_tmp_2 = gen_reg_rtx(DImode);
3524 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3526 st_addr_2 = change_address (dmem, DImode,
3527 gen_rtx_AND (DImode,
3528 plus_constant (dmema, words*8 - 1),
3530 set_mem_alias_set (st_addr_2, 0);
3532 st_addr_1 = change_address (dmem, DImode,
3533 gen_rtx_AND (DImode, dmema, im8));
3534 set_mem_alias_set (st_addr_1, 0);
3536 /* Load up the destination end bits. */
3537 emit_move_insn (st_tmp_2, st_addr_2);
3538 emit_move_insn (st_tmp_1, st_addr_1);
3540 /* Shift the input data into place. */
3541 dreg = copy_addr_to_reg (dmema);
3542 if (data_regs != NULL)
3544 for (i = words-1; i >= 0; --i)
3546 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3547 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3549 for (i = words-1; i > 0; --i)
3551 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3552 ins_tmps[i-1], ins_tmps[i-1], 1,
3557 /* Split and merge the ends with the destination data. */
3558 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3559 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3561 if (data_regs != NULL)
3563 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3564 st_tmp_2, 1, OPTAB_WIDEN);
3565 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3566 st_tmp_1, 1, OPTAB_WIDEN);
3570 emit_move_insn (st_addr_2, st_tmp_2);
3571 for (i = words-1; i > 0; --i)
3573 rtx tmp = change_address (dmem, DImode,
3574 gen_rtx_AND (DImode,
3575 plus_constant (dmema, i*8),
3577 set_mem_alias_set (tmp, 0);
3578 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3580 emit_move_insn (st_addr_1, st_tmp_1);
3584 /* Expand string/block move operations.
3586 operands[0] is the pointer to the destination.
3587 operands[1] is the pointer to the source.
3588 operands[2] is the number of bytes to move.
3589 operands[3] is the alignment. */
3592 alpha_expand_block_move (rtx operands[])
3594 rtx bytes_rtx = operands[2];
3595 rtx align_rtx = operands[3];
3596 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3597 HOST_WIDE_INT bytes = orig_bytes;
3598 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3599 HOST_WIDE_INT dst_align = src_align;
3600 rtx orig_src = operands[1];
3601 rtx orig_dst = operands[0];
3602 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3604 unsigned int i, words, ofs, nregs = 0;
3606 if (orig_bytes <= 0)
3608 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3611 /* Look for additional alignment information from recorded register info. */
3613 tmp = XEXP (orig_src, 0);
3615 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3616 else if (GET_CODE (tmp) == PLUS
3617 && REG_P (XEXP (tmp, 0))
3618 && CONST_INT_P (XEXP (tmp, 1)))
3620 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3621 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3625 if (a >= 64 && c % 8 == 0)
3627 else if (a >= 32 && c % 4 == 0)
3629 else if (a >= 16 && c % 2 == 0)
3634 tmp = XEXP (orig_dst, 0);
3636 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3637 else if (GET_CODE (tmp) == PLUS
3638 && REG_P (XEXP (tmp, 0))
3639 && CONST_INT_P (XEXP (tmp, 1)))
3641 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3642 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3646 if (a >= 64 && c % 8 == 0)
3648 else if (a >= 32 && c % 4 == 0)
3650 else if (a >= 16 && c % 2 == 0)
3656 if (src_align >= 64 && bytes >= 8)
3660 for (i = 0; i < words; ++i)
3661 data_regs[nregs + i] = gen_reg_rtx (DImode);
3663 for (i = 0; i < words; ++i)
3664 emit_move_insn (data_regs[nregs + i],
3665 adjust_address (orig_src, DImode, ofs + i * 8));
3672 if (src_align >= 32 && bytes >= 4)
3676 for (i = 0; i < words; ++i)
3677 data_regs[nregs + i] = gen_reg_rtx (SImode);
3679 for (i = 0; i < words; ++i)
3680 emit_move_insn (data_regs[nregs + i],
3681 adjust_address (orig_src, SImode, ofs + i * 4));
3692 for (i = 0; i < words+1; ++i)
3693 data_regs[nregs + i] = gen_reg_rtx (DImode);
3695 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3703 if (! TARGET_BWX && bytes >= 4)
3705 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3706 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3713 if (src_align >= 16)
3716 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3717 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3720 } while (bytes >= 2);
3722 else if (! TARGET_BWX)
3724 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3725 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3733 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3734 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3739 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3741 /* Now save it back out again. */
3745 /* Write out the data in whatever chunks reading the source allowed. */
3746 if (dst_align >= 64)
3748 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3750 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3757 if (dst_align >= 32)
3759 /* If the source has remaining DImode regs, write them out in
3761 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3763 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3764 NULL_RTX, 1, OPTAB_WIDEN);
3766 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3767 gen_lowpart (SImode, data_regs[i]));
3768 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3769 gen_lowpart (SImode, tmp));
3774 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3776 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3783 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3785 /* Write out a remaining block of words using unaligned methods. */
3787 for (words = 1; i + words < nregs; words++)
3788 if (GET_MODE (data_regs[i + words]) != DImode)
3792 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3794 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3801 /* Due to the above, this won't be aligned. */
3802 /* ??? If we have more than one of these, consider constructing full
3803 words in registers and using alpha_expand_unaligned_store_words. */
3804 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3806 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3811 if (dst_align >= 16)
3812 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3814 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3819 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3821 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3826 /* The remainder must be byte copies. */
3829 gcc_assert (GET_MODE (data_regs[i]) == QImode);
3830 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
3839 alpha_expand_block_clear (rtx operands[])
3841 rtx bytes_rtx = operands[1];
3842 rtx align_rtx = operands[3];
3843 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3844 HOST_WIDE_INT bytes = orig_bytes;
3845 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3846 HOST_WIDE_INT alignofs = 0;
3847 rtx orig_dst = operands[0];
3849 int i, words, ofs = 0;
3851 if (orig_bytes <= 0)
3853 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3856 /* Look for stricter alignment. */
3857 tmp = XEXP (orig_dst, 0);
3859 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3860 else if (GET_CODE (tmp) == PLUS
3861 && REG_P (XEXP (tmp, 0))
3862 && CONST_INT_P (XEXP (tmp, 1)))
3864 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3865 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3870 align = a, alignofs = 8 - c % 8;
3872 align = a, alignofs = 4 - c % 4;
3874 align = a, alignofs = 2 - c % 2;
3878 /* Handle an unaligned prefix first. */
3882 #if HOST_BITS_PER_WIDE_INT >= 64
3883 /* Given that alignofs is bounded by align, the only time BWX could
3884 generate three stores is for a 7 byte fill. Prefer two individual
3885 stores over a load/mask/store sequence. */
3886 if ((!TARGET_BWX || alignofs == 7)
3888 && !(alignofs == 4 && bytes >= 4))
3890 enum machine_mode mode = (align >= 64 ? DImode : SImode);
3891 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3895 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3896 set_mem_alias_set (mem, 0);
3898 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3899 if (bytes < alignofs)
3901 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3912 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3913 NULL_RTX, 1, OPTAB_WIDEN);
3915 emit_move_insn (mem, tmp);
3919 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
3921 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
3926 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
3928 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
3933 if (alignofs == 4 && bytes >= 4)
3935 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3941 /* If we've not used the extra lead alignment information by now,
3942 we won't be able to. Downgrade align to match what's left over. */
3945 alignofs = alignofs & -alignofs;
3946 align = MIN (align, alignofs * BITS_PER_UNIT);
3950 /* Handle a block of contiguous long-words. */
3952 if (align >= 64 && bytes >= 8)
3956 for (i = 0; i < words; ++i)
3957 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
3964 /* If the block is large and appropriately aligned, emit a single
3965 store followed by a sequence of stq_u insns. */
3967 if (align >= 32 && bytes > 16)
3971 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3975 orig_dsta = XEXP (orig_dst, 0);
3976 if (GET_CODE (orig_dsta) == LO_SUM)
3977 orig_dsta = force_reg (Pmode, orig_dsta);
3980 for (i = 0; i < words; ++i)
3983 = change_address (orig_dst, DImode,
3984 gen_rtx_AND (DImode,
3985 plus_constant (orig_dsta, ofs + i*8),
3987 set_mem_alias_set (mem, 0);
3988 emit_move_insn (mem, const0_rtx);
3991 /* Depending on the alignment, the first stq_u may have overlapped
3992 with the initial stl, which means that the last stq_u didn't
3993 write as much as it would appear. Leave those questionable bytes
3994 unaccounted for. */
3995 bytes -= words * 8 - 4;
3996 ofs += words * 8 - 4;
3999 /* Handle a smaller block of aligned words. */
4001 if ((align >= 64 && bytes == 4)
4002 || (align == 32 && bytes >= 4))
4006 for (i = 0; i < words; ++i)
4007 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4014 /* An unaligned block uses stq_u stores for as many as possible. */
4020 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4026 /* Next clean up any trailing pieces. */
4028 #if HOST_BITS_PER_WIDE_INT >= 64
4029 /* Count the number of bits in BYTES for which aligned stores could
4030 be emitted. */
4032 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4036 /* If we have appropriate alignment (and it wouldn't take too many
4037 instructions otherwise), mask out the bytes we need. */
4038 if (TARGET_BWX ? words > 2 : bytes > 0)
4045 mem = adjust_address (orig_dst, DImode, ofs);
4046 set_mem_alias_set (mem, 0);
4048 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4050 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4051 NULL_RTX, 1, OPTAB_WIDEN);
4053 emit_move_insn (mem, tmp);
4056 else if (align >= 32 && bytes < 4)
4061 mem = adjust_address (orig_dst, SImode, ofs);
4062 set_mem_alias_set (mem, 0);
4064 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4066 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4067 NULL_RTX, 1, OPTAB_WIDEN);
4069 emit_move_insn (mem, tmp);
4075 if (!TARGET_BWX && bytes >= 4)
4077 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4087 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4091 } while (bytes >= 2);
4093 else if (! TARGET_BWX)
4095 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4103 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4111 /* Returns a mask so that zap(x, value) == x & mask. */
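/* For example, zap (x, 0x0f) clears bytes 0-3, so the mask computed
   below is 0xffffffff00000000; zap (x, 0x01) yields 0xffffffffffffff00.  */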
4114 alpha_expand_zap_mask (HOST_WIDE_INT value)
4119 if (HOST_BITS_PER_WIDE_INT >= 64)
4121 HOST_WIDE_INT mask = 0;
4123 for (i = 7; i >= 0; --i)
4126 if (!((value >> i) & 1))
4130 result = gen_int_mode (mask, DImode);
4134 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4136 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4138 for (i = 7; i >= 4; --i)
4141 if (!((value >> i) & 1))
4145 for (i = 3; i >= 0; --i)
4148 if (!((value >> i) & 1))
4152 result = immed_double_const (mask_lo, mask_hi, DImode);
4159 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4160 enum machine_mode mode,
4161 rtx op0, rtx op1, rtx op2)
4163 op0 = gen_lowpart (mode, op0);
4165 if (op1 == const0_rtx)
4166 op1 = CONST0_RTX (mode);
4168 op1 = gen_lowpart (mode, op1);
4170 if (op2 == const0_rtx)
4171 op2 = CONST0_RTX (mode);
4173 op2 = gen_lowpart (mode, op2);
4175 emit_insn ((*gen) (op0, op1, op2));
4178 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4179 COND is true. Mark the jump as unlikely to be taken. */
4182 emit_unlikely_jump (rtx cond, rtx label)
4184 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4187 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4188 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4189 add_reg_note (x, REG_BR_PROB, very_unlikely);
4192 /* A subroutine of the atomic operation splitters. Emit a load-locked
4193 instruction in MODE. */
4196 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4198 rtx (*fn) (rtx, rtx) = NULL;
4200 fn = gen_load_locked_si;
4201 else if (mode == DImode)
4202 fn = gen_load_locked_di;
4203 emit_insn (fn (reg, mem));
4206 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4207 instruction in MODE. */
4210 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4212 rtx (*fn) (rtx, rtx, rtx) = NULL;
4214 fn = gen_store_conditional_si;
4215 else if (mode == DImode)
4216 fn = gen_store_conditional_di;
4217 emit_insn (fn (res, mem, val));
4220 /* Subroutines of the atomic operation splitters. Emit barriers
4221 as needed for the memory MODEL. */
4224 alpha_pre_atomic_barrier (enum memmodel model)
4228 case MEMMODEL_RELAXED:
4229 case MEMMODEL_CONSUME:
4230 case MEMMODEL_ACQUIRE:
4232 case MEMMODEL_RELEASE:
4233 case MEMMODEL_ACQ_REL:
4234 case MEMMODEL_SEQ_CST:
4235 emit_insn (gen_memory_barrier ());
4243 alpha_post_atomic_barrier (enum memmodel model)
4247 case MEMMODEL_RELAXED:
4248 case MEMMODEL_CONSUME:
4249 case MEMMODEL_RELEASE:
4251 case MEMMODEL_ACQUIRE:
4252 case MEMMODEL_ACQ_REL:
4253 case MEMMODEL_SEQ_CST:
4254 emit_insn (gen_memory_barrier ());
4261 /* A subroutine of the atomic operation splitters. Emit an insxl
4262 instruction in MODE. */
4265 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4267 rtx ret = gen_reg_rtx (DImode);
4268 rtx (*fn) (rtx, rtx, rtx);
4288 op1 = force_reg (mode, op1);
4289 emit_insn (fn (ret, op1, op2));
4294 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4295 to perform. MEM is the memory on which to operate. VAL is the second
4296 operand of the binary operator. BEFORE and AFTER are optional locations to
4297 return the value of MEM either before or after the operation. SCRATCH is
4298 a scratch register. */
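/* Shape of the emitted loop (illustrative; barriers are added around
   it according to MODEL):

     1:  ldq_l   before,mem
         <code>  scratch,before,val
         stq_c   scratch,mem     ; scratch = 1 on success, 0 on failure
         beq     scratch,1b                                            */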
4301 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4302 rtx after, rtx scratch, enum memmodel model)
4304 enum machine_mode mode = GET_MODE (mem);
4305 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4307 alpha_pre_atomic_barrier (model);
4309 label = gen_label_rtx ();
4311 label = gen_rtx_LABEL_REF (DImode, label);
4315 emit_load_locked (mode, before, mem);
4319 x = gen_rtx_AND (mode, before, val);
4320 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4322 x = gen_rtx_NOT (mode, val);
4325 x = gen_rtx_fmt_ee (code, mode, before, val);
4327 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4328 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4330 emit_store_conditional (mode, cond, mem, scratch);
4332 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4333 emit_unlikely_jump (x, label);
4335 alpha_post_atomic_barrier (model);
4338 /* Expand a compare and swap operation. */
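/* Emitted shape for the strong variant (illustrative; barriers per
   the memory model operands):

     1:  ldq_l   retval,mem
         cmpeq   retval,oldval,cond
         beq     cond,2f
         mov     newval,cond
         stq_c   cond,mem
         beq     cond,1b
     2:                                                                */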
4341 alpha_split_compare_and_swap (rtx operands[])
4343 rtx cond, retval, mem, oldval, newval;
4345 enum memmodel mod_s, mod_f;
4346 enum machine_mode mode;
4347 rtx label1, label2, x;
4350 retval = operands[1];
4352 oldval = operands[3];
4353 newval = operands[4];
4354 is_weak = (operands[5] != const0_rtx);
4355 mod_s = (enum memmodel) INTVAL (operands[6]);
4356 mod_f = (enum memmodel) INTVAL (operands[7]);
4357 mode = GET_MODE (mem);
4359 alpha_pre_atomic_barrier (mod_s);
4364 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4365 emit_label (XEXP (label1, 0));
4367 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4369 emit_load_locked (mode, retval, mem);
4371 x = gen_lowpart (DImode, retval);
4372 if (oldval == const0_rtx)
4374 emit_move_insn (cond, const0_rtx);
4375 x = gen_rtx_NE (DImode, x, const0_rtx);
4379 x = gen_rtx_EQ (DImode, x, oldval);
4380 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4381 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4383 emit_unlikely_jump (x, label2);
4385 emit_move_insn (cond, newval);
4386 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4390 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4391 emit_unlikely_jump (x, label1);
4394 if (mod_f != MEMMODEL_RELAXED)
4395 emit_label (XEXP (label2, 0));
4397 alpha_post_atomic_barrier (mod_s);
4399 if (mod_f == MEMMODEL_RELAXED)
4400 emit_label (XEXP (label2, 0));
4404 alpha_expand_compare_and_swap_12 (rtx operands[])
4406 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4407 enum machine_mode mode;
4408 rtx addr, align, wdst;
4409 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4414 oldval = operands[3];
4415 newval = operands[4];
4416 is_weak = operands[5];
4417 mod_s = operands[6];
4418 mod_f = operands[7];
4419 mode = GET_MODE (mem);
4421 /* We forced the address into a register via mem_noofs_operand. */
4422 addr = XEXP (mem, 0);
4423 gcc_assert (register_operand (addr, DImode));
4425 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4426 NULL_RTX, 1, OPTAB_DIRECT);
4428 oldval = convert_modes (DImode, mode, oldval, 1);
4430 if (newval != const0_rtx)
4431 newval = emit_insxl (mode, newval, addr);
4433 wdst = gen_reg_rtx (DImode);
4435 gen = gen_atomic_compare_and_swapqi_1;
4437 gen = gen_atomic_compare_and_swaphi_1;
4438 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4439 is_weak, mod_s, mod_f));
4441 emit_move_insn (dst, gen_lowpart (mode, wdst));
4445 alpha_split_compare_and_swap_12 (rtx operands[])
4447 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4448 enum machine_mode mode;
4450 enum memmodel mod_s, mod_f;
4451 rtx label1, label2, mem, addr, width, mask, x;
4455 orig_mem = operands[2];
4456 oldval = operands[3];
4457 newval = operands[4];
4458 align = operands[5];
4459 is_weak = (operands[6] != const0_rtx);
4460 mod_s = (enum memmodel) INTVAL (operands[7]);
4461 mod_f = (enum memmodel) INTVAL (operands[8]);
4462 scratch = operands[9];
4463 mode = GET_MODE (orig_mem);
4464 addr = XEXP (orig_mem, 0);
4466 mem = gen_rtx_MEM (DImode, align);
4467 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4468 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4469 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4471 alpha_pre_atomic_barrier (mod_s);
4476 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4477 emit_label (XEXP (label1, 0));
4479 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4481 emit_load_locked (DImode, scratch, mem);
4483 width = GEN_INT (GET_MODE_BITSIZE (mode));
4484 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4485 emit_insn (gen_extxl (dest, scratch, width, addr));
4487 if (oldval == const0_rtx)
4489 emit_move_insn (cond, const0_rtx);
4490 x = gen_rtx_NE (DImode, dest, const0_rtx);
4494 x = gen_rtx_EQ (DImode, dest, oldval);
4495 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4496 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4498 emit_unlikely_jump (x, label2);
4500 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4502 if (newval != const0_rtx)
4503 emit_insn (gen_iordi3 (cond, cond, newval));
4505 emit_store_conditional (DImode, cond, mem, cond);
4509 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4510 emit_unlikely_jump (x, label1);
4513 if (mod_f != MEMMODEL_RELAXED)
4514 emit_label (XEXP (label2, 0));
4516 alpha_post_atomic_barrier (mod_s);
4518 if (mod_f == MEMMODEL_RELAXED)
4519 emit_label (XEXP (label2, 0));
4522 /* Expand an atomic exchange operation. */
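/* Emitted shape (illustrative):
     1:  ldq_l retval,mem ; mov val,scratch ; stq_c scratch,mem ;
         beq scratch,1b
   with barriers added per MODEL.  */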
4525 alpha_split_atomic_exchange (rtx operands[])
4527 rtx retval, mem, val, scratch;
4528 enum memmodel model;
4529 enum machine_mode mode;
4532 retval = operands[0];
4535 model = (enum memmodel) INTVAL (operands[3]);
4536 scratch = operands[4];
4537 mode = GET_MODE (mem);
4538 cond = gen_lowpart (DImode, scratch);
4540 alpha_pre_atomic_barrier (model);
4542 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4543 emit_label (XEXP (label, 0));
4545 emit_load_locked (mode, retval, mem);
4546 emit_move_insn (scratch, val);
4547 emit_store_conditional (mode, cond, mem, scratch);
4549 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4550 emit_unlikely_jump (x, label);
4552 alpha_post_atomic_barrier (model);
4556 alpha_expand_atomic_exchange_12 (rtx operands[])
4558 rtx dst, mem, val, model;
4559 enum machine_mode mode;
4560 rtx addr, align, wdst;
4561 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4566 model = operands[3];
4567 mode = GET_MODE (mem);
4569 /* We forced the address into a register via mem_noofs_operand. */
4570 addr = XEXP (mem, 0);
4571 gcc_assert (register_operand (addr, DImode));
4573 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4574 NULL_RTX, 1, OPTAB_DIRECT);
4576 /* Insert val into the correct byte location within the word. */
4577 if (val != const0_rtx)
4578 val = emit_insxl (mode, val, addr);
4580 wdst = gen_reg_rtx (DImode);
4582 gen = gen_atomic_exchangeqi_1;
4584 gen = gen_atomic_exchangehi_1;
4585 emit_insn (gen (wdst, mem, val, align, model));
4587 emit_move_insn (dst, gen_lowpart (mode, wdst));
4591 alpha_split_atomic_exchange_12 (rtx operands[])
4593 rtx dest, orig_mem, addr, val, align, scratch;
4594 rtx label, mem, width, mask, x;
4595 enum machine_mode mode;
4596 enum memmodel model;
4599 orig_mem = operands[1];
4601 align = operands[3];
4602 model = (enum memmodel) INTVAL (operands[4]);
4603 scratch = operands[5];
4604 mode = GET_MODE (orig_mem);
4605 addr = XEXP (orig_mem, 0);
4607 mem = gen_rtx_MEM (DImode, align);
4608 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4609 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4610 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4612 alpha_pre_atomic_barrier (model);
4614 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4615 emit_label (XEXP (label, 0));
4617 emit_load_locked (DImode, scratch, mem);
4619 width = GEN_INT (GET_MODE_BITSIZE (mode));
4620 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4621 emit_insn (gen_extxl (dest, scratch, width, addr));
4622 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4623 if (val != const0_rtx)
4624 emit_insn (gen_iordi3 (scratch, scratch, val));
4626 emit_store_conditional (DImode, scratch, mem, scratch);
4628 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4629 emit_unlikely_jump (x, label);
4631 alpha_post_atomic_barrier (model);
4634 /* Adjust the cost of a scheduling dependency. Return the new cost of
4635 a dependency LINK of INSN on DEP_INSN. COST is the current cost. */
4638 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4640 enum attr_type dep_insn_type;
4642 /* If the dependence is an anti-dependence, there is no cost. For an
4643 output dependence, there is sometimes a cost, but it doesn't seem
4644 worth handling those few cases. */
4645 if (REG_NOTE_KIND (link) != 0)
4648 /* If we can't recognize the insns, we can't really do anything. */
4649 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4652 dep_insn_type = get_attr_type (dep_insn);
4654 /* Bring in the user-defined memory latency. */
4655 if (dep_insn_type == TYPE_ILD
4656 || dep_insn_type == TYPE_FLD
4657 || dep_insn_type == TYPE_LDSYM)
4658 cost += alpha_memory_latency-1;
4660 /* Everything else is handled in DFA bypasses now. */
4665 /* The number of instructions that can be issued per cycle. */
4668 alpha_issue_rate (void)
4670 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4673 /* How many alternative schedules to try. This should be as wide as the
4674 scheduling freedom in the DFA, but no wider. Making this value too
4675 large results in extra work for the scheduler.
4677 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4678 alternative schedules. For EV5, we can choose between E0/E1 and
4679 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4682 alpha_multipass_dfa_lookahead (void)
4684 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4687 /* Machine-specific function data. */
4689 struct GTY(()) alpha_links;
4691 struct GTY(()) machine_function
4694 const char *some_ld_name;
4696 /* For TARGET_LD_BUGGY_LDGP. */
4699 /* For VMS condition handlers. */
4700 bool uses_condition_handler;
4702 /* Linkage entries. */
4703 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
4707 /* How to allocate a 'struct machine_function'. */
4709 static struct machine_function *
4710 alpha_init_machine_status (void)
4712 return ggc_alloc_cleared_machine_function ();
4715 /* Support for frame based VMS condition handlers. */
4717 /* A VMS condition handler may be established for a function with a call to
4718 __builtin_establish_vms_condition_handler, and cancelled with a call to
4719 __builtin_revert_vms_condition_handler.
4721 The VMS Condition Handling Facility knows about the existence of a handler
4722 from the procedure descriptor .handler field. Like the VMS native compilers,
4723 we store the user-specified handler's address at a fixed location in the
4724 stack frame and point the procedure descriptor at a common wrapper which
4725 fetches the real handler's address and issues an indirect call.
4727 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4729 We force the procedure kind to PT_STACK, and the fixed frame location is
4730 fp+8, just before the register save area. We use the handler_data field in
4731 the procedure descriptor to state the fp offset at which the installed
4732 handler address can be found. */
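/* Hypothetical user-level sketch (builtin signatures assumed, not
   taken from documentation):

     void *prev = __builtin_establish_vms_condition_handler (handler);
     ...
     __builtin_revert_vms_condition_handler ();
*/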
4734 #define VMS_COND_HANDLER_FP_OFFSET 8
4736 /* Expand code to store the currently installed user VMS condition handler
4737 into TARGET and install HANDLER as the new condition handler. */
4740 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4742 rtx handler_slot_address
4743 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4746 = gen_rtx_MEM (DImode, handler_slot_address);
4748 emit_move_insn (target, handler_slot);
4749 emit_move_insn (handler_slot, handler);
4751 /* Notify the start/prologue/epilogue emitters that the condition handler
4752 slot is needed. In addition to reserving the slot space, this will force
4753 the procedure kind to PT_STACK, to ensure that the hard_frame_pointer_rtx
4754 use above is correct. */
4755 cfun->machine->uses_condition_handler = true;
4758 /* Expand code to store the current VMS condition handler into TARGET and
4762 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4764 /* We implement this by establishing a null condition handler, with the tiny
4765 side effect of setting uses_condition_handler. This is a little bit
4766 pessimistic if no actual builtin_establish call is ever issued, which is
4767 not a real problem and is expected never to happen anyway. */
4769 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4772 /* Functions to save and restore alpha_return_addr_rtx. */
4774 /* Start the ball rolling with RETURN_ADDR_RTX. */
4777 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4782 return get_hard_reg_initial_val (Pmode, REG_RA);
4785 /* Return or create a memory slot containing the gp value for the current
4786 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4789 alpha_gp_save_rtx (void)
4791 rtx seq, m = cfun->machine->gp_save_rtx;
4797 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4798 m = validize_mem (m);
4799 emit_move_insn (m, pic_offset_table_rtx);
4804 /* We used to simply emit the sequence after entry_of_function.
4805 However, this breaks the CFG if the first instruction in the
4806 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4807 label. Emit the sequence properly on the edge. We are only
4808 invoked from dw2_build_landing_pads and finish_eh_generation
4809 will call commit_edge_insertions thanks to a kludge. */
4810 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4812 cfun->machine->gp_save_rtx = m;
4819 alpha_instantiate_decls (void)
4821 if (cfun->machine->gp_save_rtx != NULL_RTX)
4822 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
4826 alpha_ra_ever_killed (void)
4830 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4831 return (int)df_regs_ever_live_p (REG_RA);
4833 push_topmost_sequence ();
4835 pop_topmost_sequence ();
4837 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4841 /* Return the trap mode suffix applicable to the current
4842 instruction, or NULL. */
4845 get_trap_mode_suffix (void)
4847 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4851 case TRAP_SUFFIX_NONE:
4854 case TRAP_SUFFIX_SU:
4855 if (alpha_fptm >= ALPHA_FPTM_SU)
4859 case TRAP_SUFFIX_SUI:
4860 if (alpha_fptm >= ALPHA_FPTM_SUI)
4864 case TRAP_SUFFIX_V_SV:
4872 case ALPHA_FPTM_SUI:
4878 case TRAP_SUFFIX_V_SV_SVI:
4887 case ALPHA_FPTM_SUI:
4894 case TRAP_SUFFIX_U_SU_SUI:
4903 case ALPHA_FPTM_SUI:
4916 /* Return the rounding mode suffix applicable to the current
4917 instruction, or NULL. */
4920 get_round_mode_suffix (void)
4922 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4926 case ROUND_SUFFIX_NONE:
4928 case ROUND_SUFFIX_NORMAL:
4931 case ALPHA_FPRM_NORM:
4933 case ALPHA_FPRM_MINF:
4935 case ALPHA_FPRM_CHOP:
4937 case ALPHA_FPRM_DYN:
4944 case ROUND_SUFFIX_C:
4953 /* Locate some local-dynamic symbol still in use by this function
4954 so that we can print its name in some movdi_er_tlsldm pattern. */
4957 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4961 if (GET_CODE (x) == SYMBOL_REF
4962 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4964 cfun->machine->some_ld_name = XSTR (x, 0);
4972 get_some_local_dynamic_name (void)
4976 if (cfun->machine->some_ld_name)
4977 return cfun->machine->some_ld_name;
4979 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4981 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4982 return cfun->machine->some_ld_name;
4987 /* Print an operand. Recognize special options, documented below. */
4990 print_operand (FILE *file, rtx x, int code)
4997 /* Print the assembler name of the current function. */
4998 assemble_name (file, alpha_fnname);
5002 assemble_name (file, get_some_local_dynamic_name ());
5007 const char *trap = get_trap_mode_suffix ();
5008 const char *round = get_round_mode_suffix ();
5011 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5012 (trap ? trap : ""), (round ? round : ""));
/* Generate the single-precision instruction suffix.  */
5018 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
/* Generate the double-precision instruction suffix.  */
5023 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5027 if (alpha_this_literal_sequence_number == 0)
5028 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5029 fprintf (file, "%d", alpha_this_literal_sequence_number);
5033 if (alpha_this_gpdisp_sequence_number == 0)
5034 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5035 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5039 if (GET_CODE (x) == HIGH)
5040 output_addr_const (file, XEXP (x, 0));
5042 output_operand_lossage ("invalid %%H value");
5049 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5051 x = XVECEXP (x, 0, 0);
5052 lituse = "lituse_tlsgd";
5054 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5056 x = XVECEXP (x, 0, 0);
5057 lituse = "lituse_tlsldm";
5059 else if (CONST_INT_P (x))
5060 lituse = "lituse_jsr";
5063 output_operand_lossage ("invalid %%J value");
5067 if (x != const0_rtx)
5068 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5076 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5077 lituse = "lituse_jsrdirect";
5079 lituse = "lituse_jsr";
5082 gcc_assert (INTVAL (x) != 0);
5083 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5087 /* If this operand is the constant zero, write it as "$31". */
5089 fprintf (file, "%s", reg_names[REGNO (x)]);
5090 else if (x == CONST0_RTX (GET_MODE (x)))
5091 fprintf (file, "$31");
5093 output_operand_lossage ("invalid %%r value");
5097 /* Similar, but for floating-point. */
5099 fprintf (file, "%s", reg_names[REGNO (x)]);
5100 else if (x == CONST0_RTX (GET_MODE (x)))
5101 fprintf (file, "$f31");
5103 output_operand_lossage ("invalid %%R value");
5107 /* Write the 1's complement of a constant. */
5108 if (!CONST_INT_P (x))
5109 output_operand_lossage ("invalid %%N value");
5111 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5115 /* Write 1 << C, for a constant C. */
5116 if (!CONST_INT_P (x))
5117 output_operand_lossage ("invalid %%P value");
5119 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5123 /* Write the high-order 16 bits of a constant, sign-extended. */
5124 if (!CONST_INT_P (x))
5125 output_operand_lossage ("invalid %%h value");
5127 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5131 /* Write the low-order 16 bits of a constant, sign-extended. */
5132 if (!CONST_INT_P (x))
5133 output_operand_lossage ("invalid %%L value");
5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5136 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
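/* Worked example: for the value 0x12349876, %L prints -26506, i.e.
   0x9876 sign-extended by subtracting twice its 0x8000 bit.  */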
5140 /* Write mask for ZAP insn. */
5141 if (GET_CODE (x) == CONST_DOUBLE)
5143 HOST_WIDE_INT mask = 0;
5144 HOST_WIDE_INT value;
5146 value = CONST_DOUBLE_LOW (x);
5147 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5152 value = CONST_DOUBLE_HIGH (x);
5153 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5156 mask |= (1 << (i + sizeof (int)));
5158 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5161 else if (CONST_INT_P (x))
5163 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5165 for (i = 0; i < 8; i++, value >>= 8)
5169 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5172 output_operand_lossage ("invalid %%m value");
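/* Worked example: for INTVAL 0x00ff00ff00ff00ff, bytes 0, 2, 4 and 6
   are nonzero, so %m prints the ZAP mask 0x55 (85 decimal).  */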
5176 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5177 if (!CONST_INT_P (x)
5178 || (INTVAL (x) != 8 && INTVAL (x) != 16
5179 && INTVAL (x) != 32 && INTVAL (x) != 64))
5180 output_operand_lossage ("invalid %%M value");
5182 fprintf (file, "%s",
5183 (INTVAL (x) == 8 ? "b"
5184 : INTVAL (x) == 16 ? "w"
5185 : INTVAL (x) == 32 ? "l"
5190 /* Similar, except do it from the mask. */
5191 if (CONST_INT_P (x))
5193 HOST_WIDE_INT value = INTVAL (x);
5200 if (value == 0xffff)
5205 if (value == 0xffffffff)
5216 else if (HOST_BITS_PER_WIDE_INT == 32
5217 && GET_CODE (x) == CONST_DOUBLE
5218 && CONST_DOUBLE_LOW (x) == 0xffffffff
5219 && CONST_DOUBLE_HIGH (x) == 0)
5224 output_operand_lossage ("invalid %%U value");
5228 /* Write the constant value divided by 8. */
5229 if (!CONST_INT_P (x)
5230 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5231 || (INTVAL (x) & 7) != 0)
5232 output_operand_lossage ("invalid %%s value");
5234 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
/* Same, except compute (64 - c) / 8.  */

if (!CONST_INT_P (x)
    || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
    || (INTVAL (x) & 7) != 0)
  output_operand_lossage ("invalid %%s value");
5245 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5248 case 'C': case 'D': case 'c': case 'd':
5249 /* Write out comparison name. */
5251 enum rtx_code c = GET_CODE (x);
5253 if (!COMPARISON_P (x))
5254 output_operand_lossage ("invalid %%C value");
5256 else if (code == 'D')
5257 c = reverse_condition (c);
5258 else if (code == 'c')
5259 c = swap_condition (c);
5260 else if (code == 'd')
5261 c = swap_condition (reverse_condition (c));
5264 fprintf (file, "ule");
5266 fprintf (file, "ult");
5267 else if (c == UNORDERED)
5268 fprintf (file, "un");
5270 fprintf (file, "%s", GET_RTX_NAME (c));
5275 /* Write the divide or modulus operator. */
5276 switch (GET_CODE (x))
5279 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5282 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5285 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5288 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5291 output_operand_lossage ("invalid %%E value");
5297 /* Write "_u" for unaligned access. */
5298 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5299 fprintf (file, "_u");
5304 fprintf (file, "%s", reg_names[REGNO (x)]);
5306 output_address (XEXP (x, 0));
5307 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5309 switch (XINT (XEXP (x, 0), 1))
5313 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5316 output_operand_lossage ("unknown relocation unspec");
5321 output_addr_const (file, x);
5325 output_operand_lossage ("invalid %%xn code");
5330 print_operand_address (FILE *file, rtx addr)
5333 HOST_WIDE_INT offset = 0;
5335 if (GET_CODE (addr) == AND)
5336 addr = XEXP (addr, 0);
5338 if (GET_CODE (addr) == PLUS
5339 && CONST_INT_P (XEXP (addr, 1)))
5341 offset = INTVAL (XEXP (addr, 1));
5342 addr = XEXP (addr, 0);
5345 if (GET_CODE (addr) == LO_SUM)
5347 const char *reloc16, *reloclo;
5348 rtx op1 = XEXP (addr, 1);
5350 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5352 op1 = XEXP (op1, 0);
5353 switch (XINT (op1, 1))
5357 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5361 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5364 output_operand_lossage ("unknown relocation unspec");
5368 output_addr_const (file, XVECEXP (op1, 0, 0));
5373 reloclo = "gprellow";
5374 output_addr_const (file, op1);
5378 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5380 addr = XEXP (addr, 0);
5381 switch (GET_CODE (addr))
5384 basereg = REGNO (addr);
5388 basereg = subreg_regno (addr);
5395 fprintf (file, "($%d)\t\t!%s", basereg,
5396 (basereg == 29 ? reloc16 : reloclo));
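/* Illustrative output: with the GP ($29) as base this appends e.g.
   "($29)\t\t!gprel"; any other base register selects the low-part
   form, e.g. "($1)\t\t!gprellow".  */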
5400 switch (GET_CODE (addr))
5403 basereg = REGNO (addr);
5407 basereg = subreg_regno (addr);
5411 offset = INTVAL (addr);
5414 #if TARGET_ABI_OPEN_VMS
5416 fprintf (file, "%s", XSTR (addr, 0));
5420 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5421 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5422 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5423 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5424 INTVAL (XEXP (XEXP (addr, 0), 1)));
5432 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5435 /* Emit RTL insns to initialize the variable parts of a trampoline at
5436 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5437 for the static chain value for the function. */
5440 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5442 rtx fnaddr, mem, word1, word2;
5444 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5446 #ifdef POINTERS_EXTEND_UNSIGNED
5447 fnaddr = convert_memory_address (Pmode, fnaddr);
5448 chain_value = convert_memory_address (Pmode, chain_value);
5451 if (TARGET_ABI_OPEN_VMS)
5456 /* Construct the name of the trampoline entry point. */
5457 fnname = XSTR (fnaddr, 0);
5458 trname = (char *) alloca (strlen (fnname) + 5);
5459 strcpy (trname, fnname);
5460 strcat (trname, "..tr");
5461 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5462 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
/* The trampoline (or "bounded") procedure descriptor is constructed from
   the function's procedure descriptor, with certain fields zeroed in
   accordance with the VMS calling standard.  It is stored in the first
   quadword.  */
5467 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5468 word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
/* These 4 instructions are:
     ldq $1,24($27)
     ldq $27,16($27)
     jmp $31,($27),0
     nop
   We don't bother setting the HINT field of the jump; the nop
   is merely there for padding.  */
5479 word1 = GEN_INT (0xa77b0010a43b0018);
5480 word2 = GEN_INT (0x47ff041f6bfb0000);
5483 /* Store the first two words, as computed above. */
5484 mem = adjust_address (m_tramp, DImode, 0);
5485 emit_move_insn (mem, word1);
5486 mem = adjust_address (m_tramp, DImode, 8);
5487 emit_move_insn (mem, word2);
5489 /* Store function address and static chain value. */
5490 mem = adjust_address (m_tramp, Pmode, 16);
5491 emit_move_insn (mem, fnaddr);
5492 mem = adjust_address (m_tramp, Pmode, 24);
5493 emit_move_insn (mem, chain_value);
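/* Resulting trampoline layout (illustrative):

      offset  0:  word1 -- code on OSF, masked descriptor on VMS
      offset  8:  word2 -- code on OSF, "..tr" entry symbol on VMS
      offset 16:  address of the nested function
      offset 24:  static chain value

   matching the 16/24 displacements used by the OSF code sequence.  */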
5497 emit_insn (gen_imb ());
5498 #ifdef HAVE_ENABLE_EXECUTE_STACK
5499 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5500 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5505 /* Determine where to put an argument to a function.
5506 Value is zero to push the argument on the stack,
5507 or a hard register in which to store the argument.
5509 MODE is the argument's machine mode.
5510 TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may not be available.
5513 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5514 the preceding args and about the function being called.
5515 NAMED is nonzero if this argument is a named parameter
5516 (otherwise it is an extra parameter matching an ellipsis).
5518 On Alpha the first 6 words of args are normally in registers
5519 and the rest are pushed. */
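/* For example (illustrative): on OSF a call f (int a, double b, int c)
   passes A in $16, B in $f17 and C in $18 -- each argument consumes
   one slot, with the register bank chosen by the argument's class.  */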
5522 alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
5523 const_tree type, bool named ATTRIBUTE_UNUSED)
5525 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5529 /* Don't get confused and pass small structures in FP registers. */
5530 if (type && AGGREGATE_TYPE_P (type))
5534 #ifdef ENABLE_CHECKING
5535 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5537 gcc_assert (!COMPLEX_MODE_P (mode));
5540 /* Set up defaults for FP operands passed in FP registers, and
5541 integral operands passed in integer registers. */
5542 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5548 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5549 the two platforms, so we can't avoid conditional compilation. */
5550 #if TARGET_ABI_OPEN_VMS
5552 if (mode == VOIDmode)
5553 return alpha_arg_info_reg_val (*cum);
5555 num_args = cum->num_args;
5557 || targetm.calls.must_pass_in_stack (mode, type))
5560 #elif TARGET_ABI_OSF
5566 /* VOID is passed as a special flag for "last argument". */
5567 if (type == void_type_node)
5569 else if (targetm.calls.must_pass_in_stack (mode, type))
5573 #error Unhandled ABI
5576 return gen_rtx_REG (mode, num_args + basereg);
5579 /* Update the data in CUM to advance over an argument
5580 of mode MODE and data type TYPE.
5581 (TYPE is null for libcalls where that information may not be available.) */
5584 alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
5585 const_tree type, bool named ATTRIBUTE_UNUSED)
5587 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5588 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5589 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5594 if (!onstack && cum->num_args < 6)
5595 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5596 cum->num_args += increment;
5601 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5602 enum machine_mode mode ATTRIBUTE_UNUSED,
5603 tree type ATTRIBUTE_UNUSED,
5604 bool named ATTRIBUTE_UNUSED)
5607 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5609 #if TARGET_ABI_OPEN_VMS
5610 if (cum->num_args < 6
5611 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5612 words = 6 - cum->num_args;
5613 #elif TARGET_ABI_OSF
5614 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5617 #error Unhandled ABI
5620 return words * UNITS_PER_WORD;
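/* Worked example: with five argument slots already consumed, a
   two-word argument has one word left in registers, so this returns
   8 -- one word in a register, the remainder on the stack.  */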
5624 /* Return true if TYPE must be returned in memory, instead of in registers. */
5627 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5629 enum machine_mode mode = VOIDmode;
5634 mode = TYPE_MODE (type);
5636 /* All aggregates are returned in memory, except on OpenVMS where
records that fit in 64 bits should be returned by immediate value
5638 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5639 if (TARGET_ABI_OPEN_VMS
5640 && TREE_CODE (type) != ARRAY_TYPE
&& (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
5644 if (AGGREGATE_TYPE_P (type))
5648 size = GET_MODE_SIZE (mode);
5649 switch (GET_MODE_CLASS (mode))
5651 case MODE_VECTOR_FLOAT:
5652 /* Pass all float vectors in memory, like an aggregate. */
5655 case MODE_COMPLEX_FLOAT:
5656 /* We judge complex floats on the size of their element,
5657 not the size of the whole type. */
5658 size = GET_MODE_UNIT_SIZE (mode);
5663 case MODE_COMPLEX_INT:
5664 case MODE_VECTOR_INT:
5668 /* ??? We get called on all sorts of random stuff from
5669 aggregate_value_p. We must return something, but it's not
clear what's safe to return.  Pretend it's a struct I would pass in memory.
5675 /* Otherwise types must fit in one register. */
5676 return size > UNITS_PER_WORD;
5679 /* Return true if TYPE should be passed by invisible reference. */
5682 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5683 enum machine_mode mode,
5684 const_tree type ATTRIBUTE_UNUSED,
5685 bool named ATTRIBUTE_UNUSED)
5687 return mode == TFmode || mode == TCmode;
5690 /* Define how to find the value returned by a function. VALTYPE is the
5691 data type of the value (as a tree). If the precise function being
5692 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5693 MODE is set instead of VALTYPE for libcalls.
5695 On Alpha the value is found in $0 for integer functions and
5696 $f0 for floating-point functions. */
5699 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5700 enum machine_mode mode)
5702 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5703 enum mode_class mclass;
5705 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5708 mode = TYPE_MODE (valtype);
5710 mclass = GET_MODE_CLASS (mode);
5714 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5715 where we have them returning both SImode and DImode. */
5716 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5717 PROMOTE_MODE (mode, dummy, valtype);
5720 case MODE_COMPLEX_INT:
5721 case MODE_VECTOR_INT:
5729 case MODE_COMPLEX_FLOAT:
5731 enum machine_mode cmode = GET_MODE_INNER (mode);
5733 return gen_rtx_PARALLEL
5736 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5738 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5739 GEN_INT (GET_MODE_SIZE (cmode)))));
5743 /* We should only reach here for BLKmode on VMS. */
5744 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5752 return gen_rtx_REG (mode, regnum);
5755 /* TCmode complex values are passed by invisible reference. We
5756 should not split these values. */
5759 alpha_split_complex_arg (const_tree type)
5761 return TYPE_MODE (type) != TCmode;
5765 alpha_build_builtin_va_list (void)
5767 tree base, ofs, space, record, type_decl;
5769 if (TARGET_ABI_OPEN_VMS)
5770 return ptr_type_node;
5772 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5773 type_decl = build_decl (BUILTINS_LOCATION,
5774 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5775 TYPE_STUB_DECL (record) = type_decl;
5776 TYPE_NAME (record) = type_decl;
5778 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5780 /* Dummy field to prevent alignment warnings. */
5781 space = build_decl (BUILTINS_LOCATION,
5782 FIELD_DECL, NULL_TREE, integer_type_node);
5783 DECL_FIELD_CONTEXT (space) = record;
5784 DECL_ARTIFICIAL (space) = 1;
5785 DECL_IGNORED_P (space) = 1;
5787 ofs = build_decl (BUILTINS_LOCATION,
5788 FIELD_DECL, get_identifier ("__offset"),
5790 DECL_FIELD_CONTEXT (ofs) = record;
5791 DECL_CHAIN (ofs) = space;
5792 /* ??? This is a hack, __offset is marked volatile to prevent
5793 DCE that confuses stdarg optimization and results in
5794 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5795 TREE_THIS_VOLATILE (ofs) = 1;
5797 base = build_decl (BUILTINS_LOCATION,
5798 FIELD_DECL, get_identifier ("__base"),
5800 DECL_FIELD_CONTEXT (base) = record;
5801 DECL_CHAIN (base) = ofs;
5803 TYPE_FIELDS (record) = base;
5804 layout_type (record);
5806 va_list_gpr_counter_field = ofs;
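/* The record built above corresponds roughly to this declaration
   (illustrative; the real fields are the trees constructed above):

       struct __va_list_tag {
         char *__base;      -- start of the argument save area
         int   __offset;    -- byte offset of the next argument
       };  */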
5811 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5812 and constant additions. */
5815 va_list_skip_additions (tree lhs)
5821 enum tree_code code;
5823 stmt = SSA_NAME_DEF_STMT (lhs);
5825 if (gimple_code (stmt) == GIMPLE_PHI)
5828 if (!is_gimple_assign (stmt)
5829 || gimple_assign_lhs (stmt) != lhs)
5832 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5834 code = gimple_assign_rhs_code (stmt);
5835 if (!CONVERT_EXPR_CODE_P (code)
5836 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5837 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5838 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5841 lhs = gimple_assign_rhs1 (stmt);
5845 /* Check if LHS = RHS statement is
LHS = *(ap.__base + ap.__offset + cst)
or
LHS = *(ap.__base
        + ((ap.__offset + cst <= 47)
           ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5851 If the former, indicate that GPR registers are needed,
5852 if the latter, indicate that FPR registers are needed.
Also look for LHS = (*ptr).field, where ptr is one of the forms listed above.
5857 On alpha, cfun->va_list_gpr_size is used as size of the needed
5858 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5859 registers are needed and bit 1 set if FPR registers are needed.
5860 Return true if va_list references should not be scanned for the
5861 current statement. */
5864 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5866 tree base, offset, rhs;
5870 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5871 != GIMPLE_SINGLE_RHS)
5874 rhs = gimple_assign_rhs1 (stmt);
5875 while (handled_component_p (rhs))
5876 rhs = TREE_OPERAND (rhs, 0);
5877 if (TREE_CODE (rhs) != MEM_REF
5878 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5881 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5883 || !is_gimple_assign (stmt)
5884 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5887 base = gimple_assign_rhs1 (stmt);
5888 if (TREE_CODE (base) == SSA_NAME)
5890 base_stmt = va_list_skip_additions (base);
5892 && is_gimple_assign (base_stmt)
5893 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5894 base = gimple_assign_rhs1 (base_stmt);
5897 if (TREE_CODE (base) != COMPONENT_REF
5898 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5900 base = gimple_assign_rhs2 (stmt);
5901 if (TREE_CODE (base) == SSA_NAME)
5903 base_stmt = va_list_skip_additions (base);
5905 && is_gimple_assign (base_stmt)
5906 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5907 base = gimple_assign_rhs1 (base_stmt);
5910 if (TREE_CODE (base) != COMPONENT_REF
5911 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5917 base = get_base_address (base);
5918 if (TREE_CODE (base) != VAR_DECL
5919 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5922 offset = gimple_op (stmt, 1 + offset_arg);
5923 if (TREE_CODE (offset) == SSA_NAME)
5925 gimple offset_stmt = va_list_skip_additions (offset);
5928 && gimple_code (offset_stmt) == GIMPLE_PHI)
5931 gimple arg1_stmt, arg2_stmt;
5933 enum tree_code code1, code2;
5935 if (gimple_phi_num_args (offset_stmt) != 2)
5939 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5941 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5942 if (arg1_stmt == NULL
5943 || !is_gimple_assign (arg1_stmt)
5944 || arg2_stmt == NULL
5945 || !is_gimple_assign (arg2_stmt))
5948 code1 = gimple_assign_rhs_code (arg1_stmt);
5949 code2 = gimple_assign_rhs_code (arg2_stmt);
5950 if (code1 == COMPONENT_REF
5951 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5953 else if (code2 == COMPONENT_REF
5954 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5956 gimple tem = arg1_stmt;
5958 arg1_stmt = arg2_stmt;
5964 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5967 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5968 if (code2 == MINUS_EXPR)
5970 if (sub < -48 || sub > -32)
5973 arg1 = gimple_assign_rhs1 (arg1_stmt);
5974 arg2 = gimple_assign_rhs1 (arg2_stmt);
5975 if (TREE_CODE (arg2) == SSA_NAME)
5977 arg2_stmt = va_list_skip_additions (arg2);
5978 if (arg2_stmt == NULL
5979 || !is_gimple_assign (arg2_stmt)
5980 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5982 arg2 = gimple_assign_rhs1 (arg2_stmt);
5987 if (TREE_CODE (arg1) != COMPONENT_REF
5988 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5989 || get_base_address (arg1) != base)
5992 /* Need floating point regs. */
5993 cfun->va_list_fpr_size |= 2;
5997 && is_gimple_assign (offset_stmt)
5998 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
5999 offset = gimple_assign_rhs1 (offset_stmt);
6001 if (TREE_CODE (offset) != COMPONENT_REF
6002 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6003 || get_base_address (offset) != base)
6006 /* Need general regs. */
6007 cfun->va_list_fpr_size |= 1;
6011 si->va_list_escapes = true;
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.  */
6020 alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
6021 tree type, int *pretend_size, int no_rtl)
6023 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6025 /* Skip the current argument. */
6026 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6029 #if TARGET_ABI_OPEN_VMS
6030 /* For VMS, we allocate space for all 6 arg registers plus a count.
6032 However, if NO registers need to be saved, don't allocate any space.
6033 This is not only because we won't need the space, but because AP
6034 includes the current_pretend_args_size and we don't want to mess up
6035 any ap-relative addresses already made. */
6036 if (cum.num_args < 6)
6040 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6041 emit_insn (gen_arg_home ());
6043 *pretend_size = 7 * UNITS_PER_WORD;
6046 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6047 only push those that are remaining. However, if NO registers need to
6048 be saved, don't allocate any space. This is not only because we won't
6049 need the space, but because AP includes the current_pretend_args_size
6050 and we don't want to mess up any ap-relative addresses already made.
6052 If we are not to use the floating-point registers, save the integer
6053 registers where we would put the floating-point registers. This is
6054 not the most efficient way to implement varargs with just one register
6055 class, but it isn't worth doing anything more efficient in this rare
6063 alias_set_type set = get_varargs_alias_set ();
6066 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6067 if (count > 6 - cum)
6070 /* Detect whether integer registers or floating-point registers
6071 are needed by the detected va_arg statements. See above for
6072 how these values are computed. Note that the "escape" value
is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of these bits set.  */
6075 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6077 if (cfun->va_list_fpr_size & 1)
6079 tmp = gen_rtx_MEM (BLKmode,
6080 plus_constant (virtual_incoming_args_rtx,
6081 (cum + 6) * UNITS_PER_WORD));
6082 MEM_NOTRAP_P (tmp) = 1;
6083 set_mem_alias_set (tmp, set);
6084 move_block_from_reg (16 + cum, tmp, count);
6087 if (cfun->va_list_fpr_size & 2)
6089 tmp = gen_rtx_MEM (BLKmode,
6090 plus_constant (virtual_incoming_args_rtx,
6091 cum * UNITS_PER_WORD));
6092 MEM_NOTRAP_P (tmp) = 1;
6093 set_mem_alias_set (tmp, set);
6094 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6097 *pretend_size = 12 * UNITS_PER_WORD;
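/* Illustrative layout when all six register pairs are saved (cum == 0):

       args + 0  .. +47   $f16-$f21 (or $16-$21 again without FP regs)
       args + 48 .. +95   $16-$21
       args + 96 ..       arguments already passed on the stack

   hence the 12-word pretend size.  */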
6102 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6104 HOST_WIDE_INT offset;
6105 tree t, offset_field, base_field;
6107 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6110 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6111 up by 48, storing fp arg registers in the first 48 bytes, and the
6112 integer arg registers in the next 48 bytes. This is only done,
6113 however, if any integer registers need to be stored.
6115 If no integer registers need be stored, then we must subtract 48
6116 in order to account for the integer arg registers which are counted
6117 in argsize above, but which are not actually stored on the stack.
6118 Must further be careful here about structures straddling the last
6119 integer argument register; that futzes with pretend_args_size,
6120 which changes the meaning of AP. */
if (NUM_ARGS < 6)
  offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
else
  offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6127 if (TARGET_ABI_OPEN_VMS)
6129 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6130 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6131 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6132 TREE_SIDE_EFFECTS (t) = 1;
6133 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6137 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6138 offset_field = DECL_CHAIN (base_field);
6140 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6141 valist, base_field, NULL_TREE);
6142 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6143 valist, offset_field, NULL_TREE);
6145 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6146 t = fold_build_pointer_plus_hwi (t, offset);
6147 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6148 TREE_SIDE_EFFECTS (t) = 1;
6149 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6151 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6152 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6153 TREE_SIDE_EFFECTS (t) = 1;
6154 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
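/* Worked example (illustrative): for int f (int a, ...), NUM_ARGS is 1,
   so the code above initializes __offset to 8 -- the named argument's
   slot is skipped before the first va_arg access.  */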
6159 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6162 tree type_size, ptr_type, addend, t, addr;
6163 gimple_seq internal_post;
6165 /* If the type could not be passed in registers, skip the block
6166 reserved for the registers. */
6167 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6169 t = build_int_cst (TREE_TYPE (offset), 6*8);
6170 gimplify_assign (offset,
6171 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6176 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6178 if (TREE_CODE (type) == COMPLEX_TYPE)
6180 tree real_part, imag_part, real_temp;
6182 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6185 /* Copy the value into a new temporary, lest the formal temporary
6186 be reused out from under us. */
6187 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6189 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6192 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6194 else if (TREE_CODE (type) == REAL_TYPE)
6196 tree fpaddend, cond, fourtyeight;
6198 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6199 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6200 addend, fourtyeight);
6201 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6202 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6206 /* Build the final address and force that value into a temporary. */
6207 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6208 internal_post = NULL;
6209 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6210 gimple_seq_add_seq (pre_p, internal_post);
6212 /* Update the offset field. */
6213 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6214 if (type_size == NULL || TREE_OVERFLOW (type_size))
6218 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6219 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6220 t = size_binop (MULT_EXPR, t, size_int (8));
6222 t = fold_convert (TREE_TYPE (offset), t);
6223 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6226 return build_va_arg_indirect_ref (addr);
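/* In the offset update above, e.g. a 12-byte record advances __offset
   by 16: (12 + 7) / 8 * 8 rounds the size up to whole 8-byte slots.  */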
6230 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6233 tree offset_field, base_field, offset, base, t, r;
6236 if (TARGET_ABI_OPEN_VMS)
6237 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6239 base_field = TYPE_FIELDS (va_list_type_node);
6240 offset_field = DECL_CHAIN (base_field);
6241 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6242 valist, base_field, NULL_TREE);
6243 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6244 valist, offset_field, NULL_TREE);
6246 /* Pull the fields of the structure out into temporaries. Since we never
6247 modify the base field, we can use a formal temporary. Sign-extend the
6248 offset field so that it's the proper width for pointer arithmetic. */
6249 base = get_formal_tmp_var (base_field, pre_p);
6251 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6252 offset = get_initialized_tmp_var (t, pre_p, NULL);
6254 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6256 type = build_pointer_type_for_mode (type, ptr_mode, true);
6258 /* Find the value. Note that this will be a stable indirection, or
6259 a composite of stable indirections in the case of complex. */
6260 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6262 /* Stuff the offset temporary back into its field. */
6263 gimplify_assign (unshare_expr (offset_field),
6264 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6267 r = build_va_arg_indirect_ref (r);
6276 ALPHA_BUILTIN_CMPBGE,
6277 ALPHA_BUILTIN_EXTBL,
6278 ALPHA_BUILTIN_EXTWL,
6279 ALPHA_BUILTIN_EXTLL,
6280 ALPHA_BUILTIN_EXTQL,
6281 ALPHA_BUILTIN_EXTWH,
6282 ALPHA_BUILTIN_EXTLH,
6283 ALPHA_BUILTIN_EXTQH,
6284 ALPHA_BUILTIN_INSBL,
6285 ALPHA_BUILTIN_INSWL,
6286 ALPHA_BUILTIN_INSLL,
6287 ALPHA_BUILTIN_INSQL,
6288 ALPHA_BUILTIN_INSWH,
6289 ALPHA_BUILTIN_INSLH,
6290 ALPHA_BUILTIN_INSQH,
6291 ALPHA_BUILTIN_MSKBL,
6292 ALPHA_BUILTIN_MSKWL,
6293 ALPHA_BUILTIN_MSKLL,
6294 ALPHA_BUILTIN_MSKQL,
6295 ALPHA_BUILTIN_MSKWH,
6296 ALPHA_BUILTIN_MSKLH,
6297 ALPHA_BUILTIN_MSKQH,
6298 ALPHA_BUILTIN_UMULH,
6300 ALPHA_BUILTIN_ZAPNOT,
6301 ALPHA_BUILTIN_AMASK,
6302 ALPHA_BUILTIN_IMPLVER,
6304 ALPHA_BUILTIN_THREAD_POINTER,
6305 ALPHA_BUILTIN_SET_THREAD_POINTER,
6306 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6307 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6310 ALPHA_BUILTIN_MINUB8,
6311 ALPHA_BUILTIN_MINSB8,
6312 ALPHA_BUILTIN_MINUW4,
6313 ALPHA_BUILTIN_MINSW4,
6314 ALPHA_BUILTIN_MAXUB8,
6315 ALPHA_BUILTIN_MAXSB8,
6316 ALPHA_BUILTIN_MAXUW4,
6317 ALPHA_BUILTIN_MAXSW4,
6321 ALPHA_BUILTIN_UNPKBL,
6322 ALPHA_BUILTIN_UNPKBW,
6327 ALPHA_BUILTIN_CTPOP,
6332 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6333 CODE_FOR_builtin_cmpbge,
6341 CODE_FOR_builtin_insbl,
6342 CODE_FOR_builtin_inswl,
6343 CODE_FOR_builtin_insll,
6355 CODE_FOR_umuldi3_highpart,
6356 CODE_FOR_builtin_zap,
6357 CODE_FOR_builtin_zapnot,
6358 CODE_FOR_builtin_amask,
6359 CODE_FOR_builtin_implver,
6360 CODE_FOR_builtin_rpcc,
6363 CODE_FOR_builtin_establish_vms_condition_handler,
6364 CODE_FOR_builtin_revert_vms_condition_handler,
6367 CODE_FOR_builtin_minub8,
6368 CODE_FOR_builtin_minsb8,
6369 CODE_FOR_builtin_minuw4,
6370 CODE_FOR_builtin_minsw4,
6371 CODE_FOR_builtin_maxub8,
6372 CODE_FOR_builtin_maxsb8,
6373 CODE_FOR_builtin_maxuw4,
6374 CODE_FOR_builtin_maxsw4,
6375 CODE_FOR_builtin_perr,
6376 CODE_FOR_builtin_pklb,
6377 CODE_FOR_builtin_pkwb,
6378 CODE_FOR_builtin_unpkbl,
6379 CODE_FOR_builtin_unpkbw,
6384 CODE_FOR_popcountdi2
6387 struct alpha_builtin_def
6390 enum alpha_builtin code;
6391 unsigned int target_mask;
6395 static struct alpha_builtin_def const zero_arg_builtins[] = {
6396 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6397 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6400 static struct alpha_builtin_def const one_arg_builtins[] = {
6401 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6402 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6403 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6404 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6405 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6406 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6407 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6408 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6411 static struct alpha_builtin_def const two_arg_builtins[] = {
6412 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6413 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6414 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6415 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6416 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6417 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6418 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6419 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6420 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6421 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6422 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6423 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6424 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6425 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6426 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6427 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6428 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6429 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6430 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6431 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6432 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6433 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6434 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6435 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6436 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6437 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6438 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6439 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6440 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6441 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6442 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6443 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6444 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6445 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6448 static GTY(()) tree alpha_v8qi_u;
6449 static GTY(()) tree alpha_v8qi_s;
6450 static GTY(()) tree alpha_v4hi_u;
6451 static GTY(()) tree alpha_v4hi_s;
6453 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6455 /* Return the alpha builtin for CODE. */
6458 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6460 if (code >= ALPHA_BUILTIN_max)
6461 return error_mark_node;
6462 return alpha_builtins[code];
6465 /* Helper function of alpha_init_builtins. Add the built-in specified
6466 by NAME, TYPE, CODE, and ECF. */
6469 alpha_builtin_function (const char *name, tree ftype,
6470 enum alpha_builtin code, unsigned ecf)
6472 tree decl = add_builtin_function (name, ftype, (int) code,
6473 BUILT_IN_MD, NULL, NULL_TREE);
6475 if (ecf & ECF_CONST)
6476 TREE_READONLY (decl) = 1;
6477 if (ecf & ECF_NOTHROW)
6478 TREE_NOTHROW (decl) = 1;
6480 alpha_builtins [(int) code] = decl;
6483 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6484 functions pointed to by P, with function type FTYPE. */
6487 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6492 for (i = 0; i < count; ++i, ++p)
6493 if ((target_flags & p->target_mask) == p->target_mask)
6494 alpha_builtin_function (p->name, ftype, p->code,
6495 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
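/* E.g. a builtin tagged MASK_MAX is only registered when the MAX
   extension is enabled, since the underlying insn would otherwise be
   unavailable.  */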
6499 alpha_init_builtins (void)
6501 tree dimode_integer_type_node;
6504 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6506 ftype = build_function_type_list (dimode_integer_type_node, NULL_TREE);
6507 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6510 ftype = build_function_type_list (dimode_integer_type_node,
6511 dimode_integer_type_node, NULL_TREE);
6512 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6515 ftype = build_function_type_list (dimode_integer_type_node,
6516 dimode_integer_type_node,
6517 dimode_integer_type_node, NULL_TREE);
6518 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6521 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
6522 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6523 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6525 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6526 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6527 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6529 if (TARGET_ABI_OPEN_VMS)
6531 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6533 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6535 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6538 ftype = build_function_type_list (ptr_type_node, void_type_node,
6540 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6541 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6543 vms_patch_builtins ();
6546 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6547 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6548 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6549 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6552 /* Expand an expression EXP that calls a built-in function,
6553 with result going to TARGET if that's convenient
6554 (and in mode MODE if that's convenient).
6555 SUBTARGET may be used as the target for computing one of EXP's operands.
6556 IGNORE is nonzero if the value is to be ignored. */
6559 alpha_expand_builtin (tree exp, rtx target,
6560 rtx subtarget ATTRIBUTE_UNUSED,
6561 enum machine_mode mode ATTRIBUTE_UNUSED,
6562 int ignore ATTRIBUTE_UNUSED)
6566 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6567 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6569 call_expr_arg_iterator iter;
6570 enum insn_code icode;
6571 rtx op[MAX_ARGS], pat;
6575 if (fcode >= ALPHA_BUILTIN_max)
6576 internal_error ("bad builtin fcode");
6577 icode = code_for_builtin[fcode];
6579 internal_error ("bad builtin fcode");
6581 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6584 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6586 const struct insn_operand_data *insn_op;
6588 if (arg == error_mark_node)
6590 if (arity > MAX_ARGS)
6593 insn_op = &insn_data[icode].operand[arity + nonvoid];
6595 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6597 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6598 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6604 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6606 || GET_MODE (target) != tmode
6607 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6608 target = gen_reg_rtx (tmode);
6614 pat = GEN_FCN (icode) (target);
6618 pat = GEN_FCN (icode) (target, op[0]);
6620 pat = GEN_FCN (icode) (op[0]);
6623 pat = GEN_FCN (icode) (target, op[0], op[1]);
/* Several bits below assume HWI >= 64 bits.  This should be enforced
   by the check just below.  */
6641 #if HOST_BITS_PER_WIDE_INT < 64
6642 # error "HOST_WIDE_INT too small"
6645 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6646 with an 8-bit output vector. OPINT contains the integer operands; bit N
6647 of OP_CONST is set if OPINT[N] is valid. */
6650 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6655 for (i = 0, val = 0; i < 8; ++i)
6657 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6658 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6662 return build_int_cst (long_integer_type_node, val);
6664 else if (op_const == 2 && opint[1] == 0)
6665 return build_int_cst (long_integer_type_node, 0xff);
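/* E.g. __builtin_alpha_cmpbge (x, 0) folds to 0xff even for
   non-constant X, since every unsigned byte compares >= 0.  */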
6669 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6670 specialized form of an AND operation. Other byte manipulation instructions
6671 are defined in terms of this instruction, so this is also used as a
6672 subroutine for other builtins.
6674 OP contains the tree operands; OPINT contains the extracted integer values.
Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6676 OPINT may be considered. */
6679 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6684 unsigned HOST_WIDE_INT mask = 0;
6687 for (i = 0; i < 8; ++i)
6688 if ((opint[1] >> i) & 1)
6689 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6692 return build_int_cst (long_integer_type_node, opint[0] & mask);
6695 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6696 build_int_cst (long_integer_type_node, mask));
6698 else if ((op_const & 1) && opint[0] == 0)
6699 return build_int_cst (long_integer_type_node, 0);
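/* For example, __builtin_alpha_zapnot (x, 0x0f) keeps the low four
   bytes: it folds to X & 0xffffffff, and to 0 when X is zero.  */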
6703 /* Fold the builtins for the EXT family of instructions. */
6706 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6707 long op_const, unsigned HOST_WIDE_INT bytemask,
6711 tree *zap_op = NULL;
6715 unsigned HOST_WIDE_INT loc;
6718 loc *= BITS_PER_UNIT;
6724 unsigned HOST_WIDE_INT temp = opint[0];
6737 opint[1] = bytemask;
6738 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
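/* Worked example: __builtin_alpha_extwl (x, 2) extracts the word at
   byte offset 2 -- the fold shifts X right by 16 bits and then
   applies zapnot with bytemask 0x03.  */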
6741 /* Fold the builtins for the INS family of instructions. */
6744 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6745 long op_const, unsigned HOST_WIDE_INT bytemask,
6748 if ((op_const & 1) && opint[0] == 0)
6749 return build_int_cst (long_integer_type_node, 0);
6753 unsigned HOST_WIDE_INT temp, loc, byteloc;
6754 tree *zap_op = NULL;
6762 byteloc = (64 - (loc * 8)) & 0x3f;
6779 opint[1] = bytemask;
6780 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6787 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6788 long op_const, unsigned HOST_WIDE_INT bytemask,
6793 unsigned HOST_WIDE_INT loc;
6801 opint[1] = bytemask ^ 0xff;
6804 return alpha_fold_builtin_zapnot (op, opint, op_const);
6808 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6814 unsigned HOST_WIDE_INT l;
6817 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6819 #if HOST_BITS_PER_WIDE_INT > 64
6823 return build_int_cst (long_integer_type_node, h);
6827 opint[1] = opint[0];
6830 /* Note that (X*1) >> 64 == 0. */
6831 if (opint[1] == 0 || opint[1] == 1)
6832 return build_int_cst (long_integer_type_node, 0);
6839 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6841 tree op0 = fold_convert (vtype, op[0]);
6842 tree op1 = fold_convert (vtype, op[1]);
6843 tree val = fold_build2 (code, vtype, op0, op1);
6844 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6848 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6850 unsigned HOST_WIDE_INT temp = 0;
6856 for (i = 0; i < 8; ++i)
6858 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6859 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6866 return build_int_cst (long_integer_type_node, temp);
6870 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6872 unsigned HOST_WIDE_INT temp;
6877 temp = opint[0] & 0xff;
6878 temp |= (opint[0] >> 24) & 0xff00;
6880 return build_int_cst (long_integer_type_node, temp);
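/* Worked example: pklb on 0x0000004400000011 yields 0x4411 -- the low
   byte of each longword is kept.  */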
6884 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6886 unsigned HOST_WIDE_INT temp;
6891 temp = opint[0] & 0xff;
6892 temp |= (opint[0] >> 8) & 0xff00;
6893 temp |= (opint[0] >> 16) & 0xff0000;
6894 temp |= (opint[0] >> 24) & 0xff000000;
6896 return build_int_cst (long_integer_type_node, temp);
6900 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6902 unsigned HOST_WIDE_INT temp;
6907 temp = opint[0] & 0xff;
6908 temp |= (opint[0] & 0xff00) << 24;
6910 return build_int_cst (long_integer_type_node, temp);
6914 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6916 unsigned HOST_WIDE_INT temp;
6921 temp = opint[0] & 0xff;
6922 temp |= (opint[0] & 0x0000ff00) << 8;
6923 temp |= (opint[0] & 0x00ff0000) << 16;
6924 temp |= (opint[0] & 0xff000000) << 24;
6926 return build_int_cst (long_integer_type_node, temp);
6930 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6932 unsigned HOST_WIDE_INT temp;
6940 temp = exact_log2 (opint[0] & -opint[0]);
6942 return build_int_cst (long_integer_type_node, temp);
6946 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6948 unsigned HOST_WIDE_INT temp;
6956 temp = 64 - floor_log2 (opint[0]) - 1;
6958 return build_int_cst (long_integer_type_node, temp);
6962 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6964 unsigned HOST_WIDE_INT temp, op;
6972 temp++, op &= op - 1;
6974 return build_int_cst (long_integer_type_node, temp);
6977 /* Fold one of our builtin functions. */
6980 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6981 bool ignore ATTRIBUTE_UNUSED)
6983 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6987 if (n_args >= MAX_ARGS)
6990 for (i = 0; i < n_args; i++)
6993 if (arg == error_mark_node)
6997 if (TREE_CODE (arg) == INTEGER_CST)
6999 op_const |= 1L << i;
7000 opint[i] = int_cst_value (arg);
7004 switch (DECL_FUNCTION_CODE (fndecl))
7006 case ALPHA_BUILTIN_CMPBGE:
7007 return alpha_fold_builtin_cmpbge (opint, op_const);
7009 case ALPHA_BUILTIN_EXTBL:
7010 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7011 case ALPHA_BUILTIN_EXTWL:
7012 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7013 case ALPHA_BUILTIN_EXTLL:
7014 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7015 case ALPHA_BUILTIN_EXTQL:
7016 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7017 case ALPHA_BUILTIN_EXTWH:
7018 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7019 case ALPHA_BUILTIN_EXTLH:
7020 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7021 case ALPHA_BUILTIN_EXTQH:
7022 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7024 case ALPHA_BUILTIN_INSBL:
7025 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7026 case ALPHA_BUILTIN_INSWL:
7027 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7028 case ALPHA_BUILTIN_INSLL:
7029 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7030 case ALPHA_BUILTIN_INSQL:
7031 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7032 case ALPHA_BUILTIN_INSWH:
7033 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7034 case ALPHA_BUILTIN_INSLH:
7035 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7036 case ALPHA_BUILTIN_INSQH:
7037 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7039 case ALPHA_BUILTIN_MSKBL:
7040 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7041 case ALPHA_BUILTIN_MSKWL:
7042 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7043 case ALPHA_BUILTIN_MSKLL:
7044 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7045 case ALPHA_BUILTIN_MSKQL:
7046 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7047 case ALPHA_BUILTIN_MSKWH:
7048 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7049 case ALPHA_BUILTIN_MSKLH:
7050 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7051 case ALPHA_BUILTIN_MSKQH:
7052 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7054 case ALPHA_BUILTIN_UMULH:
7055 return alpha_fold_builtin_umulh (opint, op_const);
7057 case ALPHA_BUILTIN_ZAP:
7060 case ALPHA_BUILTIN_ZAPNOT:
7061 return alpha_fold_builtin_zapnot (op, opint, op_const);
7063 case ALPHA_BUILTIN_MINUB8:
7064 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7065 case ALPHA_BUILTIN_MINSB8:
7066 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7067 case ALPHA_BUILTIN_MINUW4:
7068 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7069 case ALPHA_BUILTIN_MINSW4:
7070 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7071 case ALPHA_BUILTIN_MAXUB8:
7072 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7073 case ALPHA_BUILTIN_MAXSB8:
7074 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7075 case ALPHA_BUILTIN_MAXUW4:
7076 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7077 case ALPHA_BUILTIN_MAXSW4:
7078 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7080 case ALPHA_BUILTIN_PERR:
7081 return alpha_fold_builtin_perr (opint, op_const);
7082 case ALPHA_BUILTIN_PKLB:
7083 return alpha_fold_builtin_pklb (opint, op_const);
7084 case ALPHA_BUILTIN_PKWB:
7085 return alpha_fold_builtin_pkwb (opint, op_const);
7086 case ALPHA_BUILTIN_UNPKBL:
7087 return alpha_fold_builtin_unpkbl (opint, op_const);
7088 case ALPHA_BUILTIN_UNPKBW:
7089 return alpha_fold_builtin_unpkbw (opint, op_const);
7091 case ALPHA_BUILTIN_CTTZ:
7092 return alpha_fold_builtin_cttz (opint, op_const);
7093 case ALPHA_BUILTIN_CTLZ:
7094 return alpha_fold_builtin_ctlz (opint, op_const);
7095 case ALPHA_BUILTIN_CTPOP:
7096 return alpha_fold_builtin_ctpop (opint, op_const);
7098 case ALPHA_BUILTIN_AMASK:
7099 case ALPHA_BUILTIN_IMPLVER:
7100 case ALPHA_BUILTIN_RPCC:
7101 case ALPHA_BUILTIN_THREAD_POINTER:
7102 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7103 /* None of these are foldable at compile-time. */
7109 /* This page contains routines that are used to determine what the function
prologue and epilogue code will do, and to write them out.  */
7112 /* Compute the size of the save area in the stack. */
7114 /* These variables are used for communication between the following functions.
7115 They indicate various things about the current function being compiled
7116 that are used to tell what kind of prologue, epilogue and procedure
7117 descriptor to generate. */
7119 /* Nonzero if we need a stack procedure. */
7120 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7121 static enum alpha_procedure_types alpha_procedure_type;
7123 /* Register number (either FP or SP) that is used to unwind the frame. */
7124 static int vms_unwind_regno;
7126 /* Register number used to save FP. We need not have one for RA since
7127 we don't modify it for register procedures. This is only defined
7128 for register frame procedures. */
7129 static int vms_save_fp_regno;
7131 /* Register number used to reference objects off our PV. */
7132 static int vms_base_regno;
7134 /* Compute register masks for saved registers. */
7137 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7139 unsigned long imask = 0;
7140 unsigned long fmask = 0;
7143 /* When outputting a thunk, we don't have valid register life info,
but assemble_start_function wants to output .frame and .mask directives.  */
7153 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7154 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7156 /* One for every register we have to save. */
7157 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7158 if (! fixed_regs[i] && ! call_used_regs[i]
7159 && df_regs_ever_live_p (i) && i != REG_RA)
7162 imask |= (1UL << i);
7164 fmask |= (1UL << (i - 32));
7167 /* We need to restore these for the handler. */
7168 if (crtl->calls_eh_return)
7172 unsigned regno = EH_RETURN_DATA_REGNO (i);
7173 if (regno == INVALID_REGNUM)
7175 imask |= 1UL << regno;
7179 /* If any register spilled, then spill the return address also. */
7180 /* ??? This is required by the Digital stack unwind specification
7181 and isn't needed if we're doing Dwarf2 unwinding. */
7182 if (imask || fmask || alpha_ra_ever_killed ())
7183 imask |= (1UL << REG_RA);
7190 alpha_sa_size (void)
7192 unsigned long mask[2];
7196 alpha_sa_mask (&mask[0], &mask[1]);
7198 for (j = 0; j < 2; ++j)
7199 for (i = 0; i < 32; ++i)
7200 if ((mask[j] >> i) & 1)
7203 if (TARGET_ABI_OPEN_VMS)
7205 /* Start with a stack procedure if we make any calls (REG_RA used), or
7206 need a frame pointer, with a register procedure if we otherwise need
7207 at least a slot, and with a null procedure in other cases. */
7208 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7209 alpha_procedure_type = PT_STACK;
7210 else if (get_frame_size() != 0)
7211 alpha_procedure_type = PT_REGISTER;
7213 alpha_procedure_type = PT_NULL;
7215 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7216 made the final decision on stack procedure vs register procedure. */
7217 if (alpha_procedure_type == PT_STACK)
7220 /* Decide whether to refer to objects off our PV via FP or PV.
7221 If we need FP for something else or if we receive a nonlocal
7222 goto (which expects PV to contain the value), we must use PV.
7223 Otherwise, start by assuming we can use FP. */
7226 = (frame_pointer_needed
7227 || cfun->has_nonlocal_label
7228 || alpha_procedure_type == PT_STACK
7229 || crtl->outgoing_args_size)
7230 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7232 /* If we want to copy PV into FP, we need to find some register
7233 in which to save FP. */
7235 vms_save_fp_regno = -1;
7236 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7237 for (i = 0; i < 32; i++)
7238 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7239 vms_save_fp_regno = i;
/* A VMS condition handler requires a stack procedure in our
   implementation (it is not required by the calling standard).  */
7243 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7244 || cfun->machine->uses_condition_handler)
7245 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7246 else if (alpha_procedure_type == PT_NULL)
7247 vms_base_regno = REG_PV;
7249 /* Stack unwinding should be done via FP unless we use it for PV. */
7250 vms_unwind_regno = (vms_base_regno == REG_PV
7251 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7253 /* If this is a stack procedure, allow space for saving FP, RA and
7254 a condition handler slot if needed. */
7255 if (alpha_procedure_type == PT_STACK)
7256 sa_size += 2 + cfun->machine->uses_condition_handler;
7260 /* The slot count must be even so the save area is a multiple of 16 bytes. */
7268 /* Define the offset between two registers, one to be eliminated,
7269 and the other its replacement, at the start of a routine. */
7272 alpha_initial_elimination_offset (unsigned int from,
7273 unsigned int to ATTRIBUTE_UNUSED)
7277 ret = alpha_sa_size ();
7278 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7282 case FRAME_POINTER_REGNUM:
7285 case ARG_POINTER_REGNUM:
7286 ret += (ALPHA_ROUND (get_frame_size ()
7287 + crtl->args.pretend_args_size)
7288 - crtl->args.pretend_args_size);
7298 #if TARGET_ABI_OPEN_VMS
7300 /* Worker function for TARGET_CAN_ELIMINATE. */
7303 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7305 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7308 switch (alpha_procedure_type)
7311 /* NULL procedures have no frame of their own and we only
7312 know how to resolve from the current stack pointer. */
7313 return to == STACK_POINTER_REGNUM;
7317 /* We always eliminate except to the stack pointer if there is no
7318 usable frame pointer at hand. */
7319 return (to != STACK_POINTER_REGNUM
7320 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7326 /* FROM is to be eliminated in favor of TO. Return the offset so that TO+offset
7327 designates the same location as FROM. */
7330 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7332 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7333 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7334 on the proper computations and will need the register save area size in most cases. */
7337 HOST_WIDE_INT sa_size = alpha_sa_size ();
7339 /* PT_NULL procedures have no frame of their own and we only allow
7340 elimination to the stack pointer. This is the argument pointer and we
7341 resolve the soft frame pointer to that as well. */
7343 if (alpha_procedure_type == PT_NULL)
7346 /* For a PT_STACK procedure the frame layout looks as follows
7348                      -----> decreasing addresses
7350                <         size rounded up to 16          |   likewise   >
7351 --------------#------------------------------+++--------------+++-------#
7352 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7353 --------------#---------------------------------------------------------#
7354                ^                         ^              ^               ^
7355            ARG_PTR               FRAME_PTR HARD_FRAME_PTR       STACK_PTR
7358 PT_REGISTER procedures are similar in that they may have a frame of their
7359 own. They have no regs-sa/pv/outgoing-args area.
7361 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7362 to STACK_PTR if need be. */
7365 HOST_WIDE_INT offset;
7366 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7370 case FRAME_POINTER_REGNUM:
7371 offset = ALPHA_ROUND (sa_size + pv_save_size);
7373 case ARG_POINTER_REGNUM:
7374 offset = (ALPHA_ROUND (sa_size + pv_save_size
7375 + get_frame_size ()
7376 + crtl->args.pretend_args_size)
7377 - crtl->args.pretend_args_size);
7383 if (to == STACK_POINTER_REGNUM)
7384 offset += ALPHA_ROUND (crtl->outgoing_args_size);
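/* A hypothetical worked example (assuming ALPHA_ROUND rounds up to a
   multiple of 16, with made-up sizes): for a PT_STACK procedure with
   sa_size == 24 and pv_save_size == 8, eliminating FRAME_POINTER_REGNUM
   to HARD_FRAME_POINTER_REGNUM gives ALPHA_ROUND (24 + 8) == 32; with 48
   bytes of outgoing args, eliminating to STACK_POINTER_REGNUM instead
   adds ALPHA_ROUND (48) == 48, for a total offset of 80.  */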
7390 #define COMMON_OBJECT "common_object"
7393 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7394 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7395 bool *no_add_attrs ATTRIBUTE_UNUSED)
7398 gcc_assert (DECL_P (decl));
7400 DECL_COMMON (decl) = 1;
7404 static const struct attribute_spec vms_attribute_table[] =
7406 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7407 affects_type_identity } */
7408 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7409 { NULL, 0, 0, false, false, false, NULL, false }
7413 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7414 unsigned HOST_WIDE_INT size,
7417 tree attr = DECL_ATTRIBUTES (decl);
7418 fprintf (file, "%s", COMMON_ASM_OP);
7419 assemble_name (file, name);
7420 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7421 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7422 fprintf (file, ",%u", align / BITS_PER_UNIT);
7425 attr = lookup_attribute (COMMON_OBJECT, attr);
7427 fprintf (file, ",%s",
7428 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7433 #undef COMMON_OBJECT
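/* Purely illustrative usage (hypothetical user code, not from these
   sources): with the table above registered, a VMS user may write

     int psect_var __attribute__ ((common_object));

   and the handler marks the decl DECL_COMMON; the optional argument,
   when present, is printed by vms_output_aligned_decl_common as an
   extra operand of the common directive.  */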
7438 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7440 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7444 alpha_find_lo_sum_using_gp (rtx insn)
7446 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7450 alpha_does_function_need_gp (void)
7454 /* The GP being variable is an OSF abi thing. */
7455 if (! TARGET_ABI_OSF)
7458 /* We need the gp to load the address of __mcount. */
7459 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7462 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7466 /* The nonlocal receiver pattern assumes that the gp is valid for
7467 the nested function. Reasonable because it's almost always set
7468 correctly already. For the cases where that's wrong, make sure
7469 the nested function loads its gp on entry. */
7470 if (crtl->has_nonlocal_goto)
7473 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7474 Even if we are a static function, we still need to do this in case
7475 our address is taken and passed to something like qsort. */
7477 push_topmost_sequence ();
7478 insn = get_insns ();
7479 pop_topmost_sequence ();
7481 for (; insn; insn = NEXT_INSN (insn))
7482 if (NONDEBUG_INSN_P (insn)
7483 && ! JUMP_TABLE_DATA_P (insn)
7484 && GET_CODE (PATTERN (insn)) != USE
7485 && GET_CODE (PATTERN (insn)) != CLOBBER
7486 && get_attr_usegp (insn))
7493 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including sequences. */
7497 set_frame_related_p (void)
7499 rtx seq = get_insns ();
7510 while (insn != NULL_RTX)
7512 RTX_FRAME_RELATED_P (insn) = 1;
7513 insn = NEXT_INSN (insn);
7515 seq = emit_insn (seq);
7519 seq = emit_insn (seq);
7520 RTX_FRAME_RELATED_P (seq) = 1;
7525 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
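/* For example, FRP (emit_move_insn (x, y)) expands to
     (start_sequence (), emit_move_insn (x, y), set_frame_related_p ())
   so the emitted insns are collected into a sequence, each one is marked
   RTX_FRAME_RELATED_P, and the sequence is then re-emitted.  */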
7527 /* Generates a store with the proper unwind info attached. VALUE is
7528 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7529 contains SP+FRAME_BIAS, and that is the unwind info that should be
7530 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7531 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7534 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7535 HOST_WIDE_INT base_ofs, rtx frame_reg)
7537 rtx addr, mem, insn;
7539 addr = plus_constant (base_reg, base_ofs);
7540 mem = gen_frame_mem (DImode, addr);
7542 insn = emit_move_insn (mem, value);
7543 RTX_FRAME_RELATED_P (insn) = 1;
7545 if (frame_bias || value != frame_reg)
7549 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7550 mem = gen_rtx_MEM (DImode, addr);
7553 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7554 gen_rtx_SET (VOIDmode, mem, frame_reg));
7559 emit_frame_store (unsigned int regno, rtx base_reg,
7560 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7562 rtx reg = gen_rtx_REG (DImode, regno);
7563 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7566 /* Compute the frame size. SIZE is the size of the "naked" frame
7567 and SA_SIZE is the size of the register save area. */
7569 static HOST_WIDE_INT
7570 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7572 if (TARGET_ABI_OPEN_VMS)
7573 return ALPHA_ROUND (sa_size
7574 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7576 + crtl->args.pretend_args_size);
7578 return ALPHA_ROUND (crtl->outgoing_args_size)
7581 + crtl->args.pretend_args_size);
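/* A hypothetical example, assuming ALPHA_ROUND rounds up to a multiple
   of 16: a VMS PT_STACK procedure with sa_size == 24, a 40-byte naked
   frame and no pretend args needs ALPHA_ROUND (24 + 8 + 40)
   == ALPHA_ROUND (72) == 80 bytes of stack.  */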
7584 /* Write function prologue. */
7586 /* On vms we have two kinds of functions:
7588 - stack frame (PROC_STACK)
7589 these are 'normal' functions with local vars and which are
7590 calling other functions
7591 - register frame (PROC_REGISTER)
7592 keeps all data in registers, needs no stack
7594 We must pass this to the assembler so it can generate the
7595 proper pdsc (procedure descriptor).
7596 This is done with the '.pdesc' directive.
7598 On non-VMS targets we don't really differentiate between the two, as we
7599 can simply allocate stack without saving registers. */
7602 alpha_expand_prologue (void)
7604 /* Registers to save. */
7605 unsigned long imask = 0;
7606 unsigned long fmask = 0;
7607 /* Stack space needed for pushing registers clobbered by us. */
7608 HOST_WIDE_INT sa_size, sa_bias;
7609 /* Complete stack size needed. */
7610 HOST_WIDE_INT frame_size;
7611 /* Probed stack size; it additionally includes the size of
7612 the "reserve region" if any. */
7613 HOST_WIDE_INT probed_size;
7614 /* Offset from base reg to register save area. */
7615 HOST_WIDE_INT reg_offset;
7619 sa_size = alpha_sa_size ();
7620 frame_size = compute_frame_size (get_frame_size (), sa_size);
7622 if (flag_stack_usage_info)
7623 current_function_static_stack_size = frame_size;
7625 if (TARGET_ABI_OPEN_VMS)
7626 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7628 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7630 alpha_sa_mask (&imask, &fmask);
7632 /* Emit an insn to reload GP, if needed. */
7635 alpha_function_needs_gp = alpha_does_function_need_gp ();
7636 if (alpha_function_needs_gp)
7637 emit_insn (gen_prologue_ldgp ());
7640 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7641 the call to mcount ourselves, rather than having the linker do it
7642 magically in response to -pg. Since _mcount has special linkage,
7643 don't represent the call as a call. */
7644 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7645 emit_insn (gen_prologue_mcount ());
7647 /* Adjust the stack by the frame size. If the frame size is > 4096
7648 bytes, we need to be sure we probe somewhere in the first and last
7649 4096 bytes (we can probably get away without the latter test) and
7650 every 8192 bytes in between. If the frame size is > 32768, we
7650 do this in a loop. Otherwise, we generate the explicit probe instructions.
7654 Note that we are only allowed to adjust sp once in the prologue. */
7656 probed_size = frame_size;
7657 if (flag_stack_check)
7658 probed_size += STACK_CHECK_PROTECT;
7660 if (probed_size <= 32768)
7662 if (probed_size > 4096)
7666 for (probed = 4096; probed < probed_size; probed += 8192)
7667 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7669 /* We only have to do this probe if we aren't saving registers or
7670 if we are probing beyond the frame because of -fstack-check. */
7671 if ((sa_size == 0 && probed_size > probed - 4096)
7672 || flag_stack_check)
7673 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7676 if (frame_size != 0)
7677 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7678 GEN_INT (-frame_size))));
7682 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7683 number of 8192 byte blocks to probe. We then probe each block
7684 in the loop and then set SP to the proper location. If the
7685 amount remaining is > 4096, we have to do one more probe if we
7686 are not saving any registers or if we are probing beyond the
7687 frame because of -fstack-check. */
7689 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7690 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
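/* Illustrative numbers: probed_size == 50000 gives
   blocks == (50000 + 4096) / 8192 == 6 and
   leftover == 54096 - 6*8192 == 4944; since 4944 > 4096, the
   remainder still needs the extra probe below when no registers
   are being saved.  */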
7691 rtx ptr = gen_rtx_REG (DImode, 22);
7692 rtx count = gen_rtx_REG (DImode, 23);
7695 emit_move_insn (count, GEN_INT (blocks));
7696 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7698 /* Because of the difficulty in emitting a new basic block this
7699 late in the compilation, generate the loop as a single insn. */
7700 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7702 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7704 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7705 MEM_VOLATILE_P (last) = 1;
7706 emit_move_insn (last, const0_rtx);
7709 if (flag_stack_check)
7711 /* If -fstack-check is specified we have to load the entire
7712 constant into a register and subtract from the sp in one go,
7713 because the probed stack size is not equal to the frame size. */
7714 HOST_WIDE_INT lo, hi;
7715 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7716 hi = frame_size - lo;
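/* A worked example with a made-up size: frame_size == 0x1A000 gives
   lo == ((0xA000 ^ 0x8000) - 0x8000) == -0x6000 and
   hi == 0x1A000 - (-0x6000) == 0x20000, so HI loads in one ldah
   (0x20000 >> 16 == 2) and LO fits the signed 16-bit lda offset.  */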
7718 emit_move_insn (ptr, GEN_INT (hi));
7719 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7720 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7725 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7726 GEN_INT (-leftover)));
7729 /* This alternative is special, because the DWARF code cannot
7730 possibly intuit through the loop above. So we invent this
7731 note for it to look at instead. */
7732 RTX_FRAME_RELATED_P (seq) = 1;
7733 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7734 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7735 plus_constant (stack_pointer_rtx,
7739 /* Cope with very large offsets to the register save area. */
7741 sa_reg = stack_pointer_rtx;
7742 if (reg_offset + sa_size > 0x8000)
7744 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7747 if (low + sa_size <= 0x8000)
7748 sa_bias = reg_offset - low, reg_offset = low;
7750 sa_bias = reg_offset, reg_offset = 0;
7752 sa_reg = gen_rtx_REG (DImode, 24);
7753 sa_bias_rtx = GEN_INT (sa_bias);
7755 if (add_operand (sa_bias_rtx, DImode))
7756 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7759 emit_move_insn (sa_reg, sa_bias_rtx);
7760 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7764 /* Save regs in stack order, beginning with the VMS PV. */
7765 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7766 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7768 /* Save register RA next. */
7769 if (imask & (1UL << REG_RA))
7771 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7772 imask &= ~(1UL << REG_RA);
7776 /* Now save any other registers required to be saved. */
7777 for (i = 0; i < 31; i++)
7778 if (imask & (1UL << i))
7780 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7784 for (i = 0; i < 31; i++)
7785 if (fmask & (1UL << i))
7787 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7791 if (TARGET_ABI_OPEN_VMS)
7793 /* Register frame procedures save the fp. */
7794 if (alpha_procedure_type == PT_REGISTER)
7796 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7797 hard_frame_pointer_rtx);
7798 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7799 RTX_FRAME_RELATED_P (insn) = 1;
7802 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7803 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7804 gen_rtx_REG (DImode, REG_PV)));
7806 if (alpha_procedure_type != PT_NULL
7807 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7808 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7810 /* If we have to allocate space for outgoing args, do it now. */
7811 if (crtl->outgoing_args_size != 0)
7814 = emit_move_insn (stack_pointer_rtx,
7816 (hard_frame_pointer_rtx,
7818 (crtl->outgoing_args_size))));
7820 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7821 if ! frame_pointer_needed. Setting the bit will change the CFA
7822 computation rule to use sp again, which would be wrong if we had
7823 frame_pointer_needed, as this means sp might move unpredictably
7824 later on.
7826 Also, note that
7827 frame_pointer_needed
7828 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7829 and
7830 crtl->outgoing_args_size != 0
7831 => alpha_procedure_type != PT_NULL,
7833 so when we are not setting the bit here, we are guaranteed to
7834 have emitted an FRP frame pointer update just before. */
7835 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7840 /* If we need a frame pointer, set it from the stack pointer. */
7841 if (frame_pointer_needed)
7843 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7844 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7846 /* This must always be the last instruction in the
7847 prologue, thus we emit a special move + clobber. */
7848 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7849 stack_pointer_rtx, sa_reg)));
7853 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7854 the prologue, for exception handling reasons, we cannot do this for
7855 any insn that might fault. We could prevent this for mems with a
7856 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7857 have to prevent all such scheduling with a blockage.
7859 Linux, on the other hand, never bothered to implement OSF/1's
7860 exception handling, and so doesn't care about such things. Anyone
7861 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7863 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7864 emit_insn (gen_blockage ());
7867 /* Count the number of .file directives, so that .loc is up to date. */
7868 int num_source_filenames = 0;
7870 /* Output the textual info surrounding the prologue. */
7873 alpha_start_function (FILE *file, const char *fnname,
7874 tree decl ATTRIBUTE_UNUSED)
7876 unsigned long imask = 0;
7877 unsigned long fmask = 0;
7878 /* Stack space needed for pushing registers clobbered by us. */
7879 HOST_WIDE_INT sa_size;
7880 /* Complete stack size needed. */
7881 unsigned HOST_WIDE_INT frame_size;
7882 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7883 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7886 /* Offset from base reg to register save area. */
7887 HOST_WIDE_INT reg_offset;
7888 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7889 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7892 #if TARGET_ABI_OPEN_VMS
7893 if (vms_debug_main
7894 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
7896 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
7897 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
7898 switch_to_section (text_section);
7899 vms_debug_main = NULL;
7903 alpha_fnname = fnname;
7904 sa_size = alpha_sa_size ();
7905 frame_size = compute_frame_size (get_frame_size (), sa_size);
7907 if (TARGET_ABI_OPEN_VMS)
7908 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7910 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7912 alpha_sa_mask (&imask, &fmask);
7914 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7915 We have to do that before the .ent directive as we cannot switch
7916 files within procedures with native ecoff because line numbers are
7917 linked to procedure descriptors.
7918 Outputting the lineno helps debugging of one line functions as they
7919 would otherwise get no line number at all. Please note that we would
7920 like to put out last_linenum from final.c, but it is not accessible. */
7922 if (write_symbols == SDB_DEBUG)
7924 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7925 ASM_OUTPUT_SOURCE_FILENAME (file,
7926 DECL_SOURCE_FILE (current_function_decl));
7928 #ifdef SDB_OUTPUT_SOURCE_LINE
7929 if (debug_info_level != DINFO_LEVEL_TERSE)
7930 SDB_OUTPUT_SOURCE_LINE (file,
7931 DECL_SOURCE_LINE (current_function_decl));
7935 /* Issue function start and label. */
7936 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
7938 fputs ("\t.ent ", file);
7939 assemble_name (file, fnname);
7942 /* If the function needs GP, we'll write the "..ng" label there.
7943 Otherwise, do it here. */
7945 && ! alpha_function_needs_gp
7946 && ! cfun->is_thunk)
7949 assemble_name (file, fnname);
7950 fputs ("..ng:\n", file);
7953 /* Nested functions on VMS that are potentially called via trampoline
7954 get a special transfer entry point that loads the called function's
7955 procedure descriptor and static chain. */
7956 if (TARGET_ABI_OPEN_VMS
7957 && !TREE_PUBLIC (decl)
7958 && DECL_CONTEXT (decl)
7959 && !TYPE_P (DECL_CONTEXT (decl))
7960 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
7962 strcpy (tramp_label, fnname);
7963 strcat (tramp_label, "..tr");
7964 ASM_OUTPUT_LABEL (file, tramp_label);
7965 fprintf (file, "\tldq $1,24($27)\n");
7966 fprintf (file, "\tldq $27,16($27)\n");
7969 strcpy (entry_label, fnname);
7970 if (TARGET_ABI_OPEN_VMS)
7971 strcat (entry_label, "..en");
7973 ASM_OUTPUT_LABEL (file, entry_label);
7974 inside_function = TRUE;
7976 if (TARGET_ABI_OPEN_VMS)
7977 fprintf (file, "\t.base $%d\n", vms_base_regno);
7980 && TARGET_IEEE_CONFORMANT
7981 && !flag_inhibit_size_directive)
7983 /* Set flags in procedure descriptor to request IEEE-conformant
7984 math-library routines. The value we set it to is PDSC_EXC_IEEE
7985 (/usr/include/pdsc.h). */
7986 fputs ("\t.eflag 48\n", file);
7989 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7990 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7991 alpha_arg_offset = -frame_size + 48;
7993 /* Describe our frame. If the frame size is too large to fit in an
7994 integer, print it as zero to avoid an assembler error. We won't be
7995 properly describing such a frame, but that's the best we can do. */
7996 if (TARGET_ABI_OPEN_VMS)
7997 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7998 HOST_WIDE_INT_PRINT_DEC "\n",
8000 frame_size >= (1UL << 31) ? 0 : frame_size,
8002 else if (!flag_inhibit_size_directive)
8003 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8004 (frame_pointer_needed
8005 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8006 frame_size >= max_frame_size ? 0 : frame_size,
8007 crtl->args.pretend_args_size);
8009 /* Describe which registers were spilled. */
8010 if (TARGET_ABI_OPEN_VMS)
8013 /* ??? Does VMS care if mask contains ra? The old code didn't
8014 set it, so I don't here. */
8015 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8017 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8018 if (alpha_procedure_type == PT_REGISTER)
8019 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8021 else if (!flag_inhibit_size_directive)
8025 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8026 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8028 for (i = 0; i < 32; ++i)
8029 if (imask & (1UL << i))
8034 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8035 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8038 #if TARGET_ABI_OPEN_VMS
8039 /* If a user condition handler has been installed at some point, emit
8040 the procedure descriptor bits to point the Condition Handling Facility
8041 at the indirection wrapper, and state the fp offset at which the user
8042 handler may be found. */
8043 if (cfun->machine->uses_condition_handler)
8045 fprintf (file, "\t.handler __gcc_shell_handler\n");
8046 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8049 #ifdef TARGET_VMS_CRASH_DEBUG
8050 /* Support of minimal traceback info. */
8051 switch_to_section (readonly_data_section);
8052 fprintf (file, "\t.align 3\n");
8053 assemble_name (file, fnname); fputs ("..na:\n", file);
8054 fputs ("\t.ascii \"", file);
8055 assemble_name (file, fnname);
8056 fputs ("\\0\"\n", file);
8057 switch_to_section (text_section);
8059 #endif /* TARGET_ABI_OPEN_VMS */
8062 /* Emit the .prologue note at the scheduled end of the prologue. */
8065 alpha_output_function_end_prologue (FILE *file)
8067 if (TARGET_ABI_OPEN_VMS)
8068 fputs ("\t.prologue\n", file);
8069 else if (!flag_inhibit_size_directive)
8070 fprintf (file, "\t.prologue %d\n",
8071 alpha_function_needs_gp || cfun->is_thunk);
8074 /* Write function epilogue. */
8077 alpha_expand_epilogue (void)
8079 /* Registers to save. */
8080 unsigned long imask = 0;
8081 unsigned long fmask = 0;
8082 /* Stack space needed for pushing registers clobbered by us. */
8083 HOST_WIDE_INT sa_size;
8084 /* Complete stack size needed. */
8085 HOST_WIDE_INT frame_size;
8086 /* Offset from base reg to register save area. */
8087 HOST_WIDE_INT reg_offset;
8088 int fp_is_frame_pointer, fp_offset;
8089 rtx sa_reg, sa_reg_exp = NULL;
8090 rtx sp_adj1, sp_adj2, mem, reg, insn;
8092 rtx cfa_restores = NULL_RTX;
8095 sa_size = alpha_sa_size ();
8096 frame_size = compute_frame_size (get_frame_size (), sa_size);
8098 if (TARGET_ABI_OPEN_VMS)
8100 if (alpha_procedure_type == PT_STACK)
8101 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8106 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8108 alpha_sa_mask (&imask, &fmask);
8111 = (TARGET_ABI_OPEN_VMS
8112 ? alpha_procedure_type == PT_STACK
8113 : frame_pointer_needed);
8115 sa_reg = stack_pointer_rtx;
8117 if (crtl->calls_eh_return)
8118 eh_ofs = EH_RETURN_STACKADJ_RTX;
8124 /* If we have a frame pointer, restore SP from it. */
8125 if (TARGET_ABI_OPEN_VMS
8126 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8127 : frame_pointer_needed)
8128 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8130 /* Cope with very large offsets to the register save area. */
8131 if (reg_offset + sa_size > 0x8000)
8133 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8136 if (low + sa_size <= 0x8000)
8137 bias = reg_offset - low, reg_offset = low;
8139 bias = reg_offset, reg_offset = 0;
8141 sa_reg = gen_rtx_REG (DImode, 22);
8142 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8144 emit_move_insn (sa_reg, sa_reg_exp);
8147 /* Restore registers in order, excepting a true frame pointer. */
8149 mem = gen_frame_mem (DImode, plus_constant (sa_reg, reg_offset));
8150 reg = gen_rtx_REG (DImode, REG_RA);
8151 emit_move_insn (reg, mem);
8152 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8155 imask &= ~(1UL << REG_RA);
8157 for (i = 0; i < 31; ++i)
8158 if (imask & (1UL << i))
8160 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8161 fp_offset = reg_offset;
8164 mem = gen_frame_mem (DImode,
8165 plus_constant (sa_reg, reg_offset));
8166 reg = gen_rtx_REG (DImode, i);
8167 emit_move_insn (reg, mem);
8168 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8174 for (i = 0; i < 31; ++i)
8175 if (fmask & (1UL << i))
8177 mem = gen_frame_mem (DFmode, plus_constant (sa_reg, reg_offset));
8178 reg = gen_rtx_REG (DFmode, i+32);
8179 emit_move_insn (reg, mem);
8180 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8185 if (frame_size || eh_ofs)
8187 sp_adj1 = stack_pointer_rtx;
8191 sp_adj1 = gen_rtx_REG (DImode, 23);
8192 emit_move_insn (sp_adj1,
8193 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8196 /* If the stack size is large, begin computation into a temporary
8197 register so as not to interfere with a potential fp restore,
8198 which must be consecutive with an SP restore. */
8199 if (frame_size < 32768 && !cfun->calls_alloca)
8200 sp_adj2 = GEN_INT (frame_size);
8201 else if (frame_size < 0x40007fffL)
8203 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8205 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8206 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8210 sp_adj1 = gen_rtx_REG (DImode, 23);
8211 emit_move_insn (sp_adj1, sp_adj2);
8213 sp_adj2 = GEN_INT (low);
8217 rtx tmp = gen_rtx_REG (DImode, 23);
8218 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8221 /* We can't drop new things to memory this late, afaik,
8222 so build it up by pieces. */
8223 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8225 gcc_assert (sp_adj2);
8229 /* From now on, things must be in order. So emit blockages. */
8231 /* Restore the frame pointer. */
8232 if (fp_is_frame_pointer)
8234 emit_insn (gen_blockage ());
8235 mem = gen_frame_mem (DImode, plus_constant (sa_reg, fp_offset));
8236 emit_move_insn (hard_frame_pointer_rtx, mem);
8237 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8238 hard_frame_pointer_rtx, cfa_restores);
8240 else if (TARGET_ABI_OPEN_VMS)
8242 emit_insn (gen_blockage ());
8243 emit_move_insn (hard_frame_pointer_rtx,
8244 gen_rtx_REG (DImode, vms_save_fp_regno));
8245 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8246 hard_frame_pointer_rtx, cfa_restores);
8249 /* Restore the stack pointer. */
8250 emit_insn (gen_blockage ());
8251 if (sp_adj2 == const0_rtx)
8252 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8254 insn = emit_move_insn (stack_pointer_rtx,
8255 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8256 REG_NOTES (insn) = cfa_restores;
8257 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8258 RTX_FRAME_RELATED_P (insn) = 1;
8262 gcc_assert (cfa_restores == NULL);
8264 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8266 emit_insn (gen_blockage ());
8267 insn = emit_move_insn (hard_frame_pointer_rtx,
8268 gen_rtx_REG (DImode, vms_save_fp_regno));
8269 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8270 RTX_FRAME_RELATED_P (insn) = 1;
8275 /* Output the rest of the textual info surrounding the epilogue. */
8278 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8282 /* We output a nop after noreturn calls at the very end of the function to
8283 ensure that the return address always remains in the caller's code range,
8284 as not doing so might confuse unwinding engines. */
8285 insn = get_last_insn ();
8287 insn = prev_active_insn (insn);
8288 if (insn && CALL_P (insn))
8289 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8291 #if TARGET_ABI_OPEN_VMS
8292 /* Write the linkage entries. */
8293 alpha_write_linkage (file, fnname);
8296 /* End the function. */
8297 if (TARGET_ABI_OPEN_VMS
8298 || !flag_inhibit_size_directive)
8300 fputs ("\t.end ", file);
8301 assemble_name (file, fnname);
8304 inside_function = FALSE;
8308 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8310 In order to avoid the hordes of differences between generated code
8311 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8312 lots of code loading up large constants, generate rtl and emit it
8313 instead of going straight to text.
8315 Not sure why this idea hasn't been explored before... */
8318 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8319 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8322 HOST_WIDE_INT hi, lo;
8323 rtx this_rtx, insn, funexp;
8325 /* We always require a valid GP. */
8326 emit_insn (gen_prologue_ldgp ());
8327 emit_note (NOTE_INSN_PROLOGUE_END);
8329 /* Find the "this" pointer. If the function returns a structure,
8330 the structure return pointer is in $16. */
8331 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8332 this_rtx = gen_rtx_REG (Pmode, 17);
8334 this_rtx = gen_rtx_REG (Pmode, 16);
8336 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8337 entire constant for the add. */
8338 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8339 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8340 if (hi + lo == delta)
8343 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8345 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8349 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8350 delta, -(delta < 0));
8351 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8354 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8359 tmp = gen_rtx_REG (Pmode, 0);
8360 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8362 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8363 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8364 if (hi + lo == vcall_offset)
8367 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8371 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8372 vcall_offset, -(vcall_offset < 0));
8373 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8377 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8380 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8382 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8385 /* Generate a tail call to the target function. */
8386 if (! TREE_USED (function))
8388 assemble_external (function);
8389 TREE_USED (function) = 1;
8391 funexp = XEXP (DECL_RTL (function), 0);
8392 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8393 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8394 SIBLING_CALL_P (insn) = 1;
8396 /* Run just enough of rest_of_compilation to get the insns emitted.
8397 There's not really enough bulk here to make other passes such as
8398 instruction scheduling worthwhile. Note that use_thunk calls
8399 assemble_start_function and assemble_end_function. */
8400 insn = get_insns ();
8401 insn_locators_alloc ();
8402 shorten_branches (insn);
8403 final_start_function (insn, file, 1);
8404 final (insn, file, 1);
8405 final_end_function ();
8407 #endif /* TARGET_ABI_OSF */
8409 /* Debugging support. */
8413 /* Count the number of sdb-related labels generated (to find block
8414 start and end boundaries). */
8416 int sdb_label_count = 0;
8418 /* Name of the file containing the current function. */
8420 static const char *current_function_file = "";
8422 /* Offsets to alpha virtual arg/local debugging pointers. */
8424 long alpha_arg_offset;
8425 long alpha_auto_offset;
8427 /* Emit a new filename to a stream. */
8430 alpha_output_filename (FILE *stream, const char *name)
8432 static int first_time = TRUE;
8437 ++num_source_filenames;
8438 current_function_file = name;
8439 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8440 output_quoted_string (stream, name);
8441 fprintf (stream, "\n");
8442 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8443 fprintf (stream, "\t#@stabs\n");
8446 else if (write_symbols == DBX_DEBUG)
8447 /* dbxout.c will emit an appropriate .stabs directive. */
8450 else if (name != current_function_file
8451 && strcmp (name, current_function_file) != 0)
8453 if (inside_function && ! TARGET_GAS)
8454 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8457 ++num_source_filenames;
8458 current_function_file = name;
8459 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8462 output_quoted_string (stream, name);
8463 fprintf (stream, "\n");
8467 /* Structure to show the current status of registers and memory. */
8469 struct shadow_summary
8472 unsigned int i : 31; /* Mask of int regs */
8473 unsigned int fp : 31; /* Mask of fp regs */
8474 unsigned int mem : 1; /* mem == imem | fpmem */
8478 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8479 to the summary structure. SET is nonzero if the insn is setting the
8480 object, otherwise zero. */
8483 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8485 const char *format_ptr;
8491 switch (GET_CODE (x))
8493 /* ??? Note that this case would be incorrect if the Alpha had a
8494 ZERO_EXTRACT in SET_DEST. */
8496 summarize_insn (SET_SRC (x), sum, 0);
8497 summarize_insn (SET_DEST (x), sum, 1);
8501 summarize_insn (XEXP (x, 0), sum, 1);
8505 summarize_insn (XEXP (x, 0), sum, 0);
8509 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8510 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8514 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8515 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8519 summarize_insn (SUBREG_REG (x), sum, 0);
8524 int regno = REGNO (x);
8525 unsigned long mask = ((unsigned long) 1) << (regno % 32);
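/* For illustration: regno == 40 is $f8, so mask == 1UL << (40 % 32)
   == 1UL << 8 lands in the fp half of the summary; $31 and $f31 are
   the hardwired zero registers and are ignored just below.  */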
8527 if (regno == 31 || regno == 63)
8533 sum->defd.i |= mask;
8535 sum->defd.fp |= mask;
8540 sum->used.i |= mask;
8542 sum->used.fp |= mask;
8553 /* Find the regs used in memory address computation: */
8554 summarize_insn (XEXP (x, 0), sum, 0);
8557 case CONST_INT: case CONST_DOUBLE:
8558 case SYMBOL_REF: case LABEL_REF: case CONST:
8559 case SCRATCH: case ASM_INPUT:
8562 /* Handle common unary and binary ops for efficiency. */
8563 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8564 case MOD: case UDIV: case UMOD: case AND: case IOR:
8565 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8566 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8567 case NE: case EQ: case GE: case GT: case LE:
8568 case LT: case GEU: case GTU: case LEU: case LTU:
8569 summarize_insn (XEXP (x, 0), sum, 0);
8570 summarize_insn (XEXP (x, 1), sum, 0);
8573 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8574 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8575 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8576 case SQRT: case FFS:
8577 summarize_insn (XEXP (x, 0), sum, 0);
8581 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8582 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8583 switch (format_ptr[i])
8586 summarize_insn (XEXP (x, i), sum, 0);
8590 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8591 summarize_insn (XVECEXP (x, i, j), sum, 0);
8603 /* Ensure a sufficient number of `trapb' insns are in the code when
8604 the user requests code with a trap precision of functions or instructions.
8607 In naive mode, when the user requests a trap-precision of
8608 "instruction", a trapb is needed after every instruction that may
8609 generate a trap. This ensures that the code is resumption safe, but it is also slow.
8612 When optimizations are turned on, we delay issuing a trapb as long
8613 as possible. In this context, a trap shadow is the sequence of
8614 instructions that starts with a (potentially) trap generating
8615 instruction and extends to the next trapb or call_pal instruction
8616 (but GCC never generates call_pal by itself). We can delay (and
8617 therefore sometimes omit) a trapb subject to the following conditions:
8620 (a) On entry to the trap shadow, if any Alpha register or memory
8621 location contains a value that is used as an operand value by some
8622 instruction in the trap shadow (live on entry), then no instruction
8623 in the trap shadow may modify the register or memory location.
8625 (b) Within the trap shadow, the computation of the base register
8626 for a memory load or store instruction may not involve using the
8627 result of an instruction that might generate an UNPREDICTABLE result.
8630 (c) Within the trap shadow, no register may be used more than once
8631 as a destination register. (This is to make life easier for the trap-handler.)
8634 (d) The trap shadow may not include any branch instructions. */
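/* An illustrative shadow (a sketch, not actual compiler output):

	addt/sui $f1,$f2,$f3	# may trap; shadow begins
	mult/sui $f4,$f5,$f6	# ok: distinct destination, operands live on entry
	trapb			# shadow ends; pending traps resolve here

   Reusing $f3 as a destination within the shadow would violate rule (c)
   above and force the trapb to be emitted earlier.  */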
8637 alpha_handle_trap_shadows (void)
8639 struct shadow_summary shadow;
8640 int trap_pending, exception_nesting;
8644 exception_nesting = 0;
8647 shadow.used.mem = 0;
8648 shadow.defd = shadow.used;
8650 for (i = get_insns (); i ; i = NEXT_INSN (i))
8654 switch (NOTE_KIND (i))
8656 case NOTE_INSN_EH_REGION_BEG:
8657 exception_nesting++;
8662 case NOTE_INSN_EH_REGION_END:
8663 exception_nesting--;
8668 case NOTE_INSN_EPILOGUE_BEG:
8669 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8674 else if (trap_pending)
8676 if (alpha_tp == ALPHA_TP_FUNC)
8679 && GET_CODE (PATTERN (i)) == RETURN)
8682 else if (alpha_tp == ALPHA_TP_INSN)
8686 struct shadow_summary sum;
8691 sum.defd = sum.used;
8693 switch (GET_CODE (i))
8696 /* Annoyingly, get_attr_trap will die on these. */
8697 if (GET_CODE (PATTERN (i)) == USE
8698 || GET_CODE (PATTERN (i)) == CLOBBER)
8701 summarize_insn (PATTERN (i), &sum, 0);
8703 if ((sum.defd.i & shadow.defd.i)
8704 || (sum.defd.fp & shadow.defd.fp))
8706 /* (c) would be violated */
8710 /* Combine shadow with summary of current insn: */
8711 shadow.used.i |= sum.used.i;
8712 shadow.used.fp |= sum.used.fp;
8713 shadow.used.mem |= sum.used.mem;
8714 shadow.defd.i |= sum.defd.i;
8715 shadow.defd.fp |= sum.defd.fp;
8716 shadow.defd.mem |= sum.defd.mem;
8718 if ((sum.defd.i & shadow.used.i)
8719 || (sum.defd.fp & shadow.used.fp)
8720 || (sum.defd.mem & shadow.used.mem))
8722 /* (a) would be violated (also takes care of (b)) */
8723 gcc_assert (get_attr_trap (i) != TRAP_YES
8724 || (!(sum.defd.i & sum.used.i)
8725 && !(sum.defd.fp & sum.used.fp)));
8743 n = emit_insn_before (gen_trapb (), i);
8744 PUT_MODE (n, TImode);
8745 PUT_MODE (i, TImode);
8749 shadow.used.mem = 0;
8750 shadow.defd = shadow.used;
8755 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8756 && NONJUMP_INSN_P (i)
8757 && GET_CODE (PATTERN (i)) != USE
8758 && GET_CODE (PATTERN (i)) != CLOBBER
8759 && get_attr_trap (i) == TRAP_YES)
8761 if (optimize && !trap_pending)
8762 summarize_insn (PATTERN (i), &shadow, 0);
8768 /* The Alpha can issue the instructions of a group simultaneously only
8769 if the group is suitably aligned. This is very processor-specific. */
8770 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8771 that are marked "fake". These instructions do not exist on that target,
8772 but it is possible to see these insns with deranged combinations of
8773 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8774 choose a result at random. */
8776 enum alphaev4_pipe {
8783 enum alphaev5_pipe {
8794 static enum alphaev4_pipe
8795 alphaev4_insn_pipe (rtx insn)
8797 if (recog_memoized (insn) < 0)
8799 if (get_attr_length (insn) != 4)
8802 switch (get_attr_type (insn))
8818 case TYPE_MVI: /* fake */
8833 case TYPE_FSQRT: /* fake */
8834 case TYPE_FTOI: /* fake */
8835 case TYPE_ITOF: /* fake */
8843 static enum alphaev5_pipe
8844 alphaev5_insn_pipe (rtx insn)
8846 if (recog_memoized (insn) < 0)
8848 if (get_attr_length (insn) != 4)
8851 switch (get_attr_type (insn))
8871 case TYPE_FTOI: /* fake */
8872 case TYPE_ITOF: /* fake */
8887 case TYPE_FSQRT: /* fake */
8898 /* IN_USE is a mask of the slots currently filled within the insn group.
8899 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8900 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8902 LEN is, of course, the length of the group in bytes. */
8905 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8912 || GET_CODE (PATTERN (insn)) == CLOBBER
8913 || GET_CODE (PATTERN (insn)) == USE)
8918 enum alphaev4_pipe pipe;
8920 pipe = alphaev4_insn_pipe (insn);
8924 /* Force complex instructions to start new groups. */
8928 /* If this is a completely unrecognized insn, it's an asm.
8929 We don't know how long it is, so record length as -1 to
8930 signal a needed realignment. */
8931 if (recog_memoized (insn) < 0)
8934 len = get_attr_length (insn);
8938 if (in_use & EV4_IB0)
8940 if (in_use & EV4_IB1)
8945 in_use |= EV4_IB0 | EV4_IBX;
8949 if (in_use & EV4_IB0)
8951 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8959 if (in_use & EV4_IB1)
8969 /* Haifa doesn't do well scheduling branches. */
8974 insn = next_nonnote_insn (insn);
8976 if (!insn || ! INSN_P (insn))
8979 /* Let Haifa tell us where it thinks insn group boundaries are. */
8980 if (GET_MODE (insn) == TImode)
8983 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8988 insn = next_nonnote_insn (insn);
8996 /* IN_USE is a mask of the slots currently filled within the insn group.
8997 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8998 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9000 LEN is, of course, the length of the group in bytes. */
9003 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9010 || GET_CODE (PATTERN (insn)) == CLOBBER
9011 || GET_CODE (PATTERN (insn)) == USE)
9016 enum alphaev5_pipe pipe;
9018 pipe = alphaev5_insn_pipe (insn);
9022 /* Force complex instructions to start new groups. */
9026 /* If this is a completely unrecognized insn, it's an asm.
9027 We don't know how long it is, so record length as -1 to
9028 signal a needed realignment. */
9029 if (recog_memoized (insn) < 0)
9032 len = get_attr_length (insn);
9035 /* ??? Most of the places below, we would like to assert can never
9036 happen, as it would indicate an error either in Haifa, or
9037 in the scheduling description. Unfortunately, Haifa never
9038 schedules the last instruction of the BB, so we don't have
9039 an accurate TI bit to go off. */
9041 if (in_use & EV5_E0)
9043 if (in_use & EV5_E1)
9048 in_use |= EV5_E0 | EV5_E01;
9052 if (in_use & EV5_E0)
9054 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9062 if (in_use & EV5_E1)
9068 if (in_use & EV5_FA)
9070 if (in_use & EV5_FM)
9075 in_use |= EV5_FA | EV5_FAM;
9079 if (in_use & EV5_FA)
9085 if (in_use & EV5_FM)
9098 /* Haifa doesn't do well scheduling branches. */
9099 /* ??? If this is predicted not-taken, slotting continues, except
9100 that no more IBR, FBR, or JSR insns may be slotted. */
9105 insn = next_nonnote_insn (insn);
9107 if (!insn || ! INSN_P (insn))
9110 /* Let Haifa tell us where it thinks insn group boundaries are. */
9111 if (GET_MODE (insn) == TImode)
9114 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9119 insn = next_nonnote_insn (insn);
9128 alphaev4_next_nop (int *pin_use)
9130 int in_use = *pin_use;
9133 if (!(in_use & EV4_IB0))
9138 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9143 else if (TARGET_FP && !(in_use & EV4_IB1))
9156 alphaev5_next_nop (int *pin_use)
9158 int in_use = *pin_use;
9161 if (!(in_use & EV5_E1))
9166 else if (TARGET_FP && !(in_use & EV5_FA))
9171 else if (TARGET_FP && !(in_use & EV5_FM))
9183 /* The instruction group alignment main loop. */
9186 alpha_align_insns (unsigned int max_align,
9187 rtx (*next_group) (rtx, int *, int *),
9188 rtx (*next_nop) (int *))
9190 /* ALIGN is the known alignment for the insn group. */
9192 /* OFS is the offset of the current insn in the insn group. */
9194 int prev_in_use, in_use, len, ldgp;
9197 /* Let shorten_branches take care of assigning alignments to code labels. */
9198 shorten_branches (get_insns ());
9200 if (align_functions < 4)
9202 else if ((unsigned int) align_functions < max_align)
9203 align = align_functions;
9207 ofs = prev_in_use = 0;
9210 i = next_nonnote_insn (i);
9212 ldgp = alpha_function_needs_gp ? 8 : 0;
9216 next = (*next_group) (i, &in_use, &len);
9218 /* When we see a label, resync alignment etc. */
9221 unsigned int new_align = 1 << label_to_alignment (i);
9223 if (new_align >= align)
9225 align = new_align < max_align ? new_align : max_align;
9229 else if (ofs & (new_align-1))
9230 ofs = (ofs | (new_align-1)) + 1;
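/* E.g. with ofs == 4 and new_align == 8, (4 | 7) + 1 == 8 advances
   the offset to the label's known 8-byte boundary.  */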
9234 /* Handle complex instructions specially. */
9235 else if (in_use == 0)
9237 /* Asms will have length < 0. This is a signal that we have
9238 lost alignment knowledge. Assume, however, that the asm
9239 will not mis-align instructions. */
9248 /* If the known alignment is smaller than the recognized insn group,
9249 realign the output. */
9250 else if ((int) align < len)
9252 unsigned int new_log_align = len > 8 ? 4 : 3;
9255 where = prev = prev_nonnote_insn (i);
9256 if (!where || !LABEL_P (where))
9259 /* Can't realign between a call and its gp reload. */
9260 if (! (TARGET_EXPLICIT_RELOCS
9261 && prev && CALL_P (prev)))
9263 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9264 align = 1 << new_log_align;
9269 /* We may not insert padding inside the initial ldgp sequence. */
9273 /* If the group won't fit in the same INT16 as the previous,
9274 we need to add padding to keep the group together. Rather
9275 than simply leaving the insn filling to the assembler, we
9276 can make use of the knowledge of what sorts of instructions
9277 were issued in the previous group to make sure that all of
9278 the added nops are really free. */
9279 else if (ofs + len > (int) align)
9281 int nop_count = (align - ofs) / 4;
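/* E.g. with align == 16, ofs == 8 and len == 12, the group would
   straddle the 16-byte boundary, so (16 - 8) / 4 == 2 nops are
   inserted to pad out the current aligned block first.  */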
9284 /* Insert nops before labels, branches, and calls to truly merge
9285 the execution of the nops with the previous instruction group. */
9286 where = prev_nonnote_insn (i);
9289 if (LABEL_P (where))
9291 rtx where2 = prev_nonnote_insn (where);
9292 if (where2 && JUMP_P (where2))
9295 else if (NONJUMP_INSN_P (where))
9302 emit_insn_before ((*next_nop)(&prev_in_use), where);
9303 while (--nop_count);
9307 ofs = (ofs + len) & (align - 1);
9308 prev_in_use = in_use;
9313 /* Insert an unop between a sibcall or noreturn function call and its GP load. */
9316 alpha_pad_function_end (void)
9320 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9322 if (! (CALL_P (insn)
9323 && (SIBLING_CALL_P (insn)
9324 || find_reg_note (insn, REG_NORETURN, NULL_RTX))))
9327 /* Make sure we do not split a call and its corresponding
9328 CALL_ARG_LOCATION note. */
9331 next = NEXT_INSN (insn);
9332 if (next && NOTE_P (next)
9333 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9337 next = next_active_insn (insn);
9341 rtx pat = PATTERN (next);
9343 if (GET_CODE (pat) == SET
9344 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9345 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9346 emit_insn_after (gen_unop (), insn);
9351 /* Machine dependent reorg pass. */
9356 /* Workaround for a linker error that triggers when an exception
9357 handler immediately follows a sibcall or a noreturn function.
9359 In the sibcall case:
9361 The instruction stream from an object file:
9363 1d8: 00 00 fb 6b jmp (t12)
9364 1dc: 00 00 ba 27 ldah gp,0(ra)
9365 1e0: 00 00 bd 23 lda gp,0(gp)
9366 1e4: 00 00 7d a7 ldq t12,0(gp)
9367 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9369 was converted in the final link pass to:
9371 12003aa88: 67 fa ff c3 br 120039428 <...>
9372 12003aa8c: 00 00 fe 2f unop
9373 12003aa90: 00 00 fe 2f unop
9374 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9375 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9377 And in the noreturn case:
9379 The instruction stream from an object file:
9381 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9382 58: 00 00 ba 27 ldah gp,0(ra)
9383 5c: 00 00 bd 23 lda gp,0(gp)
9384 60: 00 00 7d a7 ldq t12,0(gp)
9385 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9387 was converted in the final link pass to:
9389 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9390 fdb28: 00 00 fe 2f unop
9391 fdb2c: 00 00 fe 2f unop
9392 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9393 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9395 GP load instructions were wrongly cleared by the linker relaxation
9396 pass. This workaround prevents removal of GP loads by inserting
9397 an unop instruction between a sibcall or noreturn function call and
9398 the exception handler prologue. */
9400 if (current_function_has_exception_handlers ())
9401 alpha_pad_function_end ();
9403 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9404 alpha_handle_trap_shadows ();
9406 /* Due to the number of extra trapb insns, don't bother fixing up
9407 alignment when trap precision is instruction. Moreover, we can
9408 only do our job when sched2 is run. */
9409 if (optimize && !optimize_size
9410 && alpha_tp != ALPHA_TP_INSN
9411 && flag_schedule_insns_after_reload)
9413 if (alpha_tune == PROCESSOR_EV4)
9414 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9415 else if (alpha_tune == PROCESSOR_EV5)
9416 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9425 alpha_file_start (void)
9427 #ifdef OBJECT_FORMAT_ELF
9428 /* If emitting dwarf2 debug information, we cannot generate a .file
9429 directive to start the file, as it will conflict with dwarf2out
9430 file numbers. So it's only useful when emitting mdebug output. */
9431 targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9434 default_file_start ();
9436 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9439 fputs ("\t.set noreorder\n", asm_out_file);
9440 fputs ("\t.set volatile\n", asm_out_file);
9442 fputs ("\t.set noat\n", asm_out_file);
9443 if (TARGET_EXPLICIT_RELOCS)
9444 fputs ("\t.set nomacro\n", asm_out_file);
9445 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9449 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9451 else if (TARGET_MAX)
9453 else if (TARGET_BWX)
9455 else if (alpha_cpu == PROCESSOR_EV5)
9460 fprintf (asm_out_file, "\t.arch %s\n", arch);
9464 #ifdef OBJECT_FORMAT_ELF
9465 /* Since we don't have a .dynbss section, we should not allow global
9466 relocations in the .rodata section. */
9469 alpha_elf_reloc_rw_mask (void)
9471 return flag_pic ? 3 : 2;
9474 /* Return a section for X. The only special thing we do here is to
9475 honor small data. */
9478 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9479 unsigned HOST_WIDE_INT align)
9481 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9482 /* ??? Consider using mergeable sdata sections. */
9483 return sdata_section;
9485 return default_elf_select_rtx_section (mode, x, align);
9489 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9491 unsigned int flags = 0;
9493 if (strcmp (name, ".sdata") == 0
9494 || strncmp (name, ".sdata.", 7) == 0
9495 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9496 || strcmp (name, ".sbss") == 0
9497 || strncmp (name, ".sbss.", 6) == 0
9498 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9499 flags = SECTION_SMALL;
9501 flags |= default_section_type_flags (decl, name, reloc);
9504 #endif /* OBJECT_FORMAT_ELF */
9506 /* Structure to collect function names for final output in link section. */
9507 /* Note that items marked with GTY can't be ifdef'ed out. */
9515 struct GTY(()) alpha_links
9519 enum reloc_kind rkind;
9522 #if TARGET_ABI_OPEN_VMS
9524 /* Return the VMS argument type corresponding to MODE. */
9527 alpha_arg_type (enum machine_mode mode)
9532 return TARGET_FLOAT_VAX ? FF : FS;
9534 return TARGET_FLOAT_VAX ? FD : FT;
9540 /* Return an rtx for an integer representing the VMS Argument Information register. */
9544 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9546 unsigned HOST_WIDE_INT regval = cum.num_args;
9549 for (i = 0; i < 6; i++)
9550 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9552 return GEN_INT (regval);
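/* A hypothetical example: with cum.num_args == 2, the value is
   2 | (atypes[0] << 8) | (atypes[1] << 11) -- the argument count in
   the low bits and one 3-bit VMS type code per register argument
   starting at bit 8.  */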
9556 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9557 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9558 this is the reference to the linkage pointer value, 0 if this is the
9559 reference to the function entry value. RFLAG is 1 if this is a reduced
9560 reference (code address only), 0 if this is a full reference. */
9563 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9565 struct alpha_links *al = NULL;
9566 const char *name = XSTR (func, 0);
9568 if (cfun->machine->links)
9570 splay_tree_node lnode;
9572 /* Is this name already defined? */
9573 lnode = splay_tree_lookup (cfun->machine->links, (splay_tree_key) name);
9575 al = (struct alpha_links *) lnode->value;
9578 cfun->machine->links = splay_tree_new_ggc
9579 ((splay_tree_compare_fn) strcmp,
9580 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9581 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9592 /* Follow transparent alias, as this is used for CRTL translations. */
9593 id = maybe_get_identifier (name);
9596 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9597 id = TREE_CHAIN (id);
9598 name = IDENTIFIER_POINTER (id);
9601 buf_len = strlen (name) + 8 + 9;
9602 linksym = (char *) alloca (buf_len);
9603 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
9605 al = ggc_alloc_alpha_links ();
9607 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9609 splay_tree_insert (cfun->machine->links,
9610 (splay_tree_key) ggc_strdup (name),
9611 (splay_tree_value) al);
9614 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9617 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9623 alpha_write_one_linkage (splay_tree_node node, void *data)
9625 const char *const name = (const char *) node->key;
9626 struct alpha_links *link = (struct alpha_links *) node->value;
9627 FILE *stream = (FILE *) data;
9629 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9630 if (link->rkind == KIND_CODEADDR)
9632 /* External and used, request code address. */
9633 fprintf (stream, "\t.code_address ");
9637 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9638 && SYMBOL_REF_LOCAL_P (link->func))
9640 /* Locally defined, build linkage pair. */
9641 fprintf (stream, "\t.quad %s..en\n", name);
9642 fprintf (stream, "\t.quad ");
9646 /* External, request linkage pair. */
9647 fprintf (stream, "\t.linkage ");
9650 assemble_name (stream, name);
9651 fputs ("\n", stream);
9657 alpha_write_linkage (FILE *stream, const char *funname)
9659 fprintf (stream, "\t.link\n");
9660 fprintf (stream, "\t.align 3\n");
9663 #ifdef TARGET_VMS_CRASH_DEBUG
9664 fputs ("\t.name ", stream);
9665 assemble_name (stream, funname);
9666 fputs ("..na\n", stream);
9669 ASM_OUTPUT_LABEL (stream, funname);
9670 fprintf (stream, "\t.pdesc ");
9671 assemble_name (stream, funname);
9672 fprintf (stream, "..en,%s\n",
9673 alpha_procedure_type == PT_STACK ? "stack"
9674 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9676 if (cfun->machine->links)
9678 splay_tree_foreach (cfun->machine->links, alpha_write_one_linkage, stream);
9679 /* splay_tree_delete (func->links); */
9683 /* Switch to an arbitrary section NAME with attributes as specified
9684 by FLAGS; the DECL argument is unused. */
9688 vms_asm_named_section (const char *name, unsigned int flags,
9689 tree decl ATTRIBUTE_UNUSED)
9691 fputc ('\n', asm_out_file);
9692 fprintf (asm_out_file, ".section\t%s", name);
9694 if (flags & SECTION_DEBUG)
9695 fprintf (asm_out_file, ",NOWRT");
9697 fputc ('\n', asm_out_file);
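/* E.g. a hypothetical debug section "MYSEC$DEBUG" comes out as
   ".section	MYSEC$DEBUG,NOWRT"; non-debug sections omit ,NOWRT.  */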
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
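/* Since UNITS_PER_WORD is 8 and BITS_PER_WORD is 64 on Alpha, each
   .ctors/.dtors entry here is an 8-byte quadword on an 8-byte boundary,
   where the default hooks would emit 4-byte pointers.  */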
#else

rtx
alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
		   bool lflag ATTRIBUTE_UNUSED,
		   bool rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
      abort_libfunc = init_one_libfunc ("decc$abort");
      memcmp_libfunc = init_one_libfunc ("decc$memcmp");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}
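/* The effect is that, e.g., a signed SImode division is not expanded
   inline on VMS but compiled to a call to OTS$DIV_I in the run-time
   library, and similarly for the other division and remainder
   operations registered above.  */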
/* On the Alpha, we use this to disable the floating-point registers
   when they don't exist.  */

static void
alpha_conditional_register_usage (void)
{
  int i;

  if (! TARGET_FPREGS)
    for (i = 32; i < 63; i++)
      fixed_regs[i] = call_used_regs[i] = 1;
}
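/* Registers 32..62 are the floating-point registers $f0..$f30; register
   63 ($f31) always reads as zero and is fixed unconditionally, so the
   loop need not touch it.  */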
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif
#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif
#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
#endif
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL alpha_builtin_decl
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif
/* Use 16-bit anchor offsets.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
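/* This range matches the signed 16-bit displacement field of the Alpha
   load and store formats, so an anchored reference can be addressed
   directly from the anchor symbol.  */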
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"