/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
#include "splay-tree.h"
#include "cfglayout.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};
/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;
/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};
static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),          /* fp_add */
    COSTS_N_INSNS (6),          /* fp_mult */
    COSTS_N_INSNS (34),         /* fp_div_sf */
    COSTS_N_INSNS (63),         /* fp_div_df */
    COSTS_N_INSNS (23),         /* int_mult_si */
    COSTS_N_INSNS (23),         /* int_mult_di */
    COSTS_N_INSNS (2),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (97),         /* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (15),         /* fp_div_sf */
    COSTS_N_INSNS (22),         /* fp_div_df */
    COSTS_N_INSNS (8),          /* int_mult_si */
    COSTS_N_INSNS (12),         /* int_mult_di */
    COSTS_N_INSNS (1) + 1,      /* int_shift */
    COSTS_N_INSNS (1),          /* int_cmov */
    COSTS_N_INSNS (83),         /* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (12),         /* fp_div_sf */
    COSTS_N_INSNS (15),         /* fp_div_df */
    COSTS_N_INSNS (7),          /* int_mult_si */
    COSTS_N_INSNS (7),          /* int_mult_di */
    COSTS_N_INSNS (1),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (86),         /* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),            /* fp_add */
  COSTS_N_INSNS (1),            /* fp_mult */
  COSTS_N_INSNS (1),            /* fp_div_sf */
  COSTS_N_INSNS (1) + 1,        /* fp_div_df */
  COSTS_N_INSNS (1) + 1,        /* int_mult_si */
  COSTS_N_INSNS (1) + 2,        /* int_mult_di */
  COSTS_N_INSNS (1),            /* int_shift */
  COSTS_N_INSNS (1),            /* int_cmov */
  COSTS_N_INSNS (6),            /* int_div */
};
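/* As an example of the fractional tuning above: in the size table a
   DImode multiply scores COSTS_N_INSNS (1) + 2 against COSTS_N_INSNS (1)
   for a shift, so when a multiply could be replaced by a single shift of
   equal size, the +2 tips rtx_costs toward the shift -- which is also
   the lower-latency insn.  */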
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
static bool vms_valid_pointer_mode (enum machine_mode);
#endif
/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
static const struct default_options alpha_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };
/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (struct gcc_options *opts,
                     struct gcc_options *opts_set ATTRIBUTE_UNUSED,
                     const struct cl_decoded_option *decoded,
                     location_t loc)
{
  size_t code = decoded->opt_index;
  const char *arg = decoded->arg;
  int value = decoded->value;

  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
        opts->x_target_flags |= MASK_SOFT_FP;
      break;
    case OPT_mieee:
    case OPT_mieee_with_inexact:
      opts->x_target_flags |= MASK_IEEE_CONFORMANT;
      break;
    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
        error_at (loc, "bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",    PROCESSOR_EV4, 0 },
    { "ev45",   PROCESSOR_EV4, 0 },
    { "21064",  PROCESSOR_EV4, 0 },
    { "ev5",    PROCESSOR_EV5, 0 },
    { "21164",  PROCESSOR_EV5, 0 },
    { "ev56",   PROCESSOR_EV5, MASK_BWX },
    { "21164a", PROCESSOR_EV5, MASK_BWX },
    { "pca56",  PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",    PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",  PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",   PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int i;
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value %qs for -mfp-rounding-mode switch",
               alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_cpu_string, cpu_table [i].name))
          {
            alpha_tune = alpha_cpu = cpu_table [i].processor;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
            target_flags |= cpu_table [i].flags;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_tune_string, cpu_table [i].name))
          {
            alpha_tune = cpu_table [i].processor;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mtune switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
        {
          warning (0, "rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
        }
      if (alpha_fptm == ALPHA_FPTM_SUI)
        {
          warning (0, "trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
        }
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
        warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char)alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
      {
        static int const cache_latency[][4] =
        {
          { 3, 30, -1 },        /* ev4 -- Bcache is a guess */
          { 2, 12, 38 },        /* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },        /* ev6 -- Bcache from DS20 LMbench.  */
        };

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
          {
            warning (0, "L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_tune]);
            lat = 3;
          }
        else
          lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        lat = 150;
      }
    else
      {
        warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
        lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
        align_loops = 16;
      if (align_jumps <= 0)
        align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
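/* For example, zap_mask (0x00ff00ff00ff00ff) and zap_mask (0xffff000000000000)
   return 1 -- every byte is 0x00 or 0xff, which is exactly what a zapnot
   insn with byte masks 0x55 and 0xc0 respectively can build -- while
   zap_mask (0x0f00) returns 0, since byte 1 holds the partial value 0x0f.  */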
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc (REGNO (tmp));
          if (op == 0)
            return 0;
        }
    }
  return op;
}
/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (TARGET_ABI_OSF
          && reload_completed
          && alpha_sa_size () == 0
          && get_frame_size () == 0
          && crtl->outgoing_args_size == 0
          && crtl->args.pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (JUMP_P (tmp)
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
        {
          int count = 1;

          for (j = i + 1; j < n_labels; j++)
            if (XEXP (XVECEXP (jump_table, 1, i), 0)
                == XEXP (XVECEXP (jump_table, 1, j), 0))
              count++;

          if (count > best_count)
            best_count = count, best_label = XVECEXP (jump_table, 1, i);
        }
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
        return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}
#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST \
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
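/* For instance, a symbol_ref whose name is "foo..lk" (a hypothetical
   VMS linkage name) matches, either standalone or wrapped as
   (const (plus (symbol_ref "foo..lk") (const_int 8))).  */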
#endif

/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */
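/* Concretely, (reg $16), (plus (reg $16) (const_int 64)), and a plain
   constant within +/- 32k are all accepted, and for ldq_u-style accesses
   so is (and (plus (reg $16) (const_int 5)) (const_int -8)), which drops
   the low three bits to reach the enclosing aligned quadword.  */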
static bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x))
    return (strict
            ? STRICT_REG_OK_FOR_BASE_P (x)
            : NONSTRICT_REG_OK_FOR_BASE_P (x));

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        x = SUBREG_REG (x);

      if (REG_P (x))
        {
          if (! strict
              && NONSTRICT_REG_OK_FP_BASE_P (x)
              && CONST_INT_P (ofs))
            return true;
          if ((strict
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
              && CONSTANT_ADDRESS_P (ofs))
            return true;
        }
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
           && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
        return true;

      if (GET_CODE (x) == LO_SUM)
        {
          rtx ofs = XEXP (x, 1);
          x = XEXP (x, 0);

          /* Discard non-paradoxical subregs.  */
          if (GET_CODE (x) == SUBREG
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
            x = SUBREG_REG (x);

          /* Must have a valid base register.  */
          if (! (REG_P (x)
                 && (strict
                     ? STRICT_REG_OK_FOR_BASE_P (x)
                     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
            return false;

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode)
              || dtp32_symbolic_operand (ofs, Pmode)
              || tp32_symbolic_operand (ofs, Pmode))
            return true;
        }
    }

  return false;
}
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
        {
        case TLS_MODEL_NONE:
          break;

        case TLS_MODEL_GLOBAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          dest = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
          insn = gen_call_value_osf_tlsgd (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          emit_libcall_block (insn, dest, r0, x);
          return dest;

        case TLS_MODEL_LOCAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          scratch = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
          insn = gen_call_value_osf_tlsldm (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                UNSPEC_TLSLDM_CALL);
          emit_libcall_block (insn, scratch, r0, eqv);

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);

          if (alpha_tls_size == 64)
            {
              dest = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
              emit_insn (gen_adddi3 (dest, dest, scratch));
              return dest;
            }
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, scratch, insn);
              scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
            }
          return gen_rtx_LO_SUM (Pmode, scratch, eqv);

        case TLS_MODEL_INITIAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);
          scratch = gen_reg_rtx (Pmode);
          dest = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
          emit_insn (gen_adddi3 (dest, tp, scratch));
          return dest;

        case TLS_MODEL_LOCAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, tp, insn);
              tp = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
            }
          return gen_rtx_LO_SUM (Pmode, tp, eqv);

        default:
          gcc_unreachable ();
        }

      if (local_symbolic_operand (x, Pmode))
        {
          if (small_symbolic_operand (x, Pmode))
            return x;
          else
            {
              if (can_create_pseudo_p ())
                scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch,
                                      gen_rtx_HIGH (Pmode, x)));
              return gen_rtx_LO_SUM (Pmode, scratch, x);
            }
        }
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;
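    /* For example, addend 0x12348765 splits as low = -0x789b and
       high = 0x12350000 (0x12350000 - 0x789b == 0x12348765); the high
       part is added below via ldah, and the low part survives into the
       final (plus reg low-part-const) address.  */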
    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                          enum machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
                                 enum machine_mode mode ATTRIBUTE_UNUSED,
                                 int opnum, int type,
                                 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts the number of extra insns needed from 3 to 1.  */
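  /* For instance, with a hypothetical displacement of 0x12348,
     (plus $15 0x12348) reloads the high part as ldah $28,1($15), and the
     remaining (plus $28 0x2348) fits directly in the mem displacement.  */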
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
        return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                                      GEN_INT (high)),
                        GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
                 bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
         nearly anywhere with no cost.  If it is a valid operand for an
         ADD or AND, likewise return 0 if we know it will be used in that
         context.  Otherwise, return 2 since it might be used there later.
         All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        *total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
               || (outer_code == AND && and_operand (x, VOIDmode)))
        *total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
        *total = 2;
      else
        *total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
        /* Estimate of cost for call_pal rduniq.  */
        /* ??? How many insns do we emit here?  More than one...  */
        *total = COSTS_N_INSNS (15);
      else
        /* Otherwise we do a load from the GOT.  */
        *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;
    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
               && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
        {
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
                              (enum rtx_code) outer_code, speed)
                    + rtx_cost (XEXP (x, 1),
                                (enum rtx_code) outer_code, speed)
                    + COSTS_N_INSNS (1));
          return true;
        }
      return false;

    case MULT:
      if (float_mode_p)
        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
      else
        *total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) <= 3)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else
        *total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
      else
        *total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
        *total = 0;
      else
        *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
              <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}
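/* For example, an HImode ref at (plus $30 6) whose base register is known
   to be aligned yields *PALIGNED_MEM as the SImode word at (plus $30 4)
   and *PBITNUM = 16: the halfword sits in the upper half of that word.  */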
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
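/* E.g. for ADDR = (plus $9 6) and OFS = 3, (6 + 3) & 7 == 1, so this
   returns a register holding $9 + 1, which is congruent to $9 + 9 mod 8.  */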
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_INT_P (x)
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
        return NO_REGS;
      if (rclass == ALL_REGS)
        return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
        {
          if (in_p)
            {
              if (!aligned_memory_operand (x, mode))
                sri->icode = direct_optab_handler (reload_in_optab, mode);
            }
          else
            sri->icode = direct_optab_handler (reload_out_optab, mode);
          return NO_REGS;
        }
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
          && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
        return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (!MEM_P (x))
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
    else
      gcc_unreachable ();
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
                                 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */
static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
                        HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
        {
          extra = 0x4000;
          tmp1 -= 0x40000000;
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
        }
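      /* E.g. c = 0x7fff8000: the naive high part is -0x8000, whose ldah
         would produce 0x80000000 with the wrong sign; with extra = 0x4000
         the constant instead builds as two ldah insns of 0x4000 each plus
         an lda of -0x8000.  */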
      if (c == low || (low == 0 && extra == 0))
        {
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

          if (no_output)
            return pc_rtx;
          if (target == NULL)
            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
          return target;
        }
      else if (n >= 2 + (extra != 0))
        {
          if (no_output)
            return pc_rtx;
          if (!can_create_pseudo_p ())
            {
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
              temp = target;
            }
          else
            temp = copy_to_suggested_reg (GEN_INT (high << 16),
                                          subtarget, mode);

          /* As of 2002-02-23, addsi3 is only available when not optimizing.
             This means that if we go through expand_binop, we'll try to
             generate extensions, etc, which will require new pseudos, which
             will fail during some split phases.  The SImode add patterns
             still exist, but are not named.  So build the insns by hand.  */

          if (extra != 0)
            {
              if (! subtarget)
                subtarget = gen_reg_rtx (mode);
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
              insn = gen_rtx_SET (VOIDmode, subtarget, insn);
              emit_insn (insn);
              temp = subtarget;
            }

          if (target == NULL)
            target = gen_reg_rtx (mode);
          insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
          insn = gen_rtx_SET (VOIDmode, target, insn);
          emit_insn (insn);
          return target;
        }
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;
  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
         high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
        {
          temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
          if (temp)
            {
              if (no_output)
                return temp;
              return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
                                   target, 0, OPTAB_WIDEN);
            }
        }

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);
        }

      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c >> bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp && c < 0)
              {
                new_const = (unsigned HOST_WIDE_INT)c >> bits;
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
         confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                     target, 1, OPTAB_WIDEN);
              }
          }
      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (~ c) - 2);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
    }
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
                               target, 0, OPTAB_WIDEN);
        }
    }
#endif

  return 0;
}
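/* As an example of the ZAPNOT trick above: for c = 0xffffffffff00ff00,
   setting the zero bytes gives new_const = -1, which lda can load in one
   insn; c | ~new_const is then c itself, a zap mask, so the AND becomes a
   zapnot and the whole constant costs two insns.  */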
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
                      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
        return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
        {
          rtx insn, set;

          if (no_output)
            return result;

          insn = get_last_insn ();
          set = single_set (insn);
          if (! CONSTANT_P (SET_SRC (set)))
            set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
          break;
        }
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
        result = orig_target;
      else if (mode != orig_mode)
        result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
        emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
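/* As a worked example, 0x1234567887654321 decomposes as
   ((0x12340000 + 0x5679) << 32) + (-0x789b0000) + 0x4321,
   which the code above emits as ldah, lda, sll 32, ldah, lda.  */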
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the low 64 bits.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (CONST_INT_P (x))
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
   we are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

static bool
alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
        x = XEXP (XEXP (x, 0), 0);
      else
        return true;

      if (GET_CODE (x) != SYMBOL_REF)
        return true;

      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        return true;
      if (FLOAT_MODE_P (mode))
        return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
        return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
        return false;
      if (GET_MODE_SIZE (mode) != 8)
        return false;
      goto do_integer;

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
        return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
        emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
        {
          if (tmp == operands[0])
            return true;
          operands[1] = tmp;
          return false;
        }
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
        return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
        {
          if (reload_in_progress)
            {
              if (mode == QImode)
                seq = gen_reload_inqi_aligned (operands[0], operands[1]);
              else
                seq = gen_reload_inhi_aligned (operands[0], operands[1]);
              emit_insn (seq);
            }
          else
            {
              rtx aligned_mem, bitnum;
              rtx scratch = gen_reg_rtx (SImode);
              rtx subtarget;
              bool copyout;

              get_aligned_mem (operands[1], &aligned_mem, &bitnum);

              subtarget = operands[0];
              if (REG_P (subtarget))
                subtarget = gen_lowpart (DImode, subtarget), copyout = false;
              else
                subtarget = gen_reg_rtx (DImode), copyout = true;

              if (mode == QImode)
                seq = gen_aligned_loadqi (subtarget, aligned_mem,
                                          bitnum, scratch);
              else
                seq = gen_aligned_loadhi (subtarget, aligned_mem,
                                          bitnum, scratch);
              emit_insn (seq);

              if (copyout)
                emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
            }
        }
      else
        {
          /* Don't pass these as parameters since that makes the generated
             code depend on parameter evaluation order which will cause
             bootstrap failures.  */

          rtx temp1, temp2, subtarget, ua;
          bool copyout;

          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);

          subtarget = operands[0];
          if (REG_P (subtarget))
            subtarget = gen_lowpart (DImode, subtarget), copyout = false;
          else
            subtarget = gen_reg_rtx (DImode), copyout = true;

          ua = get_unaligned_address (operands[1]);
          if (mode == QImode)
            seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
          else
            seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

          alpha_set_memflags (seq, operands[1]);
          emit_insn (seq);

          if (copyout)
            emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
        }
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
        {
          rtx aligned_mem, bitnum;
          rtx temp1 = gen_reg_rtx (SImode);
          rtx temp2 = gen_reg_rtx (SImode);

          get_aligned_mem (operands[0], &aligned_mem, &bitnum);

          emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
                                        temp1, temp2));
        }
      else
        {
          rtx temp1 = gen_reg_rtx (DImode);
          rtx temp2 = gen_reg_rtx (DImode);
          rtx temp3 = gen_reg_rtx (DImode);
          rtx ua = get_unaligned_address (operands[0]);

          if (mode == QImode)
            seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
          else
            seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

          alpha_set_memflags (seq, operands[0]);
          emit_insn (seq);
        }
      return true;
    }

  return false;
}
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
        tmp = operands[0];
      else
        tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
        emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
        operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else, because I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

        srl     $16,1,$1
        and     $16,1,$2
        cmplt   $16,0,$3
        or      $1,$2,$2
        cmovge  $16,$16,$2
        itoft   $3,$f10
        itoft   $2,$f11
        cvtqs   $f11,$f11
        adds    $f11,$f11,$f0
        fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

   LC0: .long 0,0x5f800000

        itoft   $16,$f11
        lda     $2,LC0
        cmplt   $16,0,$1
        cpyse   $f11,$f31,$f10
        cpyse   $f31,$f11,$f11
        s4addq  $1,$2,$1
        lds     $f12,0($1)
        cvtqt   $f10,$f10
        cvtqt   $f11,$f11
        addt    $f12,$f10,$f0
        addt    $f0,$f11,$f0
        stt     $f0,-8($30)

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */
2373 alpha_emit_floatuns (rtx operands[2])
2375 rtx neglab, donelab, i0, i1, f0, in, out;
2376 enum machine_mode mode;
2379 in = force_reg (DImode, operands[1]);
2380 mode = GET_MODE (out);
2381 neglab = gen_label_rtx ();
2382 donelab = gen_label_rtx ();
2383 i0 = gen_reg_rtx (DImode);
2384 i1 = gen_reg_rtx (DImode);
2385 f0 = gen_reg_rtx (mode);
2387 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2389 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2390 emit_jump_insn (gen_jump (donelab));
2393 emit_label (neglab);
2395 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2396 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2397 emit_insn (gen_iordi3 (i0, i0, i1));
2398 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2399 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2401 emit_label (donelab);
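/* A host-side sketch of the trick above, for illustration only (plain
   C, not part of the expander): halve the value, keep the low bit
   sticky so that the final doubling rounds the same way a direct
   conversion would.

     #include <stdint.h>

     double u64_to_double (uint64_t x)
     {
       if ((int64_t) x >= 0)
	 return (double) (int64_t) x;	/* high bit clear: convert directly */
       uint64_t half = (x >> 1) | (x & 1);	/* the lshr/and/ior above */
       return (double) (int64_t) half * 2.0;	/* float, then f0 + f0 */
     }
*/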
2404 /* Generate the comparison for a conditional branch. */
2407 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2409 enum rtx_code cmp_code, branch_code;
2410 enum machine_mode branch_mode = VOIDmode;
2411 enum rtx_code code = GET_CODE (operands[0]);
2412 rtx op0 = operands[1], op1 = operands[2];
2415 if (cmp_mode == TFmode)
2417 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2422 /* The general case: fold the comparison code to the types of compares
2423 that we have, choosing the branch as necessary. */
2426 case EQ: case LE: case LT: case LEU: case LTU:
2428 /* We have these compares: */
2429 cmp_code = code, branch_code = NE;
2434 /* These must be reversed. */
2435 cmp_code = reverse_condition (code), branch_code = EQ;
2438 case GE: case GT: case GEU: case GTU:
2439 /* For FP, we swap them, for INT, we reverse them. */
2440 if (cmp_mode == DFmode)
2442 cmp_code = swap_condition (code);
2444 tem = op0, op0 = op1, op1 = tem;
2448 cmp_code = reverse_condition (code);
2457 if (cmp_mode == DFmode)
2459 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2461 /* When we are not as concerned about non-finite values, and we
2462 are comparing against zero, we can branch directly. */
2463 if (op1 == CONST0_RTX (DFmode))
2464 cmp_code = UNKNOWN, branch_code = code;
2465 else if (op0 == CONST0_RTX (DFmode))
2467 /* Undo the swap we probably did just above. */
2468 tem = op0, op0 = op1, op1 = tem;
2469 branch_code = swap_condition (cmp_code);
2475 /* ??? We mark the branch mode to be CCmode to prevent the
2476 compare and branch from being combined, since the compare
2477 insn follows IEEE rules that the branch does not. */
2478 branch_mode = CCmode;
2483 /* The following optimizations are only for signed compares. */
2484 if (code != LEU && code != LTU && code != GEU && code != GTU)
2486 /* Whee. Compare and branch against 0 directly. */
2487 if (op1 == const0_rtx)
2488 cmp_code = UNKNOWN, branch_code = code;
2490 /* If the constant doesn't fit into an immediate but can
2491 be generated by lda/ldah, we adjust the argument and
2492 compare against zero, so we can use beq/bne directly. */
2493 /* ??? Don't do this when comparing against symbols, otherwise
2494 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2495 be declared false out of hand (at least for non-weak). */
2496 else if (CONST_INT_P (op1)
2497 && (code == EQ || code == NE)
2498 && !(symbolic_operand (op0, VOIDmode)
2499 || (REG_P (op0) && REG_POINTER (op0))))
2501 rtx n_op1 = GEN_INT (-INTVAL (op1));
2503 if (! satisfies_constraint_I (op1)
2504 && (satisfies_constraint_K (n_op1)
2505 || satisfies_constraint_L (n_op1)))
2506 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2510 if (!reg_or_0_operand (op0, DImode))
2511 op0 = force_reg (DImode, op0);
2512 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2513 op1 = force_reg (DImode, op1);
2516 /* Emit an initial compare instruction, if necessary. */
2518 if (cmp_code != UNKNOWN)
2520 tem = gen_reg_rtx (cmp_mode);
2521 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2524 /* Emit the branch instruction. */
2525 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2526 gen_rtx_IF_THEN_ELSE (VOIDmode,
2527 gen_rtx_fmt_ee (branch_code,
2529 CONST0_RTX (cmp_mode)),
2530 gen_rtx_LABEL_REF (VOIDmode,
2533 emit_jump_insn (tem);
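/* As a worked example of the lda/ldah adjustment above (a sketch,
   assuming the usual meanings of constraints I, K and L): for x == 0x1234
   the constant is too wide for an 8-bit literal (I), but -0x1234 fits a
   16-bit lda immediate (K), so the comparison becomes

	lda	t0,-4660(x)	; t0 = x - 0x1234
	beq	t0,L		; branch directly against zero

   instead of first materializing 0x1234 in a register.  */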
2536 /* Certain simplifications can be done to make invalid setcc operations
2537 valid. Return the final comparison, or NULL if we can't work. */
2540 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2542 enum rtx_code cmp_code;
2543 enum rtx_code code = GET_CODE (operands[1]);
2544 rtx op0 = operands[2], op1 = operands[3];
2547 if (cmp_mode == TFmode)
2549 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2554 if (cmp_mode == DFmode && !TARGET_FIX)
2557 /* The general case: fold the comparison code to the types of compares
2558 that we have, choosing the branch as necessary. */
2563 case EQ: case LE: case LT: case LEU: case LTU:
2565 /* We have these compares. */
2566 if (cmp_mode == DFmode)
2567 cmp_code = code, code = NE;
2571 if (cmp_mode == DImode && op1 == const0_rtx)
2576 cmp_code = reverse_condition (code);
2580 case GE: case GT: case GEU: case GTU:
2581 /* These normally need swapping, but for integer zero we have
2582 special patterns that recognize swapped operands. */
2583 if (cmp_mode == DImode && op1 == const0_rtx)
2585 code = swap_condition (code);
2586 if (cmp_mode == DFmode)
2587 cmp_code = code, code = NE;
2588 tmp = op0, op0 = op1, op1 = tmp;
2595 if (cmp_mode == DImode)
2597 if (!register_operand (op0, DImode))
2598 op0 = force_reg (DImode, op0);
2599 if (!reg_or_8bit_operand (op1, DImode))
2600 op1 = force_reg (DImode, op1);
2603 /* Emit an initial compare instruction, if necessary. */
2604 if (cmp_code != UNKNOWN)
2606 tmp = gen_reg_rtx (cmp_mode);
2607 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2608 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2610 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2614 /* Emit the setcc instruction. */
2615 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2616 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2621 /* Rewrite a comparison against zero CMP of the form
2622 (CODE (cc0) (const_int 0)) so it can be written validly in
2623 a conditional move (if_then_else CMP ...).
2624 If both of the operands that set cc0 are nonzero we must emit
2625 an insn to perform the compare (it can't be done within
2626 the conditional move). */
2629 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2631 enum rtx_code code = GET_CODE (cmp);
2632 enum rtx_code cmov_code = NE;
2633 rtx op0 = XEXP (cmp, 0);
2634 rtx op1 = XEXP (cmp, 1);
2635 enum machine_mode cmp_mode
2636 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2637 enum machine_mode cmov_mode = VOIDmode;
2638 int local_fast_math = flag_unsafe_math_optimizations;
2641 if (cmp_mode == TFmode)
2643 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2648 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2650 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2652 enum rtx_code cmp_code;
2657 /* If we have fp<->int register move instructions, do a cmov by
2658 performing the comparison in fp registers, and move the
2659 zero/nonzero value to integer registers, where we can then
2660 use a normal cmov, or vice-versa. */
2664 case EQ: case LE: case LT: case LEU: case LTU:
2665 /* We have these compares. */
2666 cmp_code = code, code = NE;
2670 /* This must be reversed. */
2671 cmp_code = EQ, code = EQ;
2674 case GE: case GT: case GEU: case GTU:
2675 /* These normally need swapping, but for integer zero we have
2676 special patterns that recognize swapped operands. */
2677 if (cmp_mode == DImode && op1 == const0_rtx)
2678 cmp_code = code, code = NE;
2681 cmp_code = swap_condition (code);
2683 tem = op0, op0 = op1, op1 = tem;
2691 tem = gen_reg_rtx (cmp_mode);
2692 emit_insn (gen_rtx_SET (VOIDmode, tem,
2693 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2696 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2697 op0 = gen_lowpart (cmp_mode, tem);
2698 op1 = CONST0_RTX (cmp_mode);
2699 local_fast_math = 1;
2702 /* We may be able to use a conditional move directly.
2703 This avoids emitting spurious compares. */
2704 if (signed_comparison_operator (cmp, VOIDmode)
2705 && (cmp_mode == DImode || local_fast_math)
2706 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2707 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2709 /* We can't put the comparison inside the conditional move;
2710 emit a compare instruction and put that inside the
2711 conditional move. Make sure we emit only comparisons we have;
2712 swap or reverse as necessary. */
2714 if (!can_create_pseudo_p ())
2719 case EQ: case LE: case LT: case LEU: case LTU:
2720 /* We have these compares: */
2724 /* This must be reversed. */
2725 code = reverse_condition (code);
2729 case GE: case GT: case GEU: case GTU:
2730 /* These must be swapped. */
2731 if (op1 != CONST0_RTX (cmp_mode))
2733 code = swap_condition (code);
2734 tem = op0, op0 = op1, op1 = tem;
2742 if (cmp_mode == DImode)
2744 if (!reg_or_0_operand (op0, DImode))
2745 op0 = force_reg (DImode, op0);
2746 if (!reg_or_8bit_operand (op1, DImode))
2747 op1 = force_reg (DImode, op1);
2750 /* ??? We mark the branch mode to be CCmode to prevent the compare
2751 and cmov from being combined, since the compare insn follows IEEE
2752 rules that the cmov does not. */
2753 if (cmp_mode == DFmode && !local_fast_math)
2756 tem = gen_reg_rtx (cmp_mode);
2757 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2758 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
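/* As an example of the cross-mode case above (a sketch): selecting an
   integer result on a DFmode comparison with TARGET_FIX performs the
   compare in the FP registers (cmpteq/cmptlt yields 0.0 or 2.0), moves
   the raw bits into an integer register via gen_lowpart (an ftoit on
   EV6), and then issues an ordinary integer cmovne against zero.  */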
2761 /* Simplify a conditional move of two constants into a setcc with
2762 arithmetic. This is done with a splitter since combine would
2763 just undo the work if done during code generation. It also catches
2764 cases we wouldn't have before cse. */
2767 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2768 rtx t_rtx, rtx f_rtx)
2770 HOST_WIDE_INT t, f, diff;
2771 enum machine_mode mode;
2772 rtx target, subtarget, tmp;
2774 mode = GET_MODE (dest);
2779 if (((code == NE || code == EQ) && diff < 0)
2780 || (code == GE || code == GT))
2782 code = reverse_condition (code);
2783 diff = t, t = f, f = diff;
2787 subtarget = target = dest;
2790 target = gen_lowpart (DImode, dest);
2791 if (can_create_pseudo_p ())
2792 subtarget = gen_reg_rtx (DImode);
2796 /* Below, we must be careful to use copy_rtx on target and subtarget
2797 in intermediate insns, as they may be a subreg rtx, which may not
2800 if (f == 0 && exact_log2 (diff) > 0
2801 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2802 viable over a longer latency cmove. On EV5, the E0 slot is a
2803 scarce resource, and on EV4 shift has the same latency as a cmove. */
2804 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2806 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2807 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2809 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2810 GEN_INT (exact_log2 (t)));
2811 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2813 else if (f == 0 && t == -1)
2815 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2816 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2818 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2820 else if (diff == 1 || diff == 4 || diff == 8)
2824 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2825 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2828 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2831 add_op = GEN_INT (f);
2832 if (sext_add_operand (add_op, mode))
2834 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2836 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2837 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
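/* Sketches of the splits above, assuming a DImode destination and
   writing "setcc t,cond" for the comparison emitted into SUBTARGET:

     dest = cond ? 8 : 0	->  setcc t,cond ; sll t,3,dest
     dest = cond ? -1 : 0	->  setcc t,cond ; negq t,dest
     dest = cond ? 5 : 1	->  diff == 4: setcc t,cond ; s4addq t,1,dest
*/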
2849 /* Look up the function X_floating library function name for the
2852 struct GTY(()) xfloating_op
2854 const enum rtx_code code;
2855 const char *const GTY((skip)) osf_func;
2856 const char *const GTY((skip)) vms_func;
2860 static GTY(()) struct xfloating_op xfloating_ops[] =
2862 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2863 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2864 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2865 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2866 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2867 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2868 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2869 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2870 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2871 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2872 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2873 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2874 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2875 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2876 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2879 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2881 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2882 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2886 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2888 struct xfloating_op *ops = xfloating_ops;
2889 long n = ARRAY_SIZE (xfloating_ops);
2892 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2894 /* How irritating. Nothing to key off for the main table. */
2895 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2898 n = ARRAY_SIZE (vax_cvt_ops);
2901 for (i = 0; i < n; ++i, ++ops)
2902 if (ops->code == code)
2904 rtx func = ops->libcall;
2907 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2908 ? ops->vms_func : ops->osf_func);
2909 ops->libcall = func;
2917 /* Most X_floating operations take the rounding mode as an argument.
2918 Compute that here. */
2921 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2922 enum alpha_fp_rounding_mode round)
2928 case ALPHA_FPRM_NORM:
2931 case ALPHA_FPRM_MINF:
2934 case ALPHA_FPRM_CHOP:
2937 case ALPHA_FPRM_DYN:
2943 /* XXX For reference, round to +inf is mode = 3. */
2946 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2952 /* Emit an X_floating library function call.
2954 Note that these functions do not follow normal calling conventions:
2955 TFmode arguments are passed in two integer registers (as opposed to
2956 indirect); TFmode return values appear in R16+R17.
2958 FUNC is the function to call.
2959 TARGET is where the output belongs.
2960 OPERANDS are the inputs.
2961 NOPERANDS is the count of inputs.
2962 EQUIV is the expression equivalent for the function.
2966 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2967 int noperands, rtx equiv)
2969 rtx usage = NULL_RTX, tmp, reg;
2974 for (i = 0; i < noperands; ++i)
2976 switch (GET_MODE (operands[i]))
2979 reg = gen_rtx_REG (TFmode, regno);
2984 reg = gen_rtx_REG (DFmode, regno + 32);
2989 gcc_assert (CONST_INT_P (operands[i]));
2992 reg = gen_rtx_REG (DImode, regno);
3000 emit_move_insn (reg, operands[i]);
3001 use_reg (&usage, reg);
3004 switch (GET_MODE (target))
3007 reg = gen_rtx_REG (TFmode, 16);
3010 reg = gen_rtx_REG (DFmode, 32);
3013 reg = gen_rtx_REG (DImode, 0);
3019 tmp = gen_rtx_MEM (QImode, func);
3020 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3021 const0_rtx, const0_rtx));
3022 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3023 RTL_CONST_CALL_P (tmp) = 1;
3028 emit_libcall_block (tmp, target, reg, equiv);
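/* For example (a sketch of the convention described above, assuming
   argument registers are assigned starting at $16): a call to _OtsAddX
   passes the first TFmode operand in $16:$17, the second in $18:$19,
   and the DImode rounding-mode argument in $20, with the TFmode result
   coming back in $16:$17.  */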
3031 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3034 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3038 rtx out_operands[3];
3040 func = alpha_lookup_xfloating_lib_func (code);
3041 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3043 out_operands[0] = operands[1];
3044 out_operands[1] = operands[2];
3045 out_operands[2] = GEN_INT (mode);
3046 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3047 gen_rtx_fmt_ee (code, TFmode, operands[1],
3051 /* Emit an X_floating library function call for a comparison. */
3054 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3056 enum rtx_code cmp_code, res_code;
3057 rtx func, out, operands[2], note;
3059 /* X_floating library comparison functions return
3063 Convert the compare against the raw return value. */
3091 func = alpha_lookup_xfloating_lib_func (cmp_code);
3095 out = gen_reg_rtx (DImode);
3097 /* What's actually returned is -1,0,1, not a proper boolean value,
3098 so use an EXPR_LIST as with a generic libcall instead of a
3099 comparison type expression. */
3100 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3101 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3102 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3103 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3108 /* Emit an X_floating library function call for a conversion. */
3111 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3113 int noperands = 1, mode;
3114 rtx out_operands[2];
3116 enum rtx_code code = orig_code;
3118 if (code == UNSIGNED_FIX)
3121 func = alpha_lookup_xfloating_lib_func (code);
3123 out_operands[0] = operands[1];
3128 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3129 out_operands[1] = GEN_INT (mode);
3132 case FLOAT_TRUNCATE:
3133 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3134 out_operands[1] = GEN_INT (mode);
3141 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3142 gen_rtx_fmt_e (orig_code,
3143 GET_MODE (operands[0]),
3147 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3148 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3149 guarantee that the sequence
3152 is valid. Naturally, output operand ordering is little-endian.
3153 This is used by *movtf_internal and *movti_internal. */
3156 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3159 switch (GET_CODE (operands[1]))
3162 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3163 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3167 operands[3] = adjust_address (operands[1], DImode, 8);
3168 operands[2] = adjust_address (operands[1], DImode, 0);
3173 gcc_assert (operands[1] == CONST0_RTX (mode));
3174 operands[2] = operands[3] = const0_rtx;
3181 switch (GET_CODE (operands[0]))
3184 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3185 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3189 operands[1] = adjust_address (operands[0], DImode, 8);
3190 operands[0] = adjust_address (operands[0], DImode, 0);
3197 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3200 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3201 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3205 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3206 op2 is a register containing the sign bit, operation is the
3207 logical operation to be performed. */
3210 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3212 rtx high_bit = operands[2];
3216 alpha_split_tmode_pair (operands, TFmode, false);
3218 /* Detect three flavors of operand overlap. */
3220 if (rtx_equal_p (operands[0], operands[2]))
3222 else if (rtx_equal_p (operands[1], operands[2]))
3224 if (rtx_equal_p (operands[0], high_bit))
3231 emit_move_insn (operands[0], operands[2]);
3233 /* ??? If the destination overlaps both source tf and high_bit, then
3234 assume source tf is dead in its entirety and use the other half
3235 for a scratch register. Otherwise "scratch" is just the proper
3236 destination register. */
3237 scratch = operands[move < 2 ? 1 : 3];
3239 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3243 emit_move_insn (operands[0], operands[2]);
3245 emit_move_insn (operands[1], scratch);
3249 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3253 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3254 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3255 lda r3,X(r11) lda r3,X+2(r11)
3256 extwl r1,r3,r1 extql r1,r3,r1
3257 extwh r2,r3,r2 extqh r2,r3,r2
3258 or r1,r2,r1 or r1,r2,r1
3261 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3262 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3263 lda r3,X(r11) lda r3,X(r11)
3264 extll r1,r3,r1 extll r1,r3,r1
3265 extlh r2,r3,r2 extlh r2,r3,r2
3266 or r1,r2,r1 addl r1,r2,r1
3268 quad: ldq_u r1,X(r11)
3277 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3278 HOST_WIDE_INT ofs, int sign)
3280 rtx meml, memh, addr, extl, exth, tmp, mema;
3281 enum machine_mode mode;
3283 if (TARGET_BWX && size == 2)
3285 meml = adjust_address (mem, QImode, ofs);
3286 memh = adjust_address (mem, QImode, ofs+1);
3287 extl = gen_reg_rtx (DImode);
3288 exth = gen_reg_rtx (DImode);
3289 emit_insn (gen_zero_extendqidi2 (extl, meml));
3290 emit_insn (gen_zero_extendqidi2 (exth, memh));
3291 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3292 NULL, 1, OPTAB_LIB_WIDEN);
3293 addr = expand_simple_binop (DImode, IOR, extl, exth,
3294 NULL, 1, OPTAB_LIB_WIDEN);
3296 if (sign && GET_MODE (tgt) != HImode)
3298 addr = gen_lowpart (HImode, addr);
3299 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3303 if (GET_MODE (tgt) != DImode)
3304 addr = gen_lowpart (GET_MODE (tgt), addr);
3305 emit_move_insn (tgt, addr);
3310 meml = gen_reg_rtx (DImode);
3311 memh = gen_reg_rtx (DImode);
3312 addr = gen_reg_rtx (DImode);
3313 extl = gen_reg_rtx (DImode);
3314 exth = gen_reg_rtx (DImode);
3316 mema = XEXP (mem, 0);
3317 if (GET_CODE (mema) == LO_SUM)
3318 mema = force_reg (Pmode, mema);
3320 /* AND addresses cannot be in any alias set, since they may implicitly
3321 alias surrounding code. Ideally we'd have some alias set that
3322 covered all types except those with alignment 8 or higher. */
3324 tmp = change_address (mem, DImode,
3325 gen_rtx_AND (DImode,
3326 plus_constant (mema, ofs),
3328 set_mem_alias_set (tmp, 0);
3329 emit_move_insn (meml, tmp);
3331 tmp = change_address (mem, DImode,
3332 gen_rtx_AND (DImode,
3333 plus_constant (mema, ofs + size - 1),
3335 set_mem_alias_set (tmp, 0);
3336 emit_move_insn (memh, tmp);
3338 if (sign && size == 2)
3340 emit_move_insn (addr, plus_constant (mema, ofs+2));
3342 emit_insn (gen_extql (extl, meml, addr));
3343 emit_insn (gen_extqh (exth, memh, addr));
3345 /* We must use tgt here for the target. The alpha-vms port fails if we use
3346 addr for the target, because addr is marked as a pointer and combine
3347 knows that pointers are always sign-extended 32-bit values. */
3348 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3349 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3350 addr, 1, OPTAB_WIDEN);
3354 emit_move_insn (addr, plus_constant (mema, ofs));
3355 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3359 emit_insn (gen_extwh (exth, memh, addr));
3363 emit_insn (gen_extlh (exth, memh, addr));
3367 emit_insn (gen_extqh (exth, memh, addr));
3374 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3375 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3380 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
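/* A host-side model of the quadword case above, for illustration only
   (plain C on a little-endian host, not part of the expander).  extql
   shifts the low aligned quadword right by the address's byte offset;
   extqh shifts the high one left by the complement; their OR is the
   unaligned datum.

     #include <stdint.h>

     uint64_t unaligned_load_q (const uint8_t *p)
     {
       uintptr_t a = (uintptr_t) p;
       const uint64_t *lo = (const uint64_t *) (a & ~(uintptr_t) 7);
       const uint64_t *hi = (const uint64_t *) ((a + 7) & ~(uintptr_t) 7);
       unsigned shift = (a & 7) * 8;
       if (shift == 0)		   /* mirrors extqh's noop-at-zero quirk */
	 return *lo;
       return (*lo >> shift) | (*hi << (64 - shift));
     }
*/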
3383 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3386 alpha_expand_unaligned_store (rtx dst, rtx src,
3387 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3389 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3391 if (TARGET_BWX && size == 2)
3393 if (src != const0_rtx)
3395 dstl = gen_lowpart (QImode, src);
3396 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3397 NULL, 1, OPTAB_LIB_WIDEN);
3398 dsth = gen_lowpart (QImode, dsth);
3401 dstl = dsth = const0_rtx;
3403 meml = adjust_address (dst, QImode, ofs);
3404 memh = adjust_address (dst, QImode, ofs+1);
3406 emit_move_insn (meml, dstl);
3407 emit_move_insn (memh, dsth);
3411 dstl = gen_reg_rtx (DImode);
3412 dsth = gen_reg_rtx (DImode);
3413 insl = gen_reg_rtx (DImode);
3414 insh = gen_reg_rtx (DImode);
3416 dsta = XEXP (dst, 0);
3417 if (GET_CODE (dsta) == LO_SUM)
3418 dsta = force_reg (Pmode, dsta);
3420 /* AND addresses cannot be in any alias set, since they may implicitly
3421 alias surrounding code. Ideally we'd have some alias set that
3422 covered all types except those with alignment 8 or higher. */
3424 meml = change_address (dst, DImode,
3425 gen_rtx_AND (DImode,
3426 plus_constant (dsta, ofs),
3428 set_mem_alias_set (meml, 0);
3430 memh = change_address (dst, DImode,
3431 gen_rtx_AND (DImode,
3432 plus_constant (dsta, ofs + size - 1),
3434 set_mem_alias_set (memh, 0);
3436 emit_move_insn (dsth, memh);
3437 emit_move_insn (dstl, meml);
3439 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3441 if (src != CONST0_RTX (GET_MODE (src)))
3443 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3444 GEN_INT (size*8), addr));
3449 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3452 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3455 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3462 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3467 emit_insn (gen_mskwl (dstl, dstl, addr));
3470 emit_insn (gen_mskll (dstl, dstl, addr));
3473 emit_insn (gen_mskql (dstl, dstl, addr));
3479 if (src != CONST0_RTX (GET_MODE (src)))
3481 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3482 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3485 /* Must store high before low for degenerate case of aligned. */
3486 emit_move_insn (memh, dsth);
3487 emit_move_insn (meml, dstl);
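/* A host-side model of the 2-byte case above, for illustration only
   (plain C on a little-endian host; ignores atomicity and
   strict-aliasing concerns).  The ins/msk pairs clear the destination
   byte field and insert the pre-shifted source.

     #include <stdint.h>

     void unaligned_store_w (uint8_t *p, uint16_t v)
     {
       uintptr_t a = (uintptr_t) p;
       uint64_t *lo = (uint64_t *) (a & ~(uintptr_t) 7);
       uint64_t *hi = (uint64_t *) ((a + 1) & ~(uintptr_t) 7);
       unsigned shift = (a & 7) * 8;
       uint64_t dl = *lo, dh = *hi;
       dl = (dl & ~((uint64_t) 0xffff << shift))	   /* mskwl */
	    | ((uint64_t) v << shift);			   /* inswl */
       if (shift > 48)		   /* the value straddles two quadwords */
	 dh = (dh & ~((uint64_t) 0xffff >> (64 - shift)))  /* mskwh */
	      | ((uint64_t) v >> (64 - shift));		   /* inswh */
       *hi = dh;	/* store high before low for the aligned case */
       *lo = dl;
     }
*/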
3490 /* The block move code tries to maximize speed by separating loads and
3491 stores at the expense of register pressure: we load all of the data
3492 before we store it back out. Two secondary effects are worth
3493 mentioning: this speeds copying to/from aligned and unaligned
3494 buffers, and it makes the code significantly easier to write. */
3496 #define MAX_MOVE_WORDS 8
3498 /* Load an integral number of consecutive unaligned quadwords. */
3501 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3502 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3504 rtx const im8 = GEN_INT (-8);
3505 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3506 rtx sreg, areg, tmp, smema;
3509 smema = XEXP (smem, 0);
3510 if (GET_CODE (smema) == LO_SUM)
3511 smema = force_reg (Pmode, smema);
3513 /* Generate all the tmp registers we need. */
3514 for (i = 0; i < words; ++i)
3516 data_regs[i] = out_regs[i];
3517 ext_tmps[i] = gen_reg_rtx (DImode);
3519 data_regs[words] = gen_reg_rtx (DImode);
3522 smem = adjust_address (smem, GET_MODE (smem), ofs);
3524 /* Load up all of the source data. */
3525 for (i = 0; i < words; ++i)
3527 tmp = change_address (smem, DImode,
3528 gen_rtx_AND (DImode,
3529 plus_constant (smema, 8*i),
3531 set_mem_alias_set (tmp, 0);
3532 emit_move_insn (data_regs[i], tmp);
3535 tmp = change_address (smem, DImode,
3536 gen_rtx_AND (DImode,
3537 plus_constant (smema, 8*words - 1),
3539 set_mem_alias_set (tmp, 0);
3540 emit_move_insn (data_regs[words], tmp);
3542 /* Extract the half-word fragments. Unfortunately DEC decided to make
3543 extxh with offset zero a noop instead of zeroing the register, so
3544 we must take care of that edge condition ourselves with cmov. */
3546 sreg = copy_addr_to_reg (smema);
3547 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3549 for (i = 0; i < words; ++i)
3551 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3552 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3553 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3554 gen_rtx_IF_THEN_ELSE (DImode,
3555 gen_rtx_EQ (DImode, areg,
3557 const0_rtx, ext_tmps[i])));
3560 /* Merge the half-words into whole words. */
3561 for (i = 0; i < words; ++i)
3563 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3564 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
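/* So a two-word unaligned load touches three aligned quadwords
   q0,q1,q2 and merges neighbors (a sketch):

	out[0] = extql (q0) | extqh (q1)
	out[1] = extql (q1) | extqh (q2)

   with the cmov above zeroing each extqh term when the source happens
   to be aligned.  */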
3568 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3569 may be NULL to store zeros. */
3572 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3573 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3575 rtx const im8 = GEN_INT (-8);
3576 rtx ins_tmps[MAX_MOVE_WORDS];
3577 rtx st_tmp_1, st_tmp_2, dreg;
3578 rtx st_addr_1, st_addr_2, dmema;
3581 dmema = XEXP (dmem, 0);
3582 if (GET_CODE (dmema) == LO_SUM)
3583 dmema = force_reg (Pmode, dmema);
3585 /* Generate all the tmp registers we need. */
3586 if (data_regs != NULL)
3587 for (i = 0; i < words; ++i)
3588 ins_tmps[i] = gen_reg_rtx (DImode);
3589 st_tmp_1 = gen_reg_rtx (DImode);
3590 st_tmp_2 = gen_reg_rtx (DImode);
3593 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3595 st_addr_2 = change_address (dmem, DImode,
3596 gen_rtx_AND (DImode,
3597 plus_constant (dmema, words*8 - 1),
3599 set_mem_alias_set (st_addr_2, 0);
3601 st_addr_1 = change_address (dmem, DImode,
3602 gen_rtx_AND (DImode, dmema, im8));
3603 set_mem_alias_set (st_addr_1, 0);
3605 /* Load up the destination end bits. */
3606 emit_move_insn (st_tmp_2, st_addr_2);
3607 emit_move_insn (st_tmp_1, st_addr_1);
3609 /* Shift the input data into place. */
3610 dreg = copy_addr_to_reg (dmema);
3611 if (data_regs != NULL)
3613 for (i = words-1; i >= 0; --i)
3615 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3616 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3618 for (i = words-1; i > 0; --i)
3620 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3621 ins_tmps[i-1], ins_tmps[i-1], 1,
3626 /* Split and merge the ends with the destination data. */
3627 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3628 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3630 if (data_regs != NULL)
3632 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3633 st_tmp_2, 1, OPTAB_WIDEN);
3634 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3635 st_tmp_1, 1, OPTAB_WIDEN);
3639 emit_move_insn (st_addr_2, st_tmp_2);
3640 for (i = words-1; i > 0; --i)
3642 rtx tmp = change_address (dmem, DImode,
3643 gen_rtx_AND (DImode,
3644 plus_constant (dmema, i*8),
3646 set_mem_alias_set (tmp, 0);
3647 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3649 emit_move_insn (st_addr_1, st_tmp_1);
3653 /* Expand string/block move operations.
3655 operands[0] is the pointer to the destination.
3656 operands[1] is the pointer to the source.
3657 operands[2] is the number of bytes to move.
3658 operands[3] is the alignment. */
3661 alpha_expand_block_move (rtx operands[])
3663 rtx bytes_rtx = operands[2];
3664 rtx align_rtx = operands[3];
3665 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3666 HOST_WIDE_INT bytes = orig_bytes;
3667 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3668 HOST_WIDE_INT dst_align = src_align;
3669 rtx orig_src = operands[1];
3670 rtx orig_dst = operands[0];
3671 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3673 unsigned int i, words, ofs, nregs = 0;
3675 if (orig_bytes <= 0)
3677 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3680 /* Look for additional alignment information from recorded register info. */
3682 tmp = XEXP (orig_src, 0);
3684 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3685 else if (GET_CODE (tmp) == PLUS
3686 && REG_P (XEXP (tmp, 0))
3687 && CONST_INT_P (XEXP (tmp, 1)))
3689 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3690 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3694 if (a >= 64 && c % 8 == 0)
3696 else if (a >= 32 && c % 4 == 0)
3698 else if (a >= 16 && c % 2 == 0)
3703 tmp = XEXP (orig_dst, 0);
3705 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3706 else if (GET_CODE (tmp) == PLUS
3707 && REG_P (XEXP (tmp, 0))
3708 && CONST_INT_P (XEXP (tmp, 1)))
3710 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3711 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3715 if (a >= 64 && c % 8 == 0)
3717 else if (a >= 32 && c % 4 == 0)
3719 else if (a >= 16 && c % 2 == 0)
3725 if (src_align >= 64 && bytes >= 8)
3729 for (i = 0; i < words; ++i)
3730 data_regs[nregs + i] = gen_reg_rtx (DImode);
3732 for (i = 0; i < words; ++i)
3733 emit_move_insn (data_regs[nregs + i],
3734 adjust_address (orig_src, DImode, ofs + i * 8));
3741 if (src_align >= 32 && bytes >= 4)
3745 for (i = 0; i < words; ++i)
3746 data_regs[nregs + i] = gen_reg_rtx (SImode);
3748 for (i = 0; i < words; ++i)
3749 emit_move_insn (data_regs[nregs + i],
3750 adjust_address (orig_src, SImode, ofs + i * 4));
3761 for (i = 0; i < words+1; ++i)
3762 data_regs[nregs + i] = gen_reg_rtx (DImode);
3764 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3772 if (! TARGET_BWX && bytes >= 4)
3774 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3775 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3782 if (src_align >= 16)
3785 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3786 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3789 } while (bytes >= 2);
3791 else if (! TARGET_BWX)
3793 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3794 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3802 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3803 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3808 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3810 /* Now save it back out again. */
3814 /* Write out the data in whatever chunks reading the source allowed. */
3815 if (dst_align >= 64)
3817 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3819 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3826 if (dst_align >= 32)
3828 /* If the source has remaining DImode regs, write them out in
3830 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3832 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3833 NULL_RTX, 1, OPTAB_WIDEN);
3835 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3836 gen_lowpart (SImode, data_regs[i]));
3837 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3838 gen_lowpart (SImode, tmp));
3843 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3845 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3852 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3854 /* Write out a remaining block of words using unaligned methods. */
3856 for (words = 1; i + words < nregs; words++)
3857 if (GET_MODE (data_regs[i + words]) != DImode)
3861 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3863 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3870 /* Due to the above, this won't be aligned. */
3871 /* ??? If we have more than one of these, consider constructing full
3872 words in registers and using alpha_expand_unaligned_store_words. */
3873 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3875 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3880 if (dst_align >= 16)
3881 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3883 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3888 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3890 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3895 /* The remainder must be byte copies. */
3898 gcc_assert (GET_MODE (data_regs[i]) == QImode);
3899 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
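/* For example (a sketch): a 15-byte copy with 32-bit source and
   destination alignment is read as three SImode words, one HImode
   halfword and one QImode byte, all into registers, and then written
   back out in the same chunks.  */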
3908 alpha_expand_block_clear (rtx operands[])
3910 rtx bytes_rtx = operands[1];
3911 rtx align_rtx = operands[3];
3912 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3913 HOST_WIDE_INT bytes = orig_bytes;
3914 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3915 HOST_WIDE_INT alignofs = 0;
3916 rtx orig_dst = operands[0];
3918 int i, words, ofs = 0;
3920 if (orig_bytes <= 0)
3922 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3925 /* Look for stricter alignment. */
3926 tmp = XEXP (orig_dst, 0);
3928 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3929 else if (GET_CODE (tmp) == PLUS
3930 && REG_P (XEXP (tmp, 0))
3931 && CONST_INT_P (XEXP (tmp, 1)))
3933 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3934 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3939 align = a, alignofs = 8 - c % 8;
3941 align = a, alignofs = 4 - c % 4;
3943 align = a, alignofs = 2 - c % 2;
3947 /* Handle an unaligned prefix first. */
3951 #if HOST_BITS_PER_WIDE_INT >= 64
3952 /* Given that alignofs is bounded by align, the only time BWX could
3953 generate three stores is for a 7-byte fill. Prefer two individual
3954 stores over a load/mask/store sequence. */
3955 if ((!TARGET_BWX || alignofs == 7)
3957 && !(alignofs == 4 && bytes >= 4))
3959 enum machine_mode mode = (align >= 64 ? DImode : SImode);
3960 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3964 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3965 set_mem_alias_set (mem, 0);
3967 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3968 if (bytes < alignofs)
3970 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3981 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3982 NULL_RTX, 1, OPTAB_WIDEN);
3984 emit_move_insn (mem, tmp);
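/* A worked instance of the masking above (a sketch): with align >= 64
   and alignofs == 5, the clear starts 3 bytes into a quadword, so
   inv_alignofs == 3 and the mask keeps the low three bytes:

	mask = 0x0000000000ffffff

   and if fewer than alignofs bytes are to be cleared -- say bytes == 2
   -- the bytes above the cleared field are kept as well:

	mask |= ~(HOST_WIDE_INT) 0 << ((3 + 2) * 8)
*/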
3988 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
3990 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
3995 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
3997 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4002 if (alignofs == 4 && bytes >= 4)
4004 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4010 /* If we've not used the extra lead alignment information by now,
4011 we won't be able to. Downgrade align to match what's left over. */
4014 alignofs = alignofs & -alignofs;
4015 align = MIN (align, alignofs * BITS_PER_UNIT);
4019 /* Handle a block of contiguous long-words. */
4021 if (align >= 64 && bytes >= 8)
4025 for (i = 0; i < words; ++i)
4026 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4033 /* If the block is large and appropriately aligned, emit a single
4034 store followed by a sequence of stq_u insns. */
4036 if (align >= 32 && bytes > 16)
4040 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4044 orig_dsta = XEXP (orig_dst, 0);
4045 if (GET_CODE (orig_dsta) == LO_SUM)
4046 orig_dsta = force_reg (Pmode, orig_dsta);
4049 for (i = 0; i < words; ++i)
4052 = change_address (orig_dst, DImode,
4053 gen_rtx_AND (DImode,
4054 plus_constant (orig_dsta, ofs + i*8),
4056 set_mem_alias_set (mem, 0);
4057 emit_move_insn (mem, const0_rtx);
4060 /* Depending on the alignment, the first stq_u may have overlapped
4061 with the initial stl, which means that the last stq_u didn't
4062 write as much as it would appear. Leave those questionable bytes
4064 bytes -= words * 8 - 4;
4065 ofs += words * 8 - 4;
4068 /* Handle a smaller block of aligned words. */
4070 if ((align >= 64 && bytes == 4)
4071 || (align == 32 && bytes >= 4))
4075 for (i = 0; i < words; ++i)
4076 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4083 /* An unaligned block uses stq_u stores for as many as possible. */
4089 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4095 /* Next clean up any trailing pieces. */
4097 #if HOST_BITS_PER_WIDE_INT >= 64
4098 /* Count the number of bits in BYTES for which aligned stores could
4101 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4105 /* If we have appropriate alignment (and it wouldn't take too many
4106 instructions otherwise), mask out the bytes we need. */
4107 if (TARGET_BWX ? words > 2 : bytes > 0)
4114 mem = adjust_address (orig_dst, DImode, ofs);
4115 set_mem_alias_set (mem, 0);
4117 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4119 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4120 NULL_RTX, 1, OPTAB_WIDEN);
4122 emit_move_insn (mem, tmp);
4125 else if (align >= 32 && bytes < 4)
4130 mem = adjust_address (orig_dst, SImode, ofs);
4131 set_mem_alias_set (mem, 0);
4133 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4135 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4136 NULL_RTX, 1, OPTAB_WIDEN);
4138 emit_move_insn (mem, tmp);
4144 if (!TARGET_BWX && bytes >= 4)
4146 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4156 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4160 } while (bytes >= 2);
4162 else if (! TARGET_BWX)
4164 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4172 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4180 /* Returns a mask so that zap(x, value) == x & mask. */
4183 alpha_expand_zap_mask (HOST_WIDE_INT value)
4188 if (HOST_BITS_PER_WIDE_INT >= 64)
4190 HOST_WIDE_INT mask = 0;
4192 for (i = 7; i >= 0; --i)
4195 if (!((value >> i) & 1))
4199 result = gen_int_mode (mask, DImode);
4203 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4205 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4207 for (i = 7; i >= 4; --i)
4210 if (!((value >> i) & 1))
4214 for (i = 3; i >= 0; --i)
4217 if (!((value >> i) & 1))
4221 result = immed_double_const (mask_lo, mask_hi, DImode);
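/* A host-side sketch of the mask construction above (plain C, for
   illustration only), assuming a 64-bit HOST_WIDE_INT:

     #include <stdint.h>

     uint64_t zap_mask (uint64_t value)
     {
       uint64_t mask = 0;
       for (int i = 7; i >= 0; --i)
	 {
	   mask <<= 8;
	   if (!((value >> i) & 1))
	     mask |= 0xff;	/* byte I survives the zap */
	 }
       return mask;
     }

   e.g. zap_mask (0x0f) == 0xffffffff00000000, i.e. a ZAP with byte
   selector 0x0f clears the low four bytes.  */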
4228 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4229 enum machine_mode mode,
4230 rtx op0, rtx op1, rtx op2)
4232 op0 = gen_lowpart (mode, op0);
4234 if (op1 == const0_rtx)
4235 op1 = CONST0_RTX (mode);
4237 op1 = gen_lowpart (mode, op1);
4239 if (op2 == const0_rtx)
4240 op2 = CONST0_RTX (mode);
4242 op2 = gen_lowpart (mode, op2);
4244 emit_insn ((*gen) (op0, op1, op2));
4247 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4248 COND is true. Mark the jump as unlikely to be taken. */
4251 emit_unlikely_jump (rtx cond, rtx label)
4253 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4256 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4257 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4258 add_reg_note (x, REG_BR_PROB, very_unlikely);
4261 /* A subroutine of the atomic operation splitters. Emit a load-locked
4262 instruction in MODE. */
4265 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4267 rtx (*fn) (rtx, rtx) = NULL;
4269 fn = gen_load_locked_si;
4270 else if (mode == DImode)
4271 fn = gen_load_locked_di;
4272 emit_insn (fn (reg, mem));
4275 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4276 instruction in MODE. */
4279 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4281 rtx (*fn) (rtx, rtx, rtx) = NULL;
4283 fn = gen_store_conditional_si;
4284 else if (mode == DImode)
4285 fn = gen_store_conditional_di;
4286 emit_insn (fn (res, mem, val));
4289 /* A subroutine of the atomic operation splitters. Emit an insxl
4290 instruction in MODE. */
4293 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4295 rtx ret = gen_reg_rtx (DImode);
4296 rtx (*fn) (rtx, rtx, rtx);
4316 op1 = force_reg (mode, op1);
4317 emit_insn (fn (ret, op1, op2));
4322 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4323 to perform. MEM is the memory on which to operate. VAL is the second
4324 operand of the binary operator. BEFORE and AFTER are optional locations to
4325 return the value of MEM either before or after the operation. SCRATCH is
4326 a scratch register. */
4329 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4330 rtx before, rtx after, rtx scratch)
4332 enum machine_mode mode = GET_MODE (mem);
4333 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4335 emit_insn (gen_memory_barrier ());
4337 label = gen_label_rtx ();
4339 label = gen_rtx_LABEL_REF (DImode, label);
4343 emit_load_locked (mode, before, mem);
4347 x = gen_rtx_AND (mode, before, val);
4348 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4350 x = gen_rtx_NOT (mode, val);
4353 x = gen_rtx_fmt_ee (code, mode, before, val);
4355 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4356 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4358 emit_store_conditional (mode, cond, mem, scratch);
4360 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4361 emit_unlikely_jump (x, label);
4363 emit_insn (gen_memory_barrier ());
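/* The split above produces the usual Alpha LL/SC loop; e.g. for an
   atomic fetch-and-add on a DImode memory (a sketch):

	mb
   1:	ldq_l	before,0(mem)	; load-locked
	addq	before,val,scratch
	stq_c	scratch,0(mem)	; scratch := 1 on success, 0 on failure
	beq	scratch,1b	; retry, marked very unlikely
	mb
*/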
4366 /* Expand a compare and swap operation. */
4369 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4372 enum machine_mode mode = GET_MODE (mem);
4373 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4375 emit_insn (gen_memory_barrier ());
4377 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4378 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4379 emit_label (XEXP (label1, 0));
4381 emit_load_locked (mode, retval, mem);
4383 x = gen_lowpart (DImode, retval);
4384 if (oldval == const0_rtx)
4385 x = gen_rtx_NE (DImode, x, const0_rtx);
4388 x = gen_rtx_EQ (DImode, x, oldval);
4389 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4390 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4392 emit_unlikely_jump (x, label2);
4394 emit_move_insn (scratch, newval);
4395 emit_store_conditional (mode, cond, mem, scratch);
4397 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4398 emit_unlikely_jump (x, label1);
4400 emit_insn (gen_memory_barrier ());
4401 emit_label (XEXP (label2, 0));
4405 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4407 enum machine_mode mode = GET_MODE (mem);
4408 rtx addr, align, wdst;
4409 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4411 addr = force_reg (DImode, XEXP (mem, 0));
4412 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4413 NULL_RTX, 1, OPTAB_DIRECT);
4415 oldval = convert_modes (DImode, mode, oldval, 1);
4416 newval = emit_insxl (mode, newval, addr);
4418 wdst = gen_reg_rtx (DImode);
4420 fn5 = gen_sync_compare_and_swapqi_1;
4422 fn5 = gen_sync_compare_and_swaphi_1;
4423 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4425 emit_move_insn (dst, gen_lowpart (mode, wdst));
4429 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4430 rtx oldval, rtx newval, rtx align,
4431 rtx scratch, rtx cond)
4433 rtx label1, label2, mem, width, mask, x;
4435 mem = gen_rtx_MEM (DImode, align);
4436 MEM_VOLATILE_P (mem) = 1;
4438 emit_insn (gen_memory_barrier ());
4439 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4440 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4441 emit_label (XEXP (label1, 0));
4443 emit_load_locked (DImode, scratch, mem);
4445 width = GEN_INT (GET_MODE_BITSIZE (mode));
4446 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4447 emit_insn (gen_extxl (dest, scratch, width, addr));
4449 if (oldval == const0_rtx)
4450 x = gen_rtx_NE (DImode, dest, const0_rtx);
4453 x = gen_rtx_EQ (DImode, dest, oldval);
4454 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4455 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4457 emit_unlikely_jump (x, label2);
4459 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4460 emit_insn (gen_iordi3 (scratch, scratch, newval));
4462 emit_store_conditional (DImode, scratch, mem, scratch);
4464 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4465 emit_unlikely_jump (x, label1);
4467 emit_insn (gen_memory_barrier ());
4468 emit_label (XEXP (label2, 0));
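/* For a QImode compare-and-swap the loop above is roughly (a sketch;
   NEWVAL was already shifted into position by emit_insxl):

	mb
   1:	ldq_l	scratch,0(align)	; lock the enclosing quadword
	extbl	scratch,addr,dest	; extract the old byte
	cmpeq	dest,oldval,cond
	beq	cond,2f			; mismatch: give up
	mskbl	scratch,addr,scratch	; clear the byte field ...
	bis	scratch,newval,scratch	; ... and insert the new byte
	stq_c	scratch,0(align)
	beq	scratch,1b		; store-conditional failed: retry
	mb
   2:
*/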
4471 /* Expand an atomic exchange operation. */
4474 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4476 enum machine_mode mode = GET_MODE (mem);
4477 rtx label, x, cond = gen_lowpart (DImode, scratch);
4479 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4480 emit_label (XEXP (label, 0));
4482 emit_load_locked (mode, retval, mem);
4483 emit_move_insn (scratch, val);
4484 emit_store_conditional (mode, cond, mem, scratch);
4486 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4487 emit_unlikely_jump (x, label);
4489 emit_insn (gen_memory_barrier ());
4493 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4495 enum machine_mode mode = GET_MODE (mem);
4496 rtx addr, align, wdst;
4497 rtx (*fn4) (rtx, rtx, rtx, rtx);
4499 /* Force the address into a register. */
4500 addr = force_reg (DImode, XEXP (mem, 0));
4502 /* Align it to a multiple of 8. */
4503 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4504 NULL_RTX, 1, OPTAB_DIRECT);
4506 /* Insert val into the correct byte location within the word. */
4507 val = emit_insxl (mode, val, addr);
4509 wdst = gen_reg_rtx (DImode);
4511 fn4 = gen_sync_lock_test_and_setqi_1;
4513 fn4 = gen_sync_lock_test_and_sethi_1;
4514 emit_insn (fn4 (wdst, addr, val, align));
4516 emit_move_insn (dst, gen_lowpart (mode, wdst));
4520 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4521 rtx val, rtx align, rtx scratch)
4523 rtx label, mem, width, mask, x;
4525 mem = gen_rtx_MEM (DImode, align);
4526 MEM_VOLATILE_P (mem) = 1;
4528 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4529 emit_label (XEXP (label, 0));
4531 emit_load_locked (DImode, scratch, mem);
4533 width = GEN_INT (GET_MODE_BITSIZE (mode));
4534 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4535 emit_insn (gen_extxl (dest, scratch, width, addr));
4536 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4537 emit_insn (gen_iordi3 (scratch, scratch, val));
4539 emit_store_conditional (DImode, scratch, mem, scratch);
4541 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4542 emit_unlikely_jump (x, label);
4544 emit_insn (gen_memory_barrier ());
4547 /* Adjust the cost of a scheduling dependency. Return the new cost of
4548 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4551 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4553 enum attr_type dep_insn_type;
4555 /* If the dependence is an anti-dependence, there is no cost. For an
4556 output dependence, there is sometimes a cost, but it doesn't seem
4557 worth handling those few cases. */
4558 if (REG_NOTE_KIND (link) != 0)
4561 /* If we can't recognize the insns, we can't really do anything. */
4562 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4565 dep_insn_type = get_attr_type (dep_insn);
4567 /* Bring in the user-defined memory latency. */
4568 if (dep_insn_type == TYPE_ILD
4569 || dep_insn_type == TYPE_FLD
4570 || dep_insn_type == TYPE_LDSYM)
4571 cost += alpha_memory_latency-1;
4573 /* Everything else handled in DFA bypasses now. */
4578 /* The number of instructions that can be issued per cycle. */
4581 alpha_issue_rate (void)
4583 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4586 /* How many alternative schedules to try. This should be as wide as the
4587 scheduling freedom in the DFA, but no wider. Making this value too
4588 large results in extra work for the scheduler.
4590 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4591 alternative schedules. For EV5, we can choose between E0/E1 and
4592 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4595 alpha_multipass_dfa_lookahead (void)
4597 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4600 /* Machine-specific function data. */
4602 struct GTY(()) machine_function
4605 const char *some_ld_name;
4607 /* For TARGET_LD_BUGGY_LDGP. */
4608 struct rtx_def *gp_save_rtx;
4610 /* For VMS condition handlers. */
4611 bool uses_condition_handler;
4614 /* How to allocate a 'struct machine_function'. */
4616 static struct machine_function *
4617 alpha_init_machine_status (void)
4619 return ggc_alloc_cleared_machine_function ();
4622 /* Support for frame based VMS condition handlers. */
4624 /* A VMS condition handler may be established for a function with a call to
4625 __builtin_establish_vms_condition_handler, and cancelled with a call to
4626 __builtin_revert_vms_condition_handler.
4628 The VMS Condition Handling Facility knows about the existence of a handler
4629 from the procedure descriptor .handler field. Like the VMS native compilers,
4630 we store the user-specified handler's address at a fixed location in the
4631 stack frame and point the procedure descriptor at a common wrapper which
4632 fetches the real handler's address and issues an indirect call.
4634 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4636 We force the procedure kind to PT_STACK, and the fixed frame location is
4637 fp+8, just before the register save area. We use the handler_data field in
4638 the procedure descriptor to state the fp offset at which the installed
4639 handler address can be found. */
4641 #define VMS_COND_HANDLER_FP_OFFSET 8
4643 /* Expand code to store the currently installed user VMS condition handler
4644 into TARGET and install HANDLER as the new condition handler. */
4647 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4649 rtx handler_slot_address
4650 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4653 = gen_rtx_MEM (DImode, handler_slot_address);
4655 emit_move_insn (target, handler_slot);
4656 emit_move_insn (handler_slot, handler);
4658 /* Notify the start/prologue/epilogue emitters that the condition handler
4659 slot is needed. In addition to reserving the slot space, this will force
4660 the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4661 use above is correct. */
4662 cfun->machine->uses_condition_handler = true;
4665 /* Expand code to store the current VMS condition handler into TARGET and
4669 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4671 /* We implement this by establishing a null condition handler, with the tiny
4672 side effect of setting uses_condition_handler. This is a little bit
4673 pessimistic if no actual builtin_establish call is ever issued, which is
4674 not a real problem and is expected never to happen anyway. */
4676 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4679 /* Functions to save and restore alpha_return_addr_rtx. */
4681 /* Start the ball rolling with RETURN_ADDR_RTX. */
4684 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4689 return get_hard_reg_initial_val (Pmode, REG_RA);
4692 /* Return or create a memory slot containing the gp value for the current
4693 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4696 alpha_gp_save_rtx (void)
4698 rtx seq, m = cfun->machine->gp_save_rtx;
4704 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4705 m = validize_mem (m);
4706 emit_move_insn (m, pic_offset_table_rtx);
4711 /* We used to simply emit the sequence after entry_of_function.
4712 However, this breaks the CFG if the first instruction in the
4713 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4714 label. Emit the sequence properly on the edge. We are only
4715 invoked from dw2_build_landing_pads and finish_eh_generation
4716 will call commit_edge_insertions thanks to a kludge. */
4717 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4719 cfun->machine->gp_save_rtx = m;
4726 alpha_ra_ever_killed (void)
4730 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4731 return (int)df_regs_ever_live_p (REG_RA);
4733 push_topmost_sequence ();
4735 pop_topmost_sequence ();
4737 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4741 /* Return the trap mode suffix applicable to the current
4742 instruction, or NULL. */
4745 get_trap_mode_suffix (void)
4747 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4751 case TRAP_SUFFIX_NONE:
4754 case TRAP_SUFFIX_SU:
4755 if (alpha_fptm >= ALPHA_FPTM_SU)
4759 case TRAP_SUFFIX_SUI:
4760 if (alpha_fptm >= ALPHA_FPTM_SUI)
4764 case TRAP_SUFFIX_V_SV:
4772 case ALPHA_FPTM_SUI:
4778 case TRAP_SUFFIX_V_SV_SVI:
4787 case ALPHA_FPTM_SUI:
4794 case TRAP_SUFFIX_U_SU_SUI:
4803 case ALPHA_FPTM_SUI:
4816 /* Return the rounding mode suffix applicable to the current
4817 instruction, or NULL. */
4820 get_round_mode_suffix (void)
4822 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4826 case ROUND_SUFFIX_NONE:
4828 case ROUND_SUFFIX_NORMAL:
4831 case ALPHA_FPRM_NORM:
4833 case ALPHA_FPRM_MINF:
4835 case ALPHA_FPRM_CHOP:
4837 case ALPHA_FPRM_DYN:
4844 case ROUND_SUFFIX_C:
4853 /* Locate some local-dynamic symbol still in use by this function
4854 so that we can print its name in some movdi_er_tlsldm pattern. */
4857 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4861 if (GET_CODE (x) == SYMBOL_REF
4862 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4864 cfun->machine->some_ld_name = XSTR (x, 0);
4872 get_some_local_dynamic_name (void)
4876 if (cfun->machine->some_ld_name)
4877 return cfun->machine->some_ld_name;
4879 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4881 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4882 return cfun->machine->some_ld_name;
4887 /* Print an operand. Recognize special options, documented below. */
4890 print_operand (FILE *file, rtx x, int code)
4897 /* Print the assembler name of the current function. */
4898 assemble_name (file, alpha_fnname);
4902 assemble_name (file, get_some_local_dynamic_name ());
4907 const char *trap = get_trap_mode_suffix ();
4908 const char *round = get_round_mode_suffix ();
4911 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4912 (trap ? trap : ""), (round ? round : ""));
4917      /* Generate the single-precision instruction suffix.  */
4918 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4922      /* Generate the double-precision instruction suffix.  */
4923 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4927 if (alpha_this_literal_sequence_number == 0)
4928 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4929 fprintf (file, "%d", alpha_this_literal_sequence_number);
4933 if (alpha_this_gpdisp_sequence_number == 0)
4934 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
4935 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
4939 if (GET_CODE (x) == HIGH)
4940 output_addr_const (file, XEXP (x, 0));
4942 output_operand_lossage ("invalid %%H value");
4949 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
4951 x = XVECEXP (x, 0, 0);
4952 lituse = "lituse_tlsgd";
4954 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
4956 x = XVECEXP (x, 0, 0);
4957 lituse = "lituse_tlsldm";
4959 else if (CONST_INT_P (x))
4960 lituse = "lituse_jsr";
4963 output_operand_lossage ("invalid %%J value");
4967 if (x != const0_rtx)
4968 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4976 #ifdef HAVE_AS_JSRDIRECT_RELOCS
4977 lituse = "lituse_jsrdirect";
4979 lituse = "lituse_jsr";
4982 gcc_assert (INTVAL (x) != 0);
4983 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4987 /* If this operand is the constant zero, write it as "$31". */
4989 fprintf (file, "%s", reg_names[REGNO (x)]);
4990 else if (x == CONST0_RTX (GET_MODE (x)))
4991 fprintf (file, "$31");
4993 output_operand_lossage ("invalid %%r value");
4997 /* Similar, but for floating-point. */
4999 fprintf (file, "%s", reg_names[REGNO (x)]);
5000 else if (x == CONST0_RTX (GET_MODE (x)))
5001 fprintf (file, "$f31");
5003 output_operand_lossage ("invalid %%R value");
5007 /* Write the 1's complement of a constant. */
5008 if (!CONST_INT_P (x))
5009 output_operand_lossage ("invalid %%N value");
5011 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5015 /* Write 1 << C, for a constant C. */
5016 if (!CONST_INT_P (x))
5017 output_operand_lossage ("invalid %%P value");
5019 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5023 /* Write the high-order 16 bits of a constant, sign-extended. */
5024 if (!CONST_INT_P (x))
5025 output_operand_lossage ("invalid %%h value");
5027 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5031 /* Write the low-order 16 bits of a constant, sign-extended. */
5032 if (!CONST_INT_P (x))
5033 output_operand_lossage ("invalid %%L value");
5035 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5036 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
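      /* A worked example of the %h/%L split (illustrative only): for
	 x = 0x12344321, %h prints 0x1234 and %L prints 0x4321, so that
	 (high << 16) + low reconstructs x.  When bit 15 of the low half
	 is set, %L prints a negative value (e.g. 0x8765 becomes -0x789b)
	 and the caller is expected to have pre-adjusted the high part
	 to compensate.  */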
5040 /* Write mask for ZAP insn. */
5041 if (GET_CODE (x) == CONST_DOUBLE)
5043 HOST_WIDE_INT mask = 0;
5044 HOST_WIDE_INT value;
5046 value = CONST_DOUBLE_LOW (x);
5047 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5052 value = CONST_DOUBLE_HIGH (x);
5053 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5056 mask |= (1 << (i + sizeof (int)));
5058 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5061 else if (CONST_INT_P (x))
5063 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5065 for (i = 0; i < 8; i++, value >>= 8)
5069 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5072 output_operand_lossage ("invalid %%m value");
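      /* Illustration (not from the original source): for the CONST_INT
	 value 0x00000000ffff0000 only bytes 2 and 3 are nonzero, so the
	 ZAP byte mask is 0b00001100 and %m prints "12".  */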
5076 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5077 if (!CONST_INT_P (x)
5078 || (INTVAL (x) != 8 && INTVAL (x) != 16
5079 && INTVAL (x) != 32 && INTVAL (x) != 64))
5080 output_operand_lossage ("invalid %%M value");
5082 fprintf (file, "%s",
5083 (INTVAL (x) == 8 ? "b"
5084 : INTVAL (x) == 16 ? "w"
5085 : INTVAL (x) == 32 ? "l"
5090 /* Similar, except do it from the mask. */
5091 if (CONST_INT_P (x))
5093 HOST_WIDE_INT value = INTVAL (x);
5100 if (value == 0xffff)
5105 if (value == 0xffffffff)
5116 else if (HOST_BITS_PER_WIDE_INT == 32
5117 && GET_CODE (x) == CONST_DOUBLE
5118 && CONST_DOUBLE_LOW (x) == 0xffffffff
5119 && CONST_DOUBLE_HIGH (x) == 0)
5124 output_operand_lossage ("invalid %%U value");
5128 /* Write the constant value divided by 8. */
5129 if (!CONST_INT_P (x)
5130 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5131 || (INTVAL (x) & 7) != 0)
5132 output_operand_lossage ("invalid %%s value");
5134 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5138      /* Same, except compute (64 - c) / 8.  */
5140      if (!CONST_INT_P (x)
5141	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5142	  || (INTVAL (x) & 7) != 0)
5143	output_operand_lossage ("invalid %%s value");
5145 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
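      /* Example, assuming the usual byte-aligned shift-count operands:
	 for INTVAL 16, %s prints 2 (16 / 8) while %S prints 6
	 ((64 - 16) / 8).  */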
5148 case 'C': case 'D': case 'c': case 'd':
5149 /* Write out comparison name. */
5151 enum rtx_code c = GET_CODE (x);
5153 if (!COMPARISON_P (x))
5154 output_operand_lossage ("invalid %%C value");
5156 else if (code == 'D')
5157 c = reverse_condition (c);
5158 else if (code == 'c')
5159 c = swap_condition (c);
5160 else if (code == 'd')
5161 c = swap_condition (reverse_condition (c));
5164 fprintf (file, "ule");
5166 fprintf (file, "ult");
5167 else if (c == UNORDERED)
5168 fprintf (file, "un");
5170 fprintf (file, "%s", GET_RTX_NAME (c));
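	/* Illustrative mapping for an (lt x y) comparison: %C prints
	   "lt", %D (reversed) prints "ge", %c (swapped) prints "gt",
	   and %d (swapped and reversed) prints "le".  */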
5175 /* Write the divide or modulus operator. */
5176 switch (GET_CODE (x))
5179 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5182 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5185 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5188 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5191 output_operand_lossage ("invalid %%E value");
5197 /* Write "_u" for unaligned access. */
5198 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5199 fprintf (file, "_u");
5204 fprintf (file, "%s", reg_names[REGNO (x)]);
5206 output_address (XEXP (x, 0));
5207 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5209 switch (XINT (XEXP (x, 0), 1))
5213 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5216 output_operand_lossage ("unknown relocation unspec");
5221 output_addr_const (file, x);
5225 output_operand_lossage ("invalid %%xn code");
5230 print_operand_address (FILE *file, rtx addr)
5233 HOST_WIDE_INT offset = 0;
5235 if (GET_CODE (addr) == AND)
5236 addr = XEXP (addr, 0);
5238 if (GET_CODE (addr) == PLUS
5239 && CONST_INT_P (XEXP (addr, 1)))
5241 offset = INTVAL (XEXP (addr, 1));
5242 addr = XEXP (addr, 0);
5245 if (GET_CODE (addr) == LO_SUM)
5247 const char *reloc16, *reloclo;
5248 rtx op1 = XEXP (addr, 1);
5250 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5252 op1 = XEXP (op1, 0);
5253 switch (XINT (op1, 1))
5257 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5261 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5264 output_operand_lossage ("unknown relocation unspec");
5268 output_addr_const (file, XVECEXP (op1, 0, 0));
5273 reloclo = "gprellow";
5274 output_addr_const (file, op1);
5278 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5280 addr = XEXP (addr, 0);
5281 switch (GET_CODE (addr))
5284 basereg = REGNO (addr);
5288 basereg = subreg_regno (addr);
5295 fprintf (file, "($%d)\t\t!%s", basereg,
5296 (basereg == 29 ? reloc16 : reloclo));
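      /* Example output (hedged): with a 16-bit gp-relative relocation
	 and the gp ($29) as base, the address prints in the form
	 "sym($29)	!gprel"; through any other base register the
	 low-part form "sym($1)	!gprellow" is used instead.  */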
5300 switch (GET_CODE (addr))
5303 basereg = REGNO (addr);
5307 basereg = subreg_regno (addr);
5311 offset = INTVAL (addr);
5314 #if TARGET_ABI_OPEN_VMS
5316 fprintf (file, "%s", XSTR (addr, 0));
5320 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5321 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5322 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5323 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5324 INTVAL (XEXP (XEXP (addr, 0), 1)));
5332 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5335 /* Emit RTL insns to initialize the variable parts of a trampoline at
5336 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5337 for the static chain value for the function. */
5340 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5342 rtx fnaddr, mem, word1, word2;
5344 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5346 #ifdef POINTERS_EXTEND_UNSIGNED
5347 fnaddr = convert_memory_address (Pmode, fnaddr);
5348 chain_value = convert_memory_address (Pmode, chain_value);
5351 if (TARGET_ABI_OPEN_VMS)
5356 /* Construct the name of the trampoline entry point. */
5357 fnname = XSTR (fnaddr, 0);
5358 trname = (char *) alloca (strlen (fnname) + 5);
5359 strcpy (trname, fnname);
5360 strcat (trname, "..tr");
5361 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5362 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5364      /* The trampoline (or "bounded") procedure descriptor is constructed
5365	 from the function's procedure descriptor, with certain fields zeroed
5366	 in accordance with the VMS calling standard.  This is stored in the
	 first quadword.  */
5367 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5368 word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
5372      /* These 4 instructions are:
5373	     ldq $1,24($27)
5374	     ldq $27,16($27)
5375	     jmp $31,($27),0
5376	     nop
5377 We don't bother setting the HINT field of the jump; the nop
5378 is merely there for padding. */
5379 word1 = GEN_INT (0xa77b0010a43b0018);
5380 word2 = GEN_INT (0x47ff041f6bfb0000);
5383 /* Store the first two words, as computed above. */
5384 mem = adjust_address (m_tramp, DImode, 0);
5385 emit_move_insn (mem, word1);
5386 mem = adjust_address (m_tramp, DImode, 8);
5387 emit_move_insn (mem, word2);
5389 /* Store function address and static chain value. */
5390 mem = adjust_address (m_tramp, Pmode, 16);
5391 emit_move_insn (mem, fnaddr);
5392 mem = adjust_address (m_tramp, Pmode, 24);
5393 emit_move_insn (mem, chain_value);
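  /* A sketch of the resulting OSF trampoline layout (illustrative;
     offsets follow the stores above and the code words' encoding):

	offset  0: ldq $1,24($27)	; load static chain
	offset  4: ldq $27,16($27)	; load target address
	offset  8: jmp $31,($27),0
	offset 12: nop
	offset 16: target function address
	offset 24: static chain value

     so the two ldq displacements match the stores at offsets 16 and 24,
     given the OSF convention that $27 holds the trampoline address on
     entry.  */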
5397 emit_insn (gen_imb ());
5398 #ifdef ENABLE_EXECUTE_STACK
5399 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5400 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5405 /* Determine where to put an argument to a function.
5406 Value is zero to push the argument on the stack,
5407 or a hard register in which to store the argument.
5409 MODE is the argument's machine mode.
5410 TYPE is the data type of the argument (as a tree).
5411   This is null for libcalls where that information may
5412   not be available.
5413 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5414 the preceding args and about the function being called.
5415 NAMED is nonzero if this argument is a named parameter
5416 (otherwise it is an extra parameter matching an ellipsis).
5418 On Alpha the first 6 words of args are normally in registers
5419 and the rest are pushed. */
5422 alpha_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5423 const_tree type, bool named ATTRIBUTE_UNUSED)
5428 /* Don't get confused and pass small structures in FP registers. */
5429 if (type && AGGREGATE_TYPE_P (type))
5433 #ifdef ENABLE_CHECKING
5434  /* With alpha_split_complex_arg, we shouldn't see any raw complex
5435     args.  */
5436 gcc_assert (!COMPLEX_MODE_P (mode));
5439 /* Set up defaults for FP operands passed in FP registers, and
5440 integral operands passed in integer registers. */
5441 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5447 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5448 the two platforms, so we can't avoid conditional compilation. */
5449 #if TARGET_ABI_OPEN_VMS
5451 if (mode == VOIDmode)
5452 return alpha_arg_info_reg_val (*cum);
5454 num_args = cum->num_args;
5456 || targetm.calls.must_pass_in_stack (mode, type))
5459 #elif TARGET_ABI_OSF
5465 /* VOID is passed as a special flag for "last argument". */
5466 if (type == void_type_node)
5468 else if (targetm.calls.must_pass_in_stack (mode, type))
5472 #error Unhandled ABI
5475 return gen_rtx_REG (mode, num_args + basereg);
5478 /* Update the data in CUM to advance over an argument
5479 of mode MODE and data type TYPE.
5480 (TYPE is null for libcalls where that information may not be available.) */
5483 alpha_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5484 const_tree type, bool named ATTRIBUTE_UNUSED)
5486 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5487 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5492 if (!onstack && cum->num_args < 6)
5493 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5494 cum->num_args += increment;
5499 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5500 enum machine_mode mode ATTRIBUTE_UNUSED,
5501 tree type ATTRIBUTE_UNUSED,
5502 bool named ATTRIBUTE_UNUSED)
5506 #if TARGET_ABI_OPEN_VMS
5507 if (cum->num_args < 6
5508 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5509 words = 6 - cum->num_args;
5510 #elif TARGET_ABI_OSF
5511 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5514 #error Unhandled ABI
5517 return words * UNITS_PER_WORD;
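/* Worked example (OSF): an argument of two words arriving after five
   argument words have already been consumed has one word passed in the
   last argument register and one on the stack, so this returns 8.  */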
5521 /* Return true if TYPE must be returned in memory, instead of in registers. */
5524 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5526 enum machine_mode mode = VOIDmode;
5531 mode = TYPE_MODE (type);
5533 /* All aggregates are returned in memory, except on OpenVMS where
5534 records that fit 64 bits should be returned by immediate value
5535 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5536 if (TARGET_ABI_OPEN_VMS
5537 && TREE_CODE (type) != ARRAY_TYPE
5538      && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
5541 if (AGGREGATE_TYPE_P (type))
5545 size = GET_MODE_SIZE (mode);
5546 switch (GET_MODE_CLASS (mode))
5548 case MODE_VECTOR_FLOAT:
5549 /* Pass all float vectors in memory, like an aggregate. */
5552 case MODE_COMPLEX_FLOAT:
5553 /* We judge complex floats on the size of their element,
5554 not the size of the whole type. */
5555 size = GET_MODE_UNIT_SIZE (mode);
5560 case MODE_COMPLEX_INT:
5561 case MODE_VECTOR_INT:
5565 /* ??? We get called on all sorts of random stuff from
5566 aggregate_value_p. We must return something, but it's not
5567	 clear what's safe to return.  Pretend it's a struct I
5568	 know nothing about.  */
5572 /* Otherwise types must fit in one register. */
5573 return size > UNITS_PER_WORD;
5576 /* Return true if TYPE should be passed by invisible reference. */
5579 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5580 enum machine_mode mode,
5581 const_tree type ATTRIBUTE_UNUSED,
5582 bool named ATTRIBUTE_UNUSED)
5584 return mode == TFmode || mode == TCmode;
5587 /* Define how to find the value returned by a function. VALTYPE is the
5588 data type of the value (as a tree). If the precise function being
5589 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5590 MODE is set instead of VALTYPE for libcalls.
5592 On Alpha the value is found in $0 for integer functions and
5593 $f0 for floating-point functions. */
5596 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5597 enum machine_mode mode)
5599 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5600 enum mode_class mclass;
5602 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5605 mode = TYPE_MODE (valtype);
5607 mclass = GET_MODE_CLASS (mode);
5611 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5612 where we have them returning both SImode and DImode. */
5613 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5614 PROMOTE_MODE (mode, dummy, valtype);
5617 case MODE_COMPLEX_INT:
5618 case MODE_VECTOR_INT:
5626 case MODE_COMPLEX_FLOAT:
5628 enum machine_mode cmode = GET_MODE_INNER (mode);
5630 return gen_rtx_PARALLEL
5633 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5635 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5636 GEN_INT (GET_MODE_SIZE (cmode)))));
5640 /* We should only reach here for BLKmode on VMS. */
5641 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5649 return gen_rtx_REG (mode, regnum);
5652 /* TCmode complex values are passed by invisible reference. We
5653 should not split these values. */
5656 alpha_split_complex_arg (const_tree type)
5658 return TYPE_MODE (type) != TCmode;
5662 alpha_build_builtin_va_list (void)
5664 tree base, ofs, space, record, type_decl;
5666 if (TARGET_ABI_OPEN_VMS)
5667 return ptr_type_node;
5669 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5670 type_decl = build_decl (BUILTINS_LOCATION,
5671 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5672 TYPE_STUB_DECL (record) = type_decl;
5673 TYPE_NAME (record) = type_decl;
5675 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5677 /* Dummy field to prevent alignment warnings. */
5678 space = build_decl (BUILTINS_LOCATION,
5679 FIELD_DECL, NULL_TREE, integer_type_node);
5680 DECL_FIELD_CONTEXT (space) = record;
5681 DECL_ARTIFICIAL (space) = 1;
5682 DECL_IGNORED_P (space) = 1;
5684 ofs = build_decl (BUILTINS_LOCATION,
5685 FIELD_DECL, get_identifier ("__offset"),
5687 DECL_FIELD_CONTEXT (ofs) = record;
5688 DECL_CHAIN (ofs) = space;
5689 /* ??? This is a hack, __offset is marked volatile to prevent
5690 DCE that confuses stdarg optimization and results in
5691 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5692 TREE_THIS_VOLATILE (ofs) = 1;
5694 base = build_decl (BUILTINS_LOCATION,
5695 FIELD_DECL, get_identifier ("__base"),
5697 DECL_FIELD_CONTEXT (base) = record;
5698 DECL_CHAIN (base) = ofs;
5700 TYPE_FIELDS (record) = base;
5701 layout_type (record);
5703 va_list_gpr_counter_field = ofs;
5708 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5709 and constant additions. */
5712 va_list_skip_additions (tree lhs)
5718 enum tree_code code;
5720 stmt = SSA_NAME_DEF_STMT (lhs);
5722 if (gimple_code (stmt) == GIMPLE_PHI)
5725 if (!is_gimple_assign (stmt)
5726 || gimple_assign_lhs (stmt) != lhs)
5729 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5731 code = gimple_assign_rhs_code (stmt);
5732 if (!CONVERT_EXPR_CODE_P (code)
5733 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5734 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5735 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5738 lhs = gimple_assign_rhs1 (stmt);
5742 /* Check if LHS = RHS statement is
5743     LHS = *(ap.__base + ap.__offset + cst)
5744   or
5745     LHS = *(ap.__base
5746	   + ((ap.__offset + cst <= 47)
5747	      ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5748   If the former, indicate that GPR registers are needed;
5749   if the latter, indicate that FPR registers are needed.
5751 Also look for LHS = (*ptr).field, where ptr is one of the forms
5754 On alpha, cfun->va_list_gpr_size is used as size of the needed
5755 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5756 registers are needed and bit 1 set if FPR registers are needed.
5757 Return true if va_list references should not be scanned for the
5758 current statement. */
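/* For illustration only (the exact GIMPLE shape depends on the
   gimplifier): an integer va_arg load typically looks like
	tmp = ap.__base + ap.__offset;  x = *tmp;
   whereas a floating-point va_arg uses the conditional form above, so
   that FP arguments are fetched from the save area 48 bytes below the
   integer ones.  */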
5761 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5763 tree base, offset, rhs;
5767 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5768 != GIMPLE_SINGLE_RHS)
5771 rhs = gimple_assign_rhs1 (stmt);
5772 while (handled_component_p (rhs))
5773 rhs = TREE_OPERAND (rhs, 0);
5774 if (TREE_CODE (rhs) != MEM_REF
5775 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5778 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5780 || !is_gimple_assign (stmt)
5781 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5784 base = gimple_assign_rhs1 (stmt);
5785 if (TREE_CODE (base) == SSA_NAME)
5787 base_stmt = va_list_skip_additions (base);
5789 && is_gimple_assign (base_stmt)
5790 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5791 base = gimple_assign_rhs1 (base_stmt);
5794 if (TREE_CODE (base) != COMPONENT_REF
5795 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5797 base = gimple_assign_rhs2 (stmt);
5798 if (TREE_CODE (base) == SSA_NAME)
5800 base_stmt = va_list_skip_additions (base);
5802 && is_gimple_assign (base_stmt)
5803 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5804 base = gimple_assign_rhs1 (base_stmt);
5807 if (TREE_CODE (base) != COMPONENT_REF
5808 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5814 base = get_base_address (base);
5815 if (TREE_CODE (base) != VAR_DECL
5816 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5819 offset = gimple_op (stmt, 1 + offset_arg);
5820 if (TREE_CODE (offset) == SSA_NAME)
5822 gimple offset_stmt = va_list_skip_additions (offset);
5825 && gimple_code (offset_stmt) == GIMPLE_PHI)
5828 gimple arg1_stmt, arg2_stmt;
5830 enum tree_code code1, code2;
5832 if (gimple_phi_num_args (offset_stmt) != 2)
5836 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5838 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5839 if (arg1_stmt == NULL
5840 || !is_gimple_assign (arg1_stmt)
5841 || arg2_stmt == NULL
5842 || !is_gimple_assign (arg2_stmt))
5845 code1 = gimple_assign_rhs_code (arg1_stmt);
5846 code2 = gimple_assign_rhs_code (arg2_stmt);
5847 if (code1 == COMPONENT_REF
5848 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5850 else if (code2 == COMPONENT_REF
5851 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5853 gimple tem = arg1_stmt;
5855 arg1_stmt = arg2_stmt;
5861 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5864 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5865 if (code2 == MINUS_EXPR)
5867 if (sub < -48 || sub > -32)
5870 arg1 = gimple_assign_rhs1 (arg1_stmt);
5871 arg2 = gimple_assign_rhs1 (arg2_stmt);
5872 if (TREE_CODE (arg2) == SSA_NAME)
5874 arg2_stmt = va_list_skip_additions (arg2);
5875 if (arg2_stmt == NULL
5876 || !is_gimple_assign (arg2_stmt)
5877 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5879 arg2 = gimple_assign_rhs1 (arg2_stmt);
5884 if (TREE_CODE (arg1) != COMPONENT_REF
5885 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5886 || get_base_address (arg1) != base)
5889 /* Need floating point regs. */
5890 cfun->va_list_fpr_size |= 2;
5894 && is_gimple_assign (offset_stmt)
5895 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
5896 offset = gimple_assign_rhs1 (offset_stmt);
5898 if (TREE_CODE (offset) != COMPONENT_REF
5899 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5900 || get_base_address (offset) != base)
5903 /* Need general regs. */
5904 cfun->va_list_fpr_size |= 1;
5908 si->va_list_escapes = true;
5913 /* Perform any actions needed for a function that is receiving a
5914 variable number of arguments. */
5917 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5918 tree type, int *pretend_size, int no_rtl)
5920 CUMULATIVE_ARGS cum = *pcum;
5922 /* Skip the current argument. */
5923 targetm.calls.function_arg_advance (&cum, mode, type, true);
5925 #if TARGET_ABI_OPEN_VMS
5926 /* For VMS, we allocate space for all 6 arg registers plus a count.
5928 However, if NO registers need to be saved, don't allocate any space.
5929 This is not only because we won't need the space, but because AP
5930 includes the current_pretend_args_size and we don't want to mess up
5931 any ap-relative addresses already made. */
5932 if (cum.num_args < 6)
5936 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
5937 emit_insn (gen_arg_home ());
5939 *pretend_size = 7 * UNITS_PER_WORD;
5942 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
5943 only push those that are remaining. However, if NO registers need to
5944 be saved, don't allocate any space. This is not only because we won't
5945 need the space, but because AP includes the current_pretend_args_size
5946 and we don't want to mess up any ap-relative addresses already made.
5948 If we are not to use the floating-point registers, save the integer
5949 registers where we would put the floating-point registers. This is
5950 not the most efficient way to implement varargs with just one register
5951   class, but it isn't worth doing anything more efficient in this rare
5952   case.  */
5959 alias_set_type set = get_varargs_alias_set ();
5962 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
5963 if (count > 6 - cum)
5966 /* Detect whether integer registers or floating-point registers
5967 are needed by the detected va_arg statements. See above for
5968 how these values are computed. Note that the "escape" value
5969	 is VA_LIST_MAX_FPR_SIZE, i.e. 255, which has both of
5970	 these bits set.  */
5971 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
5973 if (cfun->va_list_fpr_size & 1)
5975 tmp = gen_rtx_MEM (BLKmode,
5976 plus_constant (virtual_incoming_args_rtx,
5977 (cum + 6) * UNITS_PER_WORD));
5978 MEM_NOTRAP_P (tmp) = 1;
5979 set_mem_alias_set (tmp, set);
5980 move_block_from_reg (16 + cum, tmp, count);
5983 if (cfun->va_list_fpr_size & 2)
5985 tmp = gen_rtx_MEM (BLKmode,
5986 plus_constant (virtual_incoming_args_rtx,
5987 cum * UNITS_PER_WORD));
5988 MEM_NOTRAP_P (tmp) = 1;
5989 set_mem_alias_set (tmp, set);
5990 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
5993 *pretend_size = 12 * UNITS_PER_WORD;
5998 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6000 HOST_WIDE_INT offset;
6001 tree t, offset_field, base_field;
6003 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6006 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6007 up by 48, storing fp arg registers in the first 48 bytes, and the
6008 integer arg registers in the next 48 bytes. This is only done,
6009 however, if any integer registers need to be stored.
6011 If no integer registers need be stored, then we must subtract 48
6012 in order to account for the integer arg registers which are counted
6013 in argsize above, but which are not actually stored on the stack.
6014 Must further be careful here about structures straddling the last
6015 integer argument register; that futzes with pretend_args_size,
6016 which changes the meaning of AP. */
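  /* Hedged example: for an OSF varargs function with two named integer
     arguments, NUM_ARGS is 2, so __offset starts at 16 and __base points
     at the register save area; each subsequent va_arg advances __offset
     by a multiple of 8.  */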
6019 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6021 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6023 if (TARGET_ABI_OPEN_VMS)
6025 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6026 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6027 size_int (offset + NUM_ARGS * UNITS_PER_WORD));
6028 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6029 TREE_SIDE_EFFECTS (t) = 1;
6030 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6034 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6035 offset_field = DECL_CHAIN (base_field);
6037 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6038 valist, base_field, NULL_TREE);
6039 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6040 valist, offset_field, NULL_TREE);
6042 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6043 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6045 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6046 TREE_SIDE_EFFECTS (t) = 1;
6047 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6049 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6050 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6051 TREE_SIDE_EFFECTS (t) = 1;
6052 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6057 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6060 tree type_size, ptr_type, addend, t, addr;
6061 gimple_seq internal_post;
6063 /* If the type could not be passed in registers, skip the block
6064 reserved for the registers. */
6065 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6067 t = build_int_cst (TREE_TYPE (offset), 6*8);
6068 gimplify_assign (offset,
6069 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6074 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6076 if (TREE_CODE (type) == COMPLEX_TYPE)
6078 tree real_part, imag_part, real_temp;
6080 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6083 /* Copy the value into a new temporary, lest the formal temporary
6084 be reused out from under us. */
6085 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6087 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6090 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6092 else if (TREE_CODE (type) == REAL_TYPE)
6094 tree fpaddend, cond, fourtyeight;
6096 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6097 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6098 addend, fourtyeight);
6099 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6100 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6104 /* Build the final address and force that value into a temporary. */
6105 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6106 fold_convert (sizetype, addend));
6107 internal_post = NULL;
6108 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6109 gimple_seq_add_seq (pre_p, internal_post);
6111 /* Update the offset field. */
6112 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6113 if (type_size == NULL || TREE_OVERFLOW (type_size))
6117 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6118 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6119 t = size_binop (MULT_EXPR, t, size_int (8));
6121 t = fold_convert (TREE_TYPE (offset), t);
6122 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6125 return build_va_arg_indirect_ref (addr);
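/* Example of the slot rounding above: a 12-byte type occupies
   (12 + 7) / 8 * 8 = 16 bytes, keeping __offset 8-byte aligned.  */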
6129 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6132 tree offset_field, base_field, offset, base, t, r;
6135 if (TARGET_ABI_OPEN_VMS)
6136 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6138 base_field = TYPE_FIELDS (va_list_type_node);
6139 offset_field = DECL_CHAIN (base_field);
6140 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6141 valist, base_field, NULL_TREE);
6142 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6143 valist, offset_field, NULL_TREE);
6145 /* Pull the fields of the structure out into temporaries. Since we never
6146 modify the base field, we can use a formal temporary. Sign-extend the
6147 offset field so that it's the proper width for pointer arithmetic. */
6148 base = get_formal_tmp_var (base_field, pre_p);
6150 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6151 offset = get_initialized_tmp_var (t, pre_p, NULL);
6153 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6155 type = build_pointer_type_for_mode (type, ptr_mode, true);
6157 /* Find the value. Note that this will be a stable indirection, or
6158 a composite of stable indirections in the case of complex. */
6159 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6161 /* Stuff the offset temporary back into its field. */
6162 gimplify_assign (unshare_expr (offset_field),
6163 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6166 r = build_va_arg_indirect_ref (r);
6175 ALPHA_BUILTIN_CMPBGE,
6176 ALPHA_BUILTIN_EXTBL,
6177 ALPHA_BUILTIN_EXTWL,
6178 ALPHA_BUILTIN_EXTLL,
6179 ALPHA_BUILTIN_EXTQL,
6180 ALPHA_BUILTIN_EXTWH,
6181 ALPHA_BUILTIN_EXTLH,
6182 ALPHA_BUILTIN_EXTQH,
6183 ALPHA_BUILTIN_INSBL,
6184 ALPHA_BUILTIN_INSWL,
6185 ALPHA_BUILTIN_INSLL,
6186 ALPHA_BUILTIN_INSQL,
6187 ALPHA_BUILTIN_INSWH,
6188 ALPHA_BUILTIN_INSLH,
6189 ALPHA_BUILTIN_INSQH,
6190 ALPHA_BUILTIN_MSKBL,
6191 ALPHA_BUILTIN_MSKWL,
6192 ALPHA_BUILTIN_MSKLL,
6193 ALPHA_BUILTIN_MSKQL,
6194 ALPHA_BUILTIN_MSKWH,
6195 ALPHA_BUILTIN_MSKLH,
6196 ALPHA_BUILTIN_MSKQH,
6197 ALPHA_BUILTIN_UMULH,
6199 ALPHA_BUILTIN_ZAPNOT,
6200 ALPHA_BUILTIN_AMASK,
6201 ALPHA_BUILTIN_IMPLVER,
6203 ALPHA_BUILTIN_THREAD_POINTER,
6204 ALPHA_BUILTIN_SET_THREAD_POINTER,
6205 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6206 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6209 ALPHA_BUILTIN_MINUB8,
6210 ALPHA_BUILTIN_MINSB8,
6211 ALPHA_BUILTIN_MINUW4,
6212 ALPHA_BUILTIN_MINSW4,
6213 ALPHA_BUILTIN_MAXUB8,
6214 ALPHA_BUILTIN_MAXSB8,
6215 ALPHA_BUILTIN_MAXUW4,
6216 ALPHA_BUILTIN_MAXSW4,
6220 ALPHA_BUILTIN_UNPKBL,
6221 ALPHA_BUILTIN_UNPKBW,
6226 ALPHA_BUILTIN_CTPOP,
6231 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6232 CODE_FOR_builtin_cmpbge,
6240 CODE_FOR_builtin_insbl,
6241 CODE_FOR_builtin_inswl,
6242 CODE_FOR_builtin_insll,
6254 CODE_FOR_umuldi3_highpart,
6255 CODE_FOR_builtin_zap,
6256 CODE_FOR_builtin_zapnot,
6257 CODE_FOR_builtin_amask,
6258 CODE_FOR_builtin_implver,
6259 CODE_FOR_builtin_rpcc,
6262 CODE_FOR_builtin_establish_vms_condition_handler,
6263 CODE_FOR_builtin_revert_vms_condition_handler,
6266 CODE_FOR_builtin_minub8,
6267 CODE_FOR_builtin_minsb8,
6268 CODE_FOR_builtin_minuw4,
6269 CODE_FOR_builtin_minsw4,
6270 CODE_FOR_builtin_maxub8,
6271 CODE_FOR_builtin_maxsb8,
6272 CODE_FOR_builtin_maxuw4,
6273 CODE_FOR_builtin_maxsw4,
6274 CODE_FOR_builtin_perr,
6275 CODE_FOR_builtin_pklb,
6276 CODE_FOR_builtin_pkwb,
6277 CODE_FOR_builtin_unpkbl,
6278 CODE_FOR_builtin_unpkbw,
6283 CODE_FOR_popcountdi2
6286 struct alpha_builtin_def
6289 enum alpha_builtin code;
6290 unsigned int target_mask;
6294 static struct alpha_builtin_def const zero_arg_builtins[] = {
6295 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6296 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6299 static struct alpha_builtin_def const one_arg_builtins[] = {
6300 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6301 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6302 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6303 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6304 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6305 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6306 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6307 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6310 static struct alpha_builtin_def const two_arg_builtins[] = {
6311 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6312 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6313 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6314 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6315 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6316 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6317 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6318 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6319 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6320 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6321 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6322 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6323 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6324 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6325 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6326 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6327 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6328 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6329 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6330 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6331 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6332 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6333 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6334 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6335 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6336 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6337 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6338 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6339 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6340 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6341 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6342 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6343 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6344 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6347 static GTY(()) tree alpha_v8qi_u;
6348 static GTY(()) tree alpha_v8qi_s;
6349 static GTY(()) tree alpha_v4hi_u;
6350 static GTY(()) tree alpha_v4hi_s;
6352 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6354 /* Return the alpha builtin for CODE. */
6357 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6359 if (code >= ALPHA_BUILTIN_max)
6360 return error_mark_node;
6361 return alpha_builtins[code];
6364 /* Helper function of alpha_init_builtins. Add the built-in specified
6365 by NAME, TYPE, CODE, and ECF. */
6368 alpha_builtin_function (const char *name, tree ftype,
6369 enum alpha_builtin code, unsigned ecf)
6371 tree decl = add_builtin_function (name, ftype, (int) code,
6372 BUILT_IN_MD, NULL, NULL_TREE);
6374 if (ecf & ECF_CONST)
6375 TREE_READONLY (decl) = 1;
6376 if (ecf & ECF_NOTHROW)
6377 TREE_NOTHROW (decl) = 1;
6379 alpha_builtins [(int) code] = decl;
6382 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6383 functions pointed to by P, with function type FTYPE. */
6386 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6391 for (i = 0; i < count; ++i, ++p)
6392 if ((target_flags & p->target_mask) == p->target_mask)
6393 alpha_builtin_function (p->name, ftype, p->code,
6394 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6398 alpha_init_builtins (void)
6400 tree dimode_integer_type_node;
6403 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6405 /* Fwrite on VMS is non-standard. */
6406 #if TARGET_ABI_OPEN_VMS
6407 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
6408 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
6411 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6412 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6415 ftype = build_function_type_list (dimode_integer_type_node,
6416 dimode_integer_type_node, NULL_TREE);
6417 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6420 ftype = build_function_type_list (dimode_integer_type_node,
6421 dimode_integer_type_node,
6422 dimode_integer_type_node, NULL_TREE);
6423 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6426 ftype = build_function_type (ptr_type_node, void_list_node);
6427 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6428 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6430 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6431 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6432 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6434 if (TARGET_ABI_OPEN_VMS)
6436 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6438 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6440 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6443 ftype = build_function_type_list (ptr_type_node, void_type_node,
6445 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6446 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6449 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6450 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6451 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6452 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6455 /* Expand an expression EXP that calls a built-in function,
6456 with result going to TARGET if that's convenient
6457 (and in mode MODE if that's convenient).
6458 SUBTARGET may be used as the target for computing one of EXP's operands.
6459 IGNORE is nonzero if the value is to be ignored. */
6462 alpha_expand_builtin (tree exp, rtx target,
6463 rtx subtarget ATTRIBUTE_UNUSED,
6464 enum machine_mode mode ATTRIBUTE_UNUSED,
6465 int ignore ATTRIBUTE_UNUSED)
6469 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6470 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6472 call_expr_arg_iterator iter;
6473 enum insn_code icode;
6474 rtx op[MAX_ARGS], pat;
6478 if (fcode >= ALPHA_BUILTIN_max)
6479 internal_error ("bad builtin fcode");
6480 icode = code_for_builtin[fcode];
6482 internal_error ("bad builtin fcode");
6484 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6487 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6489 const struct insn_operand_data *insn_op;
6491 if (arg == error_mark_node)
6493 if (arity > MAX_ARGS)
6496 insn_op = &insn_data[icode].operand[arity + nonvoid];
6498 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6500 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6501 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6507 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6509 || GET_MODE (target) != tmode
6510 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6511 target = gen_reg_rtx (tmode);
6517 pat = GEN_FCN (icode) (target);
6521 pat = GEN_FCN (icode) (target, op[0]);
6523 pat = GEN_FCN (icode) (op[0]);
6526 pat = GEN_FCN (icode) (target, op[0], op[1]);
6542 /* Several bits below assume HWI >= 64 bits.  This should be enforced
6543    by config.gcc.  */
6544 #if HOST_BITS_PER_WIDE_INT < 64
6545 # error "HOST_WIDE_INT too small"
6548 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6549 with an 8-bit output vector. OPINT contains the integer operands; bit N
6550 of OP_CONST is set if OPINT[N] is valid. */
6553 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6558 for (i = 0, val = 0; i < 8; ++i)
6560 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6561 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6565 return build_int_cst (long_integer_type_node, val);
6567 else if (op_const == 2 && opint[1] == 0)
6568 return build_int_cst (long_integer_type_node, 0xff);
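/* Worked example (illustrative): cmpbge (0x1122334455667788,
   0x2222222222222222) sets bit I when byte I of the first operand is
   (unsigned) >= 0x22; bytes 0-6 qualify but byte 7 (0x11) does not,
   giving 0x7f.  */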
6572 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6573 specialized form of an AND operation. Other byte manipulation instructions
6574 are defined in terms of this instruction, so this is also used as a
6575 subroutine for other builtins.
6577 OP contains the tree operands; OPINT contains the extracted integer values.
6578   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6579 OPINT may be considered. */
6582 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6587 unsigned HOST_WIDE_INT mask = 0;
6590 for (i = 0; i < 8; ++i)
6591 if ((opint[1] >> i) & 1)
6592 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6595 return build_int_cst (long_integer_type_node, opint[0] & mask);
6598 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6599 build_int_cst (long_integer_type_node, mask));
6601 else if ((op_const & 1) && opint[0] == 0)
6602 return build_int_cst (long_integer_type_node, 0);
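/* Illustration: zapnot (0x1122334455667788, 0x0f) keeps bytes 0-3,
   i.e. ANDs with 0x00000000ffffffff, yielding 0x55667788.  */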
6606 /* Fold the builtins for the EXT family of instructions. */
6609 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6610 long op_const, unsigned HOST_WIDE_INT bytemask,
6614 tree *zap_op = NULL;
6618 unsigned HOST_WIDE_INT loc;
6621 loc *= BITS_PER_UNIT;
6627 unsigned HOST_WIDE_INT temp = opint[0];
6640 opint[1] = bytemask;
6641 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6644 /* Fold the builtins for the INS family of instructions. */
6647 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6648 long op_const, unsigned HOST_WIDE_INT bytemask,
6651 if ((op_const & 1) && opint[0] == 0)
6652 return build_int_cst (long_integer_type_node, 0);
6656 unsigned HOST_WIDE_INT temp, loc, byteloc;
6657 tree *zap_op = NULL;
6665 byteloc = (64 - (loc * 8)) & 0x3f;
6682 opint[1] = bytemask;
6683 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6690 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6691 long op_const, unsigned HOST_WIDE_INT bytemask,
6696 unsigned HOST_WIDE_INT loc;
6704 opint[1] = bytemask ^ 0xff;
6707 return alpha_fold_builtin_zapnot (op, opint, op_const);
6711 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6717 unsigned HOST_WIDE_INT l;
6720 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6722 #if HOST_BITS_PER_WIDE_INT > 64
6726 return build_int_cst (long_integer_type_node, h);
6730 opint[1] = opint[0];
6733 /* Note that (X*1) >> 64 == 0. */
6734 if (opint[1] == 0 || opint[1] == 1)
6735 return build_int_cst (long_integer_type_node, 0);
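/* Example: umulh (~0UL, 2) computes the high 64 bits of
   0xffffffffffffffff * 2 == 0x1fffffffffffffffe, i.e. 1.  */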
6742 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6744 tree op0 = fold_convert (vtype, op[0]);
6745 tree op1 = fold_convert (vtype, op[1]);
6746 tree val = fold_build2 (code, vtype, op0, op1);
6747 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6751 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6753 unsigned HOST_WIDE_INT temp = 0;
6759 for (i = 0; i < 8; ++i)
6761 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6762 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6769 return build_int_cst (long_integer_type_node, temp);
6773 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6775 unsigned HOST_WIDE_INT temp;
6780 temp = opint[0] & 0xff;
6781 temp |= (opint[0] >> 24) & 0xff00;
6783 return build_int_cst (long_integer_type_node, temp);
6787 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6789 unsigned HOST_WIDE_INT temp;
6794 temp = opint[0] & 0xff;
6795 temp |= (opint[0] >> 8) & 0xff00;
6796 temp |= (opint[0] >> 16) & 0xff0000;
6797 temp |= (opint[0] >> 24) & 0xff000000;
6799 return build_int_cst (long_integer_type_node, temp);
6803 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6805 unsigned HOST_WIDE_INT temp;
6810 temp = opint[0] & 0xff;
6811 temp |= (opint[0] & 0xff00) << 24;
6813 return build_int_cst (long_integer_type_node, temp);
6817 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6819 unsigned HOST_WIDE_INT temp;
6824 temp = opint[0] & 0xff;
6825 temp |= (opint[0] & 0x0000ff00) << 8;
6826 temp |= (opint[0] & 0x00ff0000) << 16;
6827 temp |= (opint[0] & 0xff000000) << 24;
6829 return build_int_cst (long_integer_type_node, temp);
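/* Illustration: unpkbw spreads the low four bytes into the even byte
   positions, so 0x44332211 unpacks to 0x0044003300220011.  */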
6833 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6835 unsigned HOST_WIDE_INT temp;
6843 temp = exact_log2 (opint[0] & -opint[0]);
6845 return build_int_cst (long_integer_type_node, temp);
6849 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6851 unsigned HOST_WIDE_INT temp;
6859 temp = 64 - floor_log2 (opint[0]) - 1;
6861 return build_int_cst (long_integer_type_node, temp);
6865 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6867 unsigned HOST_WIDE_INT temp, op;
6875 temp++, op &= op - 1;
6877 return build_int_cst (long_integer_type_node, temp);
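/* The loop above relies on the classic OP &= OP - 1 trick, which clears
   the lowest set bit on each iteration; e.g. 0b101100 takes exactly
   three iterations, matching its population count.  */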
6880 /* Fold one of our builtin functions. */
6883 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6884 bool ignore ATTRIBUTE_UNUSED)
6886 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6890 if (n_args >= MAX_ARGS)
6893 for (i = 0; i < n_args; i++)
6896 if (arg == error_mark_node)
6900 if (TREE_CODE (arg) == INTEGER_CST)
6902 op_const |= 1L << i;
6903 opint[i] = int_cst_value (arg);
6907 switch (DECL_FUNCTION_CODE (fndecl))
6909 case ALPHA_BUILTIN_CMPBGE:
6910 return alpha_fold_builtin_cmpbge (opint, op_const);
6912 case ALPHA_BUILTIN_EXTBL:
6913 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6914 case ALPHA_BUILTIN_EXTWL:
6915 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6916 case ALPHA_BUILTIN_EXTLL:
6917 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6918 case ALPHA_BUILTIN_EXTQL:
6919 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6920 case ALPHA_BUILTIN_EXTWH:
6921 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6922 case ALPHA_BUILTIN_EXTLH:
6923 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6924 case ALPHA_BUILTIN_EXTQH:
6925 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6927 case ALPHA_BUILTIN_INSBL:
6928 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6929 case ALPHA_BUILTIN_INSWL:
6930 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6931 case ALPHA_BUILTIN_INSLL:
6932 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6933 case ALPHA_BUILTIN_INSQL:
6934 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6935 case ALPHA_BUILTIN_INSWH:
6936 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6937 case ALPHA_BUILTIN_INSLH:
6938 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6939 case ALPHA_BUILTIN_INSQH:
6940 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6942 case ALPHA_BUILTIN_MSKBL:
6943 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6944 case ALPHA_BUILTIN_MSKWL:
6945 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6946 case ALPHA_BUILTIN_MSKLL:
6947 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6948 case ALPHA_BUILTIN_MSKQL:
6949 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6950 case ALPHA_BUILTIN_MSKWH:
6951 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6952 case ALPHA_BUILTIN_MSKLH:
6953 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6954 case ALPHA_BUILTIN_MSKQH:
6955 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6957 case ALPHA_BUILTIN_UMULH:
6958 return alpha_fold_builtin_umulh (opint, op_const);
6960 case ALPHA_BUILTIN_ZAP:
6963 case ALPHA_BUILTIN_ZAPNOT:
6964 return alpha_fold_builtin_zapnot (op, opint, op_const);
6966 case ALPHA_BUILTIN_MINUB8:
6967 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
6968 case ALPHA_BUILTIN_MINSB8:
6969 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
6970 case ALPHA_BUILTIN_MINUW4:
6971 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
6972 case ALPHA_BUILTIN_MINSW4:
6973 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
6974 case ALPHA_BUILTIN_MAXUB8:
6975 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
6976 case ALPHA_BUILTIN_MAXSB8:
6977 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
6978 case ALPHA_BUILTIN_MAXUW4:
6979 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
6980 case ALPHA_BUILTIN_MAXSW4:
6981 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
6983 case ALPHA_BUILTIN_PERR:
6984 return alpha_fold_builtin_perr (opint, op_const);
6985 case ALPHA_BUILTIN_PKLB:
6986 return alpha_fold_builtin_pklb (opint, op_const);
6987 case ALPHA_BUILTIN_PKWB:
6988 return alpha_fold_builtin_pkwb (opint, op_const);
6989 case ALPHA_BUILTIN_UNPKBL:
6990 return alpha_fold_builtin_unpkbl (opint, op_const);
6991 case ALPHA_BUILTIN_UNPKBW:
6992 return alpha_fold_builtin_unpkbw (opint, op_const);
6994 case ALPHA_BUILTIN_CTTZ:
6995 return alpha_fold_builtin_cttz (opint, op_const);
6996 case ALPHA_BUILTIN_CTLZ:
6997 return alpha_fold_builtin_ctlz (opint, op_const);
6998 case ALPHA_BUILTIN_CTPOP:
6999 return alpha_fold_builtin_ctpop (opint, op_const);
7001 case ALPHA_BUILTIN_AMASK:
7002 case ALPHA_BUILTIN_IMPLVER:
7003 case ALPHA_BUILTIN_RPCC:
7004 case ALPHA_BUILTIN_THREAD_POINTER:
7005 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7006 /* None of these are foldable at compile-time. */
7012 /* This page contains routines that are used to determine what the function
7013    prologue and epilogue code will do, and to write them out.  */
7015 /* Compute the size of the save area in the stack. */
7017 /* These variables are used for communication between the following functions.
7018 They indicate various things about the current function being compiled
7019 that are used to tell what kind of prologue, epilogue and procedure
7020 descriptor to generate. */
7022 /* Nonzero if we need a stack procedure. */
7023 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7024 static enum alpha_procedure_types alpha_procedure_type;
7026 /* Register number (either FP or SP) that is used to unwind the frame. */
7027 static int vms_unwind_regno;
7029 /* Register number used to save FP. We need not have one for RA since
7030 we don't modify it for register procedures. This is only defined
7031 for register frame procedures. */
7032 static int vms_save_fp_regno;
7034 /* Register number used to reference objects off our PV. */
7035 static int vms_base_regno;
7037 /* Compute register masks for saved registers. */
7040 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7042 unsigned long imask = 0;
7043 unsigned long fmask = 0;
7046 /* When outputting a thunk, we don't have valid register life info,
7047 but assemble_start_function wants to output .frame and .mask
7056 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7057 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7059 /* One for every register we have to save. */
7060 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7061 if (! fixed_regs[i] && ! call_used_regs[i]
7062 && df_regs_ever_live_p (i) && i != REG_RA)
7065 imask |= (1UL << i);
7067 fmask |= (1UL << (i - 32));
7070 /* We need to restore these for the handler. */
7071 if (crtl->calls_eh_return)
7075 unsigned regno = EH_RETURN_DATA_REGNO (i);
7076 if (regno == INVALID_REGNUM)
7078 imask |= 1UL << regno;
7082 /* If any register spilled, then spill the return address also. */
7083 /* ??? This is required by the Digital stack unwind specification
7084 and isn't needed if we're doing Dwarf2 unwinding. */
7085 if (imask || fmask || alpha_ra_ever_killed ())
7086 imask |= (1UL << REG_RA);
7093 alpha_sa_size (void)
7095 unsigned long mask[2];
7099 alpha_sa_mask (&mask[0], &mask[1]);
7101 for (j = 0; j < 2; ++j)
7102 for (i = 0; i < 32; ++i)
7103 if ((mask[j] >> i) & 1)
7106 if (TARGET_ABI_OPEN_VMS)
7108 /* Start with a stack procedure if we make any calls (REG_RA used), or
7109 need a frame pointer, with a register procedure if we otherwise need
7110 at least a slot, and with a null procedure in other cases. */
7111 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7112 alpha_procedure_type = PT_STACK;
7113 else if (get_frame_size() != 0)
7114 alpha_procedure_type = PT_REGISTER;
7116 alpha_procedure_type = PT_NULL;
7118 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7119 made the final decision on stack procedure vs register procedure. */
7120 if (alpha_procedure_type == PT_STACK)
7123 /* Decide whether to refer to objects off our PV via FP or PV.
7124 If we need FP for something else or if we receive a nonlocal
7125 goto (which expects PV to contain the value), we must use PV.
7126 Otherwise, start by assuming we can use FP. */
7129 = (frame_pointer_needed
7130 || cfun->has_nonlocal_label
7131 || alpha_procedure_type == PT_STACK
7132 || crtl->outgoing_args_size)
7133 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7135 /* If we want to copy PV into FP, we need to find some register
7136 in which to save FP. */
7138 vms_save_fp_regno = -1;
7139 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7140 for (i = 0; i < 32; i++)
7141 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7142 vms_save_fp_regno = i;
7144      /* A VMS condition handler requires a stack procedure in our
7145	 implementation (this is not required by the calling standard).  */
7146 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7147 || cfun->machine->uses_condition_handler)
7148 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7149 else if (alpha_procedure_type == PT_NULL)
7150 vms_base_regno = REG_PV;
7152 /* Stack unwinding should be done via FP unless we use it for PV. */
7153 vms_unwind_regno = (vms_base_regno == REG_PV
7154 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7156 /* If this is a stack procedure, allow space for saving FP, RA and
7157 a condition handler slot if needed. */
7158 if (alpha_procedure_type == PT_STACK)
7159 sa_size += 2 + cfun->machine->uses_condition_handler;
7163 /* Our size must be even (multiple of 16 bytes). */
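  /* Hedged example: a stack procedure saving three registers plus the
     FP/RA pair and a condition handler slot needs 3 + 2 + 1 = 6 save
     slots, i.e. 48 bytes, already an even number of quadwords.  */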
7171 /* Define the offset between two registers, one to be eliminated,
7172 and the other its replacement, at the start of a routine. */
7175 alpha_initial_elimination_offset (unsigned int from,
7176 unsigned int to ATTRIBUTE_UNUSED)
7180 ret = alpha_sa_size ();
7181 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7185 case FRAME_POINTER_REGNUM:
7188 case ARG_POINTER_REGNUM:
7189 ret += (ALPHA_ROUND (get_frame_size ()
7190 + crtl->args.pretend_args_size)
7191 - crtl->args.pretend_args_size);
7201 #if TARGET_ABI_OPEN_VMS
7203 /* Worker function for TARGET_CAN_ELIMINATE. */
7206 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7208 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7211 switch (alpha_procedure_type)
7214 /* NULL procedures have no frame of their own and we only
7215 know how to resolve from the current stack pointer. */
7216 return to == STACK_POINTER_REGNUM;
7220 /* We always eliminate except to the stack pointer if there is no
7221 usable frame pointer at hand. */
7222 return (to != STACK_POINTER_REGNUM
7223 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7229 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7230 designates the same location as FROM. */
7233 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7235 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7236 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7237 on the proper computations and will need the register save area size
7238 in most cases. */
7240 HOST_WIDE_INT sa_size = alpha_sa_size ();
7242 /* PT_NULL procedures have no frame of their own and we only allow
7243 elimination to the stack pointer. This is the argument pointer and we
7244 resolve the soft frame pointer to that as well. */
7246 if (alpha_procedure_type == PT_NULL)
7247 return 0;
7249 /* For a PT_STACK procedure the frame layout looks as follows
7251 -----> decreasing addresses
7253 < size rounded up to 16 | likewise >
7254 --------------#------------------------------+++--------------+++-------#
7255 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7256 --------------#---------------------------------------------------------#
7258 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7261 PT_REGISTER procedures are similar in that they may have a frame of their
7262 own. They have no regs-sa/pv/outgoing-args area.
7264 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7265 to STACK_PTR if need be. */
7268 HOST_WIDE_INT offset;
7269 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7273 case FRAME_POINTER_REGNUM:
7274 offset = ALPHA_ROUND (sa_size + pv_save_size);
7276 case ARG_POINTER_REGNUM:
7277 offset = (ALPHA_ROUND (sa_size + pv_save_size
7278 + get_frame_size ()
7279 + crtl->args.pretend_args_size)
7280 - crtl->args.pretend_args_size);
7286 if (to == STACK_POINTER_REGNUM)
7287 offset += ALPHA_ROUND (crtl->outgoing_args_size);
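/* Worked example (illustrative values only): with sa_size = 32,
   pv_save_size = 8, a 16-byte frame and 16 bytes of pretend args,

     FRAME_PTR -> HARD_FRAME_PTR: ALPHA_ROUND (32 + 8) = 48
     ARG_PTR   -> HARD_FRAME_PTR: ALPHA_ROUND (32 + 8 + 16 + 16) - 16 = 64

   and eliminating to STACK_PTR adds ALPHA_ROUND (outgoing_args_size)
   on top, per the layout pictured above.  */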
7293 #define COMMON_OBJECT "common_object"
7296 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7297 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7298 bool *no_add_attrs ATTRIBUTE_UNUSED)
7300 tree decl = *node;
7301 gcc_assert (DECL_P (decl));
7303 DECL_COMMON (decl) = 1;
7307 static const struct attribute_spec vms_attribute_table[] =
7309 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7310 affects_type_identity } */
7311 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7312 { NULL, 0, 0, false, false, false, NULL, false }
7316 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7317 unsigned HOST_WIDE_INT size,
7320 tree attr = DECL_ATTRIBUTES (decl);
7321 fprintf (file, "%s", COMMON_ASM_OP);
7322 assemble_name (file, name);
7323 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7324 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7325 fprintf (file, ",%u", align / BITS_PER_UNIT);
7328 attr = lookup_attribute (COMMON_OBJECT, attr);
7330 fprintf (file, ",%s",
7331 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7336 #undef COMMON_OBJECT
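/* For a 4-byte object FOO with 4-byte alignment, and assuming
   COMMON_ASM_OP prints as ".comm\t", the function above would emit

     .comm FOO,4,4

   with a trailing ",name" operand appended when the common_object
   attribute carries an argument.  */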
7341 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7343 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7347 alpha_find_lo_sum_using_gp (rtx insn)
7349 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7353 alpha_does_function_need_gp (void)
7357 /* The GP being variable is an OSF abi thing. */
7358 if (! TARGET_ABI_OSF)
7361 /* We need the gp to load the address of __mcount. */
7362 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7365 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7369 /* The nonlocal receiver pattern assumes that the gp is valid for
7370 the nested function. Reasonable because it's almost always set
7371 correctly already. For the cases where that's wrong, make sure
7372 the nested function loads its gp on entry. */
7373 if (crtl->has_nonlocal_goto)
7376 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7377 Even if we are a static function, we still need to do this in case
7378 our address is taken and passed to something like qsort. */
7380 push_topmost_sequence ();
7381 insn = get_insns ();
7382 pop_topmost_sequence ();
7384 for (; insn; insn = NEXT_INSN (insn))
7385 if (NONDEBUG_INSN_P (insn)
7386 && ! JUMP_TABLE_DATA_P (insn)
7387 && GET_CODE (PATTERN (insn)) != USE
7388 && GET_CODE (PATTERN (insn)) != CLOBBER
7389 && get_attr_usegp (insn))
7396 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7400 set_frame_related_p (void)
7402 rtx seq = get_insns ();
7413 while (insn != NULL_RTX)
7415 RTX_FRAME_RELATED_P (insn) = 1;
7416 insn = NEXT_INSN (insn);
7418 seq = emit_insn (seq);
7422 seq = emit_insn (seq);
7423 RTX_FRAME_RELATED_P (seq) = 1;
7428 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
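/* Typical use, as in the prologue below:

     FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				 GEN_INT (-frame_size))));

   opens a sequence, emits the addition, and lets set_frame_related_p
   mark every insn of the sequence RTX_FRAME_RELATED_P.  */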
7430 /* Generates a store with the proper unwind info attached. VALUE is
7431 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7432 contains SP+FRAME_BIAS, and that is the unwind info that should be
7433 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7434 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7437 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7438 HOST_WIDE_INT base_ofs, rtx frame_reg)
7440 rtx addr, mem, insn;
7442 addr = plus_constant (base_reg, base_ofs);
7443 mem = gen_frame_mem (DImode, addr);
7445 insn = emit_move_insn (mem, value);
7446 RTX_FRAME_RELATED_P (insn) = 1;
7448 if (frame_bias || value != frame_reg)
7452 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7453 mem = gen_rtx_MEM (DImode, addr);
7456 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7457 gen_rtx_SET (VOIDmode, mem, frame_reg));
7462 emit_frame_store (unsigned int regno, rtx base_reg,
7463 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7465 rtx reg = gen_rtx_REG (DImode, regno);
7466 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
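/* For instance (illustrative values only), with a biased save register
   sa_reg = sp + 65536, the call

     emit_frame_store (REG_RA, sa_reg, 65536, -28672);

   stores RA at sa_reg - 28672 while the attached unwind note describes
   the slot relative to sp, at sp + 65536 - 28672 = sp + 36864.  */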
7469 /* Compute the frame size. SIZE is the size of the "naked" frame
7470 and SA_SIZE is the size of the register save area. */
7472 static HOST_WIDE_INT
7473 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7475 if (TARGET_ABI_OPEN_VMS)
7476 return ALPHA_ROUND (sa_size
7477 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7478 + size
7479 + crtl->args.pretend_args_size);
7480 else
7481 return ALPHA_ROUND (crtl->outgoing_args_size)
7482 + sa_size
7483 + ALPHA_ROUND (size
7484 + crtl->args.pretend_args_size);
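/* Worked example (illustrative values only) for the non-VMS formula:
   with 40 bytes of outgoing args, a 20-byte frame, sa_size = 48 and no
   pretend args,

     ALPHA_ROUND (40) + 48 + ALPHA_ROUND (20) = 48 + 48 + 32 = 128.

   Each rounded term is a multiple of 16 and sa_size is already padded
   by alpha_sa_size, so the total stays 16-byte aligned.  */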
7487 /* Write function prologue. */
7489 /* On VMS we have two kinds of functions:
7491 - stack frame (PROC_STACK)
7492 these are 'normal' functions with local vars and which
7493 call other functions
7494 - register frame (PROC_REGISTER)
7495 keeps all data in registers, needs no stack
7497 We must pass this to the assembler so it can generate the
7498 proper pdsc (procedure descriptor).
7499 This is done with the '.pdesc' command.
7501 On non-VMS targets we don't really differentiate between the two, as
7502 we can simply allocate stack without saving registers. */
7505 alpha_expand_prologue (void)
7507 /* Registers to save. */
7508 unsigned long imask = 0;
7509 unsigned long fmask = 0;
7510 /* Stack space needed for pushing registers clobbered by us. */
7511 HOST_WIDE_INT sa_size, sa_bias;
7512 /* Complete stack size needed. */
7513 HOST_WIDE_INT frame_size;
7514 /* Probed stack size; it additionally includes the size of
7515 the "reserve region" if any. */
7516 HOST_WIDE_INT probed_size;
7517 /* Offset from base reg to register save area. */
7518 HOST_WIDE_INT reg_offset;
7522 sa_size = alpha_sa_size ();
7523 frame_size = compute_frame_size (get_frame_size (), sa_size);
7525 if (flag_stack_usage)
7526 current_function_static_stack_size = frame_size;
7528 if (TARGET_ABI_OPEN_VMS)
7529 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7531 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7533 alpha_sa_mask (&imask, &fmask);
7535 /* Emit an insn to reload GP, if needed. */
7538 alpha_function_needs_gp = alpha_does_function_need_gp ();
7539 if (alpha_function_needs_gp)
7540 emit_insn (gen_prologue_ldgp ());
7543 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7544 the call to mcount ourselves, rather than having the linker do it
7545 magically in response to -pg. Since _mcount has special linkage,
7546 don't represent the call as a call. */
7547 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7548 emit_insn (gen_prologue_mcount ());
7550 /* Adjust the stack by the frame size. If the frame size is > 4096
7551 bytes, we need to be sure we probe somewhere in the first and last
7552 4096 bytes (we can probably get away without the latter test) and
7553 every 8192 bytes in between. If the frame size is > 32768, we
7554 do this in a loop. Otherwise, we generate the explicit probe
7555 instructions.
7557 Note that we are only allowed to adjust sp once in the prologue. */
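/* For example (illustrative sizes only): probed_size = 20000 takes the
   small path below, probing at sp-4096 and sp-12288 and, when no
   registers are saved, once more at sp-20000 before the single sp
   adjustment; probed_size = 100000 instead goes through the
   prologue_stack_probe_loop sequence further down.  */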
7559 probed_size = frame_size;
7560 if (flag_stack_check)
7561 probed_size += STACK_CHECK_PROTECT;
7563 if (probed_size <= 32768)
7565 if (probed_size > 4096)
7569 for (probed = 4096; probed < probed_size; probed += 8192)
7570 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7572 /* We only have to do this probe if we aren't saving registers or
7573 if we are probing beyond the frame because of -fstack-check. */
7574 if ((sa_size == 0 && probed_size > probed - 4096)
7575 || flag_stack_check)
7576 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7579 if (frame_size != 0)
7580 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7581 GEN_INT (-frame_size))));
7585 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7586 number of 8192 byte blocks to probe. We then probe each block
7587 in the loop and then set SP to the proper location. If the
7588 amount remaining is > 4096, we have to do one more probe if we
7589 are not saving any registers or if we are probing beyond the
7590 frame because of -fstack-check. */
7592 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7593 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7594 rtx ptr = gen_rtx_REG (DImode, 22);
7595 rtx count = gen_rtx_REG (DImode, 23);
7598 emit_move_insn (count, GEN_INT (blocks));
7599 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7601 /* Because of the difficulty in emitting a new basic block this
7602 late in the compilation, generate the loop as a single insn. */
7603 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7605 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7607 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7608 MEM_VOLATILE_P (last) = 1;
7609 emit_move_insn (last, const0_rtx);
7612 if (flag_stack_check)
7614 /* If -fstack-check is specified we have to load the entire
7615 constant into a register and subtract from the sp in one go,
7616 because the probed stack size is not equal to the frame size. */
7617 HOST_WIDE_INT lo, hi;
7618 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7619 hi = frame_size - lo;
7621 emit_move_insn (ptr, GEN_INT (hi));
7622 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7623 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7628 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7629 GEN_INT (-leftover)));
7632 /* This alternative is special, because the DWARF code cannot
7633 possibly intuit through the loop above. So we invent this
7634 note for it to look at instead. */
7635 RTX_FRAME_RELATED_P (seq) = 1;
7636 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7637 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7638 plus_constant (stack_pointer_rtx,
7642 /* Cope with very large offsets to the register save area. */
7644 sa_reg = stack_pointer_rtx;
7645 if (reg_offset + sa_size > 0x8000)
7647 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7650 if (low + sa_size <= 0x8000)
7651 sa_bias = reg_offset - low, reg_offset = low;
7653 sa_bias = reg_offset, reg_offset = 0;
7655 sa_reg = gen_rtx_REG (DImode, 24);
7656 sa_bias_rtx = GEN_INT (sa_bias);
7658 if (add_operand (sa_bias_rtx, DImode))
7659 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7662 emit_move_insn (sa_reg, sa_bias_rtx);
7663 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
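/* Worked example (illustrative values only): reg_offset = 36864
   (0x9000) exceeds a signed 16-bit displacement, so

     low  = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000 = -28672
     bias = 36864 - (-28672) = 65536

   and the saves below address $24 - 28672 = sp + 36864.  */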
7667 /* Save regs in stack order, beginning with the VMS PV. */
7668 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7669 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7671 /* Save register RA next. */
7672 if (imask & (1UL << REG_RA))
7674 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7675 imask &= ~(1UL << REG_RA);
7679 /* Now save any other registers required to be saved. */
7680 for (i = 0; i < 31; i++)
7681 if (imask & (1UL << i))
7683 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7687 for (i = 0; i < 31; i++)
7688 if (fmask & (1UL << i))
7690 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7694 if (TARGET_ABI_OPEN_VMS)
7696 /* Register frame procedures save the fp. */
7697 if (alpha_procedure_type == PT_REGISTER)
7699 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7700 hard_frame_pointer_rtx);
7701 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7702 RTX_FRAME_RELATED_P (insn) = 1;
7705 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7706 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7707 gen_rtx_REG (DImode, REG_PV)));
7709 if (alpha_procedure_type != PT_NULL
7710 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7711 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7713 /* If we have to allocate space for outgoing args, do it now. */
7714 if (crtl->outgoing_args_size != 0)
7717 = emit_move_insn (stack_pointer_rtx,
7719 (hard_frame_pointer_rtx,
7721 (crtl->outgoing_args_size))));
7723 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7724 if ! frame_pointer_needed. Setting the bit will change the CFA
7725 computation rule to use sp again, which would be wrong if we had
7726 frame_pointer_needed, as this means sp might move unpredictably
7730 frame_pointer_needed
7731 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7733 crtl->outgoing_args_size != 0
7734 => alpha_procedure_type != PT_NULL,
7736 so when we are not setting the bit here, we are guaranteed to
7737 have emitted an FRP frame pointer update just before. */
7738 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7743 /* If we need a frame pointer, set it from the stack pointer. */
7744 if (frame_pointer_needed)
7746 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7747 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7749 /* This must always be the last instruction in the
7750 prologue, thus we emit a special move + clobber. */
7751 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7752 stack_pointer_rtx, sa_reg)));
7756 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7757 the prologue, for exception handling reasons, we cannot do this for
7758 any insn that might fault. We could prevent this for mems with a
7759 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7760 have to prevent all such scheduling with a blockage.
7762 Linux, on the other hand, never bothered to implement OSF/1's
7763 exception handling, and so doesn't care about such things. Anyone
7764 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7766 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7767 emit_insn (gen_blockage ());
7770 /* Count the number of .file directives, so that .loc is up to date. */
7771 int num_source_filenames = 0;
7773 /* Output the textual info surrounding the prologue. */
7776 alpha_start_function (FILE *file, const char *fnname,
7777 tree decl ATTRIBUTE_UNUSED)
7779 unsigned long imask = 0;
7780 unsigned long fmask = 0;
7781 /* Stack space needed for pushing registers clobbered by us. */
7782 HOST_WIDE_INT sa_size;
7783 /* Complete stack size needed. */
7784 unsigned HOST_WIDE_INT frame_size;
7785 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7786 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7789 /* Offset from base reg to register save area. */
7790 HOST_WIDE_INT reg_offset;
7791 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7792 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7795 #if TARGET_ABI_OPEN_VMS
7797 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
7799 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
7800 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
7801 switch_to_section (text_section);
7802 vms_debug_main = NULL;
7806 alpha_fnname = fnname;
7807 sa_size = alpha_sa_size ();
7808 frame_size = compute_frame_size (get_frame_size (), sa_size);
7810 if (TARGET_ABI_OPEN_VMS)
7811 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7813 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7815 alpha_sa_mask (&imask, &fmask);
7817 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7818 We have to do that before the .ent directive as we cannot switch
7819 files within procedures with native ecoff because line numbers are
7820 linked to procedure descriptors.
7821 Outputting the lineno helps debugging of one line functions as they
7822 would otherwise get no line number at all. Please note that we would
7823 like to put out last_linenum from final.c, but it is not accessible. */
7825 if (write_symbols == SDB_DEBUG)
7827 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7828 ASM_OUTPUT_SOURCE_FILENAME (file,
7829 DECL_SOURCE_FILE (current_function_decl));
7831 #ifdef SDB_OUTPUT_SOURCE_LINE
7832 if (debug_info_level != DINFO_LEVEL_TERSE)
7833 SDB_OUTPUT_SOURCE_LINE (file,
7834 DECL_SOURCE_LINE (current_function_decl));
7838 /* Issue function start and label. */
7839 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
7841 fputs ("\t.ent ", file);
7842 assemble_name (file, fnname);
7845 /* If the function needs GP, we'll write the "..ng" label there.
7846 Otherwise, do it here. */
7847 if (TARGET_ABI_OSF
7848 && ! alpha_function_needs_gp
7849 && ! cfun->is_thunk)
7852 assemble_name (file, fnname);
7853 fputs ("..ng:\n", file);
7856 /* Nested functions on VMS that are potentially called via trampoline
7857 get a special transfer entry point that loads the called function's
7858 procedure descriptor and static chain. */
7859 if (TARGET_ABI_OPEN_VMS
7860 && !TREE_PUBLIC (decl)
7861 && DECL_CONTEXT (decl)
7862 && !TYPE_P (DECL_CONTEXT (decl)))
7864 strcpy (tramp_label, fnname);
7865 strcat (tramp_label, "..tr");
7866 ASM_OUTPUT_LABEL (file, tramp_label);
7867 fprintf (file, "\tldq $1,24($27)\n");
7868 fprintf (file, "\tldq $27,16($27)\n");
7871 strcpy (entry_label, fnname);
7872 if (TARGET_ABI_OPEN_VMS)
7873 strcat (entry_label, "..en");
7875 ASM_OUTPUT_LABEL (file, entry_label);
7876 inside_function = TRUE;
7878 if (TARGET_ABI_OPEN_VMS)
7879 fprintf (file, "\t.base $%d\n", vms_base_regno);
7882 && TARGET_IEEE_CONFORMANT
7883 && !flag_inhibit_size_directive)
7885 /* Set flags in procedure descriptor to request IEEE-conformant
7886 math-library routines. The value we set it to is PDSC_EXC_IEEE
7887 (/usr/include/pdsc.h). */
7888 fputs ("\t.eflag 48\n", file);
7891 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7892 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7893 alpha_arg_offset = -frame_size + 48;
7895 /* Describe our frame. If the frame size is larger than an integer,
7896 print it as zero to avoid an assembler error. We won't be
7897 properly describing such a frame, but that's the best we can do. */
7898 if (TARGET_ABI_OPEN_VMS)
7899 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7900 HOST_WIDE_INT_PRINT_DEC "\n",
7902 frame_size >= (1UL << 31) ? 0 : frame_size,
7904 else if (!flag_inhibit_size_directive)
7905 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7906 (frame_pointer_needed
7907 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7908 frame_size >= max_frame_size ? 0 : frame_size,
7909 crtl->args.pretend_args_size);
7911 /* Describe which registers were spilled. */
7912 if (TARGET_ABI_OPEN_VMS)
7915 /* ??? Does VMS care if mask contains ra? The old code didn't
7916 set it, so I don't here. */
7917 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7919 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7920 if (alpha_procedure_type == PT_REGISTER)
7921 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7923 else if (!flag_inhibit_size_directive)
7927 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7928 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7930 for (i = 0; i < 32; ++i)
7931 if (imask & (1UL << i))
7936 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7937 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7940 #if TARGET_ABI_OPEN_VMS
7941 /* If a user condition handler has been installed at some point, emit
7942 the procedure descriptor bits to point the Condition Handling Facility
7943 at the indirection wrapper, and state the fp offset at which the user
7944 handler may be found. */
7945 if (cfun->machine->uses_condition_handler)
7947 fprintf (file, "\t.handler __gcc_shell_handler\n");
7948 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
7951 /* Ifdef'ed because link_section is only available then. */
7952 switch_to_section (readonly_data_section);
7953 fprintf (file, "\t.align 3\n");
7954 assemble_name (file, fnname); fputs ("..na:\n", file);
7955 fputs ("\t.ascii \"", file);
7956 assemble_name (file, fnname);
7957 fputs ("\\0\"\n", file);
7958 alpha_need_linkage (fnname, 1);
7959 switch_to_section (text_section);
7963 /* Emit the .prologue note at the scheduled end of the prologue. */
7966 alpha_output_function_end_prologue (FILE *file)
7968 if (TARGET_ABI_OPEN_VMS)
7969 fputs ("\t.prologue\n", file);
7970 else if (!flag_inhibit_size_directive)
7971 fprintf (file, "\t.prologue %d\n",
7972 alpha_function_needs_gp || cfun->is_thunk);
7975 /* Write function epilogue. */
7978 alpha_expand_epilogue (void)
7980 /* Registers to save. */
7981 unsigned long imask = 0;
7982 unsigned long fmask = 0;
7983 /* Stack space needed for pushing registers clobbered by us. */
7984 HOST_WIDE_INT sa_size;
7985 /* Complete stack size needed. */
7986 HOST_WIDE_INT frame_size;
7987 /* Offset from base reg to register save area. */
7988 HOST_WIDE_INT reg_offset;
7989 int fp_is_frame_pointer, fp_offset;
7990 rtx sa_reg, sa_reg_exp = NULL;
7991 rtx sp_adj1, sp_adj2, mem, reg, insn;
7993 rtx cfa_restores = NULL_RTX;
7996 sa_size = alpha_sa_size ();
7997 frame_size = compute_frame_size (get_frame_size (), sa_size);
7999 if (TARGET_ABI_OPEN_VMS)
8001 if (alpha_procedure_type == PT_STACK)
8002 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8007 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8009 alpha_sa_mask (&imask, &fmask);
8012 = (TARGET_ABI_OPEN_VMS
8013 ? alpha_procedure_type == PT_STACK
8014 : frame_pointer_needed);
8016 sa_reg = stack_pointer_rtx;
8018 if (crtl->calls_eh_return)
8019 eh_ofs = EH_RETURN_STACKADJ_RTX;
8025 /* If we have a frame pointer, restore SP from it. */
8026 if (TARGET_ABI_OPEN_VMS
8027 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8028 : frame_pointer_needed)
8029 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8031 /* Cope with very large offsets to the register save area. */
8032 if (reg_offset + sa_size > 0x8000)
8034 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8037 if (low + sa_size <= 0x8000)
8038 bias = reg_offset - low, reg_offset = low;
8040 bias = reg_offset, reg_offset = 0;
8042 sa_reg = gen_rtx_REG (DImode, 22);
8043 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8045 emit_move_insn (sa_reg, sa_reg_exp);
8048 /* Restore registers in order, excepting a true frame pointer. */
8050 mem = gen_frame_mem (DImode, plus_constant (sa_reg, reg_offset));
8051 reg = gen_rtx_REG (DImode, REG_RA);
8052 emit_move_insn (reg, mem);
8053 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8056 imask &= ~(1UL << REG_RA);
8058 for (i = 0; i < 31; ++i)
8059 if (imask & (1UL << i))
8061 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8062 fp_offset = reg_offset;
8065 mem = gen_frame_mem (DImode,
8066 plus_constant (sa_reg, reg_offset));
8067 reg = gen_rtx_REG (DImode, i);
8068 emit_move_insn (reg, mem);
8069 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8075 for (i = 0; i < 31; ++i)
8076 if (fmask & (1UL << i))
8078 mem = gen_frame_mem (DFmode, plus_constant (sa_reg, reg_offset));
8079 reg = gen_rtx_REG (DFmode, i+32);
8080 emit_move_insn (reg, mem);
8081 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8086 if (frame_size || eh_ofs)
8088 sp_adj1 = stack_pointer_rtx;
8092 sp_adj1 = gen_rtx_REG (DImode, 23);
8093 emit_move_insn (sp_adj1,
8094 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8097 /* If the stack size is large, begin computation into a temporary
8098 register so as not to interfere with a potential fp restore,
8099 which must be consecutive with an SP restore. */
8100 if (frame_size < 32768 && !cfun->calls_alloca)
8101 sp_adj2 = GEN_INT (frame_size);
8102 else if (frame_size < 0x40007fffL)
8104 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8106 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8107 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8111 sp_adj1 = gen_rtx_REG (DImode, 23);
8112 emit_move_insn (sp_adj1, sp_adj2);
8114 sp_adj2 = GEN_INT (low);
8118 rtx tmp = gen_rtx_REG (DImode, 23);
8119 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8122 /* We can't drop new things to memory this late, afaik,
8123 so build it up by pieces. */
8124 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8126 gcc_assert (sp_adj2);
8130 /* From now on, things must be in order. So emit blockages. */
8132 /* Restore the frame pointer. */
8133 if (fp_is_frame_pointer)
8135 emit_insn (gen_blockage ());
8136 mem = gen_frame_mem (DImode, plus_constant (sa_reg, fp_offset));
8137 emit_move_insn (hard_frame_pointer_rtx, mem);
8138 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8139 hard_frame_pointer_rtx, cfa_restores);
8141 else if (TARGET_ABI_OPEN_VMS)
8143 emit_insn (gen_blockage ());
8144 emit_move_insn (hard_frame_pointer_rtx,
8145 gen_rtx_REG (DImode, vms_save_fp_regno));
8146 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8147 hard_frame_pointer_rtx, cfa_restores);
8150 /* Restore the stack pointer. */
8151 emit_insn (gen_blockage ());
8152 if (sp_adj2 == const0_rtx)
8153 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8155 insn = emit_move_insn (stack_pointer_rtx,
8156 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8157 REG_NOTES (insn) = cfa_restores;
8158 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8159 RTX_FRAME_RELATED_P (insn) = 1;
8163 gcc_assert (cfa_restores == NULL);
8165 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8167 emit_insn (gen_blockage ());
8168 insn = emit_move_insn (hard_frame_pointer_rtx,
8169 gen_rtx_REG (DImode, vms_save_fp_regno));
8170 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8171 RTX_FRAME_RELATED_P (insn) = 1;
8176 /* Output the rest of the textual info surrounding the epilogue. */
8179 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8183 /* We output a nop after noreturn calls at the very end of the function to
8184 ensure that the return address always remains in the caller's code range,
8185 as not doing so might confuse unwinding engines. */
8186 insn = get_last_insn ();
8187 if (!INSN_P (insn))
8188 insn = prev_active_insn (insn);
8189 if (insn && CALL_P (insn))
8190 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8192 #if TARGET_ABI_OPEN_VMS
8193 alpha_write_linkage (file, fnname, decl);
8196 /* End the function. */
8197 if (!flag_inhibit_size_directive)
8199 fputs ("\t.end ", file);
8200 assemble_name (file, fnname);
8203 inside_function = FALSE;
8206 #if TARGET_ABI_OPEN_VMS
8207 void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
8209 #ifdef DO_CRTL_NAMES
8216 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8218 In order to avoid the hordes of differences between generated code
8219 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8220 lots of code loading up large constants, generate rtl and emit it
8221 instead of going straight to text.
8223 Not sure why this idea hasn't been explored before... */
8226 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8227 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8230 HOST_WIDE_INT hi, lo;
8231 rtx this_rtx, insn, funexp;
8233 /* We always require a valid GP. */
8234 emit_insn (gen_prologue_ldgp ());
8235 emit_note (NOTE_INSN_PROLOGUE_END);
8237 /* Find the "this" pointer. If the function returns a structure,
8238 the structure return pointer is in $16. */
8239 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8240 this_rtx = gen_rtx_REG (Pmode, 17);
8242 this_rtx = gen_rtx_REG (Pmode, 16);
8244 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8245 entire constant for the add. */
8246 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8247 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8248 if (hi + lo == delta)
8251 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8253 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8257 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8258 delta, -(delta < 0));
8259 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
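/* Worked example (illustrative value only): delta = 0x12345678 gives

     lo = ((0x5678 ^ 0x8000) - 0x8000) = 0x5678
     hi = 0x12340000

   and hi + lo == delta, so the ldah/lda pair above suffices; a delta
   that does not recompose this way falls through to the full constant
   load.  */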
8262 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8267 tmp = gen_rtx_REG (Pmode, 0);
8268 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8270 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8271 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8272 if (hi + lo == vcall_offset)
8275 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8279 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8280 vcall_offset, -(vcall_offset < 0));
8281 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8285 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8288 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8290 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8293 /* Generate a tail call to the target function. */
8294 if (! TREE_USED (function))
8296 assemble_external (function);
8297 TREE_USED (function) = 1;
8299 funexp = XEXP (DECL_RTL (function), 0);
8300 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8301 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8302 SIBLING_CALL_P (insn) = 1;
8304 /* Run just enough of rest_of_compilation to get the insns emitted.
8305 There's not really enough bulk here to make other passes such as
8306 instruction scheduling worth while. Note that use_thunk calls
8307 assemble_start_function and assemble_end_function. */
8308 insn = get_insns ();
8309 insn_locators_alloc ();
8310 shorten_branches (insn);
8311 final_start_function (insn, file, 1);
8312 final (insn, file, 1);
8313 final_end_function ();
8315 #endif /* TARGET_ABI_OSF */
8317 /* Debugging support. */
8321 /* Count the number of sdb-related labels generated (to find block
8322 start and end boundaries). */
8324 int sdb_label_count = 0;
8326 /* Name of the file containing the current function. */
8328 static const char *current_function_file = "";
8330 /* Offsets to alpha virtual arg/local debugging pointers. */
8332 long alpha_arg_offset;
8333 long alpha_auto_offset;
8335 /* Emit a new filename to a stream. */
8338 alpha_output_filename (FILE *stream, const char *name)
8340 static int first_time = TRUE;
8345 ++num_source_filenames;
8346 current_function_file = name;
8347 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8348 output_quoted_string (stream, name);
8349 fprintf (stream, "\n");
8350 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8351 fprintf (stream, "\t#@stabs\n");
8354 else if (write_symbols == DBX_DEBUG)
8355 /* dbxout.c will emit an appropriate .stabs directive. */
8358 else if (name != current_function_file
8359 && strcmp (name, current_function_file) != 0)
8361 if (inside_function && ! TARGET_GAS)
8362 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8365 ++num_source_filenames;
8366 current_function_file = name;
8367 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8370 output_quoted_string (stream, name);
8371 fprintf (stream, "\n");
8375 /* Structure to show the current status of registers and memory. */
8377 struct shadow_summary
8380 unsigned int i : 31; /* Mask of int regs */
8381 unsigned int fp : 31; /* Mask of fp regs */
8382 unsigned int mem : 1; /* mem == imem | fpmem */
8386 /* Summarize the effects of expression X on the machine. Update SUM, a
8387 pointer to the summary structure. SET is nonzero if the insn is setting
8388 the object, otherwise zero. */
8391 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8393 const char *format_ptr;
8399 switch (GET_CODE (x))
8401 /* ??? Note that this case would be incorrect if the Alpha had a
8402 ZERO_EXTRACT in SET_DEST. */
8404 summarize_insn (SET_SRC (x), sum, 0);
8405 summarize_insn (SET_DEST (x), sum, 1);
8409 summarize_insn (XEXP (x, 0), sum, 1);
8413 summarize_insn (XEXP (x, 0), sum, 0);
8417 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8418 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8422 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8423 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8427 summarize_insn (SUBREG_REG (x), sum, 0);
8432 int regno = REGNO (x);
8433 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8435 if (regno == 31 || regno == 63)
8441 sum->defd.i |= mask;
8443 sum->defd.fp |= mask;
8448 sum->used.i |= mask;
8450 sum->used.fp |= mask;
8461 /* Find the regs used in memory address computation: */
8462 summarize_insn (XEXP (x, 0), sum, 0);
8465 case CONST_INT: case CONST_DOUBLE:
8466 case SYMBOL_REF: case LABEL_REF: case CONST:
8467 case SCRATCH: case ASM_INPUT:
8470 /* Handle common unary and binary ops for efficiency. */
8471 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8472 case MOD: case UDIV: case UMOD: case AND: case IOR:
8473 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8474 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8475 case NE: case EQ: case GE: case GT: case LE:
8476 case LT: case GEU: case GTU: case LEU: case LTU:
8477 summarize_insn (XEXP (x, 0), sum, 0);
8478 summarize_insn (XEXP (x, 1), sum, 0);
8481 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8482 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8483 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8484 case SQRT: case FFS:
8485 summarize_insn (XEXP (x, 0), sum, 0);
8489 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8490 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8491 switch (format_ptr[i])
8494 summarize_insn (XEXP (x, i), sum, 0);
8498 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8499 summarize_insn (XVECEXP (x, i, j), sum, 0);
8511 /* Ensure a sufficient number of `trapb' insns are in the code when
8512 the user requests code with a trap precision of functions or
8513 instructions.
8515 In naive mode, when the user requests a trap-precision of
8516 "instruction", a trapb is needed after every instruction that may
8517 generate a trap. This ensures that the code is resumption safe but
8518 it is also slow.
8520 When optimizations are turned on, we delay issuing a trapb as long
8521 as possible. In this context, a trap shadow is the sequence of
8522 instructions that starts with a (potentially) trap generating
8523 instruction and extends to the next trapb or call_pal instruction
8524 (but GCC never generates call_pal by itself). We can delay (and
8525 therefore sometimes omit) a trapb subject to the following
8528 (a) On entry to the trap shadow, if any Alpha register or memory
8529 location contains a value that is used as an operand value by some
8530 instruction in the trap shadow (live on entry), then no instruction
8531 in the trap shadow may modify the register or memory location.
8533 (b) Within the trap shadow, the computation of the base register
8534 for a memory load or store instruction may not involve using the
8535 result of an instruction that might generate an UNPREDICTABLE
8538 (c) Within the trap shadow, no register may be used more than once
8539 as a destination register. (This is to make life easier for the
8540 trap-handler.)
8542 (d) The trap shadow may not include any branch instructions. */
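/* A sketch of a legal shadow (illustrative assembly, not emitted
   text):

	addt/su $f1,$f2,$f3	# may trap; shadow opens
	mult/su $f3,$f4,$f5	# new destination, inputs unmodified
	trapb			# shadow closes

   Writing $f3 or $f5 a second time within the shadow would violate
   rule (c) and force an earlier trapb.  */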
8545 alpha_handle_trap_shadows (void)
8547 struct shadow_summary shadow;
8548 int trap_pending, exception_nesting;
8552 exception_nesting = 0;
8555 shadow.used.mem = 0;
8556 shadow.defd = shadow.used;
8558 for (i = get_insns (); i ; i = NEXT_INSN (i))
8562 switch (NOTE_KIND (i))
8564 case NOTE_INSN_EH_REGION_BEG:
8565 exception_nesting++;
8570 case NOTE_INSN_EH_REGION_END:
8571 exception_nesting--;
8576 case NOTE_INSN_EPILOGUE_BEG:
8577 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8582 else if (trap_pending)
8584 if (alpha_tp == ALPHA_TP_FUNC)
8587 && GET_CODE (PATTERN (i)) == RETURN)
8590 else if (alpha_tp == ALPHA_TP_INSN)
8594 struct shadow_summary sum;
8599 sum.defd = sum.used;
8601 switch (GET_CODE (i))
8604 /* Annoyingly, get_attr_trap will die on these. */
8605 if (GET_CODE (PATTERN (i)) == USE
8606 || GET_CODE (PATTERN (i)) == CLOBBER)
8609 summarize_insn (PATTERN (i), &sum, 0);
8611 if ((sum.defd.i & shadow.defd.i)
8612 || (sum.defd.fp & shadow.defd.fp))
8614 /* (c) would be violated */
8618 /* Combine shadow with summary of current insn: */
8619 shadow.used.i |= sum.used.i;
8620 shadow.used.fp |= sum.used.fp;
8621 shadow.used.mem |= sum.used.mem;
8622 shadow.defd.i |= sum.defd.i;
8623 shadow.defd.fp |= sum.defd.fp;
8624 shadow.defd.mem |= sum.defd.mem;
8626 if ((sum.defd.i & shadow.used.i)
8627 || (sum.defd.fp & shadow.used.fp)
8628 || (sum.defd.mem & shadow.used.mem))
8630 /* (a) would be violated (also takes care of (b)) */
8631 gcc_assert (get_attr_trap (i) != TRAP_YES
8632 || (!(sum.defd.i & sum.used.i)
8633 && !(sum.defd.fp & sum.used.fp)));
8651 n = emit_insn_before (gen_trapb (), i);
8652 PUT_MODE (n, TImode);
8653 PUT_MODE (i, TImode);
8657 shadow.used.mem = 0;
8658 shadow.defd = shadow.used;
8663 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8664 && NONJUMP_INSN_P (i)
8665 && GET_CODE (PATTERN (i)) != USE
8666 && GET_CODE (PATTERN (i)) != CLOBBER
8667 && get_attr_trap (i) == TRAP_YES)
8669 if (optimize && !trap_pending)
8670 summarize_insn (PATTERN (i), &shadow, 0);
8676 /* Alpha can only issue instruction groups simultaneously if they are
8677 suitably aligned. This is very processor-specific. */
8678 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8679 that are marked "fake". These instructions do not exist on that target,
8680 but it is possible to see these insns with deranged combinations of
8681 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8682 choose a result at random. */
8684 enum alphaev4_pipe {
8691 enum alphaev5_pipe {
8702 static enum alphaev4_pipe
8703 alphaev4_insn_pipe (rtx insn)
8705 if (recog_memoized (insn) < 0)
8707 if (get_attr_length (insn) != 4)
8710 switch (get_attr_type (insn))
8726 case TYPE_MVI: /* fake */
8741 case TYPE_FSQRT: /* fake */
8742 case TYPE_FTOI: /* fake */
8743 case TYPE_ITOF: /* fake */
8751 static enum alphaev5_pipe
8752 alphaev5_insn_pipe (rtx insn)
8754 if (recog_memoized (insn) < 0)
8756 if (get_attr_length (insn) != 4)
8759 switch (get_attr_type (insn))
8779 case TYPE_FTOI: /* fake */
8780 case TYPE_ITOF: /* fake */
8795 case TYPE_FSQRT: /* fake */
8806 /* IN_USE is a mask of the slots currently filled within the insn group.
8807 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8808 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8810 LEN is, of course, the length of the group in bytes. */
8813 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8820 || GET_CODE (PATTERN (insn)) == CLOBBER
8821 || GET_CODE (PATTERN (insn)) == USE)
8826 enum alphaev4_pipe pipe;
8828 pipe = alphaev4_insn_pipe (insn);
8832 /* Force complex instructions to start new groups. */
8836 /* If this is a completely unrecognized insn, it's an asm.
8837 We don't know how long it is, so record length as -1 to
8838 signal a needed realignment. */
8839 if (recog_memoized (insn) < 0)
8842 len = get_attr_length (insn);
8846 if (in_use & EV4_IB0)
8848 if (in_use & EV4_IB1)
8853 in_use |= EV4_IB0 | EV4_IBX;
8857 if (in_use & EV4_IB0)
8859 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8867 if (in_use & EV4_IB1)
8877 /* Haifa doesn't do well scheduling branches. */
8882 insn = next_nonnote_insn (insn);
8884 if (!insn || ! INSN_P (insn))
8887 /* Let Haifa tell us where it thinks insn group boundaries are. */
8888 if (GET_MODE (insn) == TImode)
8891 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8896 insn = next_nonnote_insn (insn);
8904 /* IN_USE is a mask of the slots currently filled within the insn group.
8905 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8906 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8908 LEN is, of course, the length of the group in bytes. */
8911 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8918 || GET_CODE (PATTERN (insn)) == CLOBBER
8919 || GET_CODE (PATTERN (insn)) == USE)
8924 enum alphaev5_pipe pipe;
8926 pipe = alphaev5_insn_pipe (insn);
8930 /* Force complex instructions to start new groups. */
8934 /* If this is a completely unrecognized insn, it's an asm.
8935 We don't know how long it is, so record length as -1 to
8936 signal a needed realignment. */
8937 if (recog_memoized (insn) < 0)
8940 len = get_attr_length (insn);
8943 /* ??? Most of the places below, we would like to assert never
8944 happen, as it would indicate an error either in Haifa, or
8945 in the scheduling description. Unfortunately, Haifa never
8946 schedules the last instruction of the BB, so we don't have
8947 an accurate TI bit to go off. */
8949 if (in_use & EV5_E0)
8951 if (in_use & EV5_E1)
8956 in_use |= EV5_E0 | EV5_E01;
8960 if (in_use & EV5_E0)
8962 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8970 if (in_use & EV5_E1)
8976 if (in_use & EV5_FA)
8978 if (in_use & EV5_FM)
8983 in_use |= EV5_FA | EV5_FAM;
8987 if (in_use & EV5_FA)
8993 if (in_use & EV5_FM)
9006 /* Haifa doesn't do well scheduling branches. */
9007 /* ??? If this is predicted not-taken, slotting continues, except
9008 that no more IBR, FBR, or JSR insns may be slotted. */
9013 insn = next_nonnote_insn (insn);
9015 if (!insn || ! INSN_P (insn))
9018 /* Let Haifa tell us where it thinks insn group boundaries are. */
9019 if (GET_MODE (insn) == TImode)
9022 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9027 insn = next_nonnote_insn (insn);
9036 alphaev4_next_nop (int *pin_use)
9038 int in_use = *pin_use;
9041 if (!(in_use & EV4_IB0))
9046 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9051 else if (TARGET_FP && !(in_use & EV4_IB1))
9064 alphaev5_next_nop (int *pin_use)
9066 int in_use = *pin_use;
9069 if (!(in_use & EV5_E1))
9074 else if (TARGET_FP && !(in_use & EV5_FA))
9079 else if (TARGET_FP && !(in_use & EV5_FM))
9091 /* The instruction group alignment main loop. */
9094 alpha_align_insns (unsigned int max_align,
9095 rtx (*next_group) (rtx, int *, int *),
9096 rtx (*next_nop) (int *))
9098 /* ALIGN is the known alignment for the insn group. */
9100 /* OFS is the offset of the current insn in the insn group. */
9102 int prev_in_use, in_use, len, ldgp;
9105 /* Let shorten_branches take care of assigning alignments to code labels. */
9106 shorten_branches (get_insns ());
9108 if (align_functions < 4)
9110 else if ((unsigned int) align_functions < max_align)
9111 align = align_functions;
9115 ofs = prev_in_use = 0;
9118 i = next_nonnote_insn (i);
9120 ldgp = alpha_function_needs_gp ? 8 : 0;
9124 next = (*next_group) (i, &in_use, &len);
9126 /* When we see a label, resync alignment etc. */
9129 unsigned int new_align = 1 << label_to_alignment (i);
9131 if (new_align >= align)
9133 align = new_align < max_align ? new_align : max_align;
9137 else if (ofs & (new_align-1))
9138 ofs = (ofs | (new_align-1)) + 1;
9142 /* Handle complex instructions specially. */
9143 else if (in_use == 0)
9145 /* Asms will have length < 0. This is a signal that we have
9146 lost alignment knowledge. Assume, however, that the asm
9147 will not mis-align instructions. */
9156 /* If the known alignment is smaller than the recognized insn group,
9157 realign the output. */
9158 else if ((int) align < len)
9160 unsigned int new_log_align = len > 8 ? 4 : 3;
9163 where = prev = prev_nonnote_insn (i);
9164 if (!where || !LABEL_P (where))
9167 /* Can't realign between a call and its gp reload. */
9168 if (! (TARGET_EXPLICIT_RELOCS
9169 && prev && CALL_P (prev)))
9171 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9172 align = 1 << new_log_align;
9177 /* We may not insert padding inside the initial ldgp sequence. */
9181 /* If the group won't fit in the same INT16 as the previous,
9182 we need to add padding to keep the group together. Rather
9183 than simply leaving the insn filling to the assembler, we
9184 can make use of the knowledge of what sorts of instructions
9185 were issued in the previous group to make sure that all of
9186 the added nops are really free. */
9187 else if (ofs + len > (int) align)
9189 int nop_count = (align - ofs) / 4;
9192 /* Insert nops before labels, branches, and calls to truly merge
9193 the execution of the nops with the previous instruction group. */
9194 where = prev_nonnote_insn (i);
9197 if (LABEL_P (where))
9199 rtx where2 = prev_nonnote_insn (where);
9200 if (where2 && JUMP_P (where2))
9203 else if (NONJUMP_INSN_P (where))
9210 emit_insn_before ((*next_nop)(&prev_in_use), where);
9211 while (--nop_count);
9215 ofs = (ofs + len) & (align - 1);
9216 prev_in_use = in_use;
9221 /* Insert an unop between a noreturn function call and a GP load. */
9224 alpha_pad_noreturn (void)
9228 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9230 if (! (CALL_P (insn)
9231 && find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9234 /* Make sure we do not split a call and its corresponding
9235 CALL_ARG_LOCATION note. */
9238 next = NEXT_INSN (insn);
9239 if (next && NOTE_P (next)
9240 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9244 next = next_active_insn (insn);
9248 rtx pat = PATTERN (next);
9250 if (GET_CODE (pat) == SET
9251 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9252 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9253 emit_insn_after (gen_unop (), insn);
9258 /* Machine dependent reorg pass. */
9263 /* Workaround for a linker error that triggers when an
9264 exception handler immediately follows a noreturn function.
9266 The instruction stream from an object file:
9268 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9269 58: 00 00 ba 27 ldah gp,0(ra)
9270 5c: 00 00 bd 23 lda gp,0(gp)
9271 60: 00 00 7d a7 ldq t12,0(gp)
9272 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9274 was converted in the final link pass to:
9276 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9277 fdb28: 00 00 fe 2f unop
9278 fdb2c: 00 00 fe 2f unop
9279 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9280 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9282 GP load instructions were wrongly cleared by the linker relaxation
9283 pass. This workaround prevents removal of GP loads by inserting
9284 an unop instruction between a noreturn function call and the
9285 exception handler prologue. */
9287 if (current_function_has_exception_handlers ())
9288 alpha_pad_noreturn ();
9290 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9291 alpha_handle_trap_shadows ();
9293 /* Due to the number of extra trapb insns, don't bother fixing up
9294 alignment when trap precision is instruction. Moreover, we can
9295 only do our job when sched2 is run. */
9296 if (optimize && !optimize_size
9297 && alpha_tp != ALPHA_TP_INSN
9298 && flag_schedule_insns_after_reload)
9300 if (alpha_tune == PROCESSOR_EV4)
9301 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9302 else if (alpha_tune == PROCESSOR_EV5)
9303 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9312 alpha_file_start (void)
9314 #ifdef OBJECT_FORMAT_ELF
9315 /* If emitting dwarf2 debug information, we cannot generate a .file
9316 directive to start the file, as it will conflict with dwarf2out
9317 file numbers. So it's only useful when emitting mdebug output. */
9318 targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9321 default_file_start ();
9323 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9326 fputs ("\t.set noreorder\n", asm_out_file);
9327 fputs ("\t.set volatile\n", asm_out_file);
9329 fputs ("\t.set noat\n", asm_out_file);
9330 if (TARGET_EXPLICIT_RELOCS)
9331 fputs ("\t.set nomacro\n", asm_out_file);
9332 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9336 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9338 else if (TARGET_MAX)
9340 else if (TARGET_BWX)
9342 else if (alpha_cpu == PROCESSOR_EV5)
9347 fprintf (asm_out_file, "\t.arch %s\n", arch);
9351 #ifdef OBJECT_FORMAT_ELF
9352 /* Since we don't have a .dynbss section, we should not allow global
9353 relocations in the .rodata section. */
9356 alpha_elf_reloc_rw_mask (void)
9358 return flag_pic ? 3 : 2;
9361 /* Return a section for X. The only special thing we do here is to
9362 honor small data. */
9365 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9366 unsigned HOST_WIDE_INT align)
9368 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9369 /* ??? Consider using mergeable sdata sections. */
9370 return sdata_section;
9372 return default_elf_select_rtx_section (mode, x, align);
9376 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9378 unsigned int flags = 0;
9380 if (strcmp (name, ".sdata") == 0
9381 || strncmp (name, ".sdata.", 7) == 0
9382 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9383 || strcmp (name, ".sbss") == 0
9384 || strncmp (name, ".sbss.", 6) == 0
9385 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9386 flags = SECTION_SMALL;
9388 flags |= default_section_type_flags (decl, name, reloc);
9391 #endif /* OBJECT_FORMAT_ELF */
9393 /* Structure to collect function names for final output in the link section. */
9394 /* Note that items marked with GTY can't be ifdef'ed out. */
9396 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9397 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9399 struct GTY(()) alpha_links
9404 enum links_kind lkind;
9405 enum reloc_kind rkind;
9408 struct GTY(()) alpha_funcs
9411 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9415 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9416 splay_tree alpha_links_tree;
9417 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9418 splay_tree alpha_funcs_tree;
9420 static GTY(()) int alpha_funcs_num;
9422 #if TARGET_ABI_OPEN_VMS
9424 /* Return the VMS argument type corresponding to MODE. */
9427 alpha_arg_type (enum machine_mode mode)
9432 return TARGET_FLOAT_VAX ? FF : FS;
9434 return TARGET_FLOAT_VAX ? FD : FT;
9440 /* Return an rtx for an integer representing the VMS Argument Information
9441 register value. */
9444 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9446 unsigned HOST_WIDE_INT regval = cum.num_args;
9449 for (i = 0; i < 6; i++)
9450 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9452 return GEN_INT (regval);
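/* Illustrative encoding (type values symbolic): a call with two
   arguments of VMS types t0 and t1 yields

     regval = 2 | (t0 << 8) | (t1 << 11)

   i.e. bits 0-7 hold the argument count and the 3-bit field at
   bit 8 + 3*i describes argument i.  */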
9455 /* Register the need for a (fake) .linkage entry for calls to function NAME.
9456 IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
9457 Return a SYMBOL_REF suited to the call instruction. */
9460 alpha_need_linkage (const char *name, int is_local)
9462 splay_tree_node node;
9463 struct alpha_links *al;
9472 struct alpha_funcs *cfaf;
9474 if (!alpha_funcs_tree)
9475 alpha_funcs_tree = splay_tree_new_ggc
9476 (splay_tree_compare_pointers,
9477 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9478 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9481 cfaf = ggc_alloc_alpha_funcs ();
9484 cfaf->num = ++alpha_funcs_num;
9486 splay_tree_insert (alpha_funcs_tree,
9487 (splay_tree_key) current_function_decl,
9488 (splay_tree_value) cfaf);
9491 if (alpha_links_tree)
9493 /* Is this name already defined? */
9495 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9498 al = (struct alpha_links *) node->value;
9501 /* Defined here but external assumed. */
9502 if (al->lkind == KIND_EXTERN)
9503 al->lkind = KIND_LOCAL;
9507 /* Used here but unused assumed. */
9508 if (al->lkind == KIND_UNUSED)
9509 al->lkind = KIND_LOCAL;
9515 alpha_links_tree = splay_tree_new_ggc
9516 ((splay_tree_compare_fn) strcmp,
9517 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9518 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9520 al = ggc_alloc_alpha_links ();
9521 name = ggc_strdup (name);
9523 /* Assume external if no definition. */
9524 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9526 /* Ensure we have an IDENTIFIER so assemble_name can mark it used
9527 and find the ultimate alias target like assemble_name. */
9528 id = get_identifier (name);
9530 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9532 id = TREE_CHAIN (id);
9533 target = IDENTIFIER_POINTER (id);
9536 al->target = target ? target : name;
9537 al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
9539 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9540 (splay_tree_value) al);
9545 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9546 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9547 this is the reference to the linkage pointer value, 0 if this is the
9548 reference to the function entry value. RFLAG is 1 if this is a reduced
9549 reference (code address only), 0 if this is a full reference. */
9552 alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
9554 splay_tree_node cfunnode;
9555 struct alpha_funcs *cfaf;
9556 struct alpha_links *al;
9557 const char *name = XSTR (func, 0);
9559 cfaf = (struct alpha_funcs *) 0;
9560 al = (struct alpha_links *) 0;
9562 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9563 cfaf = (struct alpha_funcs *) cfunnode->value;
9567 splay_tree_node lnode;
9569 /* Is this name already defined? */
9571 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9573 al = (struct alpha_links *) lnode->value;
9576 cfaf->links = splay_tree_new_ggc
9577 ((splay_tree_compare_fn) strcmp,
9578 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9579 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9586 splay_tree_node node = 0;
9587 struct alpha_links *anl;
9592 name_len = strlen (name);
9593 linksym = (char *) alloca (name_len + 50);
9595 al = ggc_alloc_alpha_links ();
9596 al->num = cfaf->num;
9599 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9602 anl = (struct alpha_links *) node->value;
9603 al->lkind = anl->lkind;
9607 sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
9608 buflen = strlen (linksym);
9610 al->linkage = gen_rtx_SYMBOL_REF
9611 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9613 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9614 (splay_tree_value) al);
9618 al->rkind = KIND_CODEADDR;
9620 al->rkind = KIND_LINKAGE;
9623 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9629 alpha_write_one_linkage (splay_tree_node node, void *data)
9631 const char *const name = (const char *) node->key;
9632 struct alpha_links *link = (struct alpha_links *) node->value;
9633 FILE *stream = (FILE *) data;
9635 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9636 if (link->rkind == KIND_CODEADDR)
9638 if (link->lkind == KIND_LOCAL)
9640 /* Local and used */
9641 fprintf (stream, "\t.quad %s..en\n", name);
9645 /* External and used, request code address. */
9646 fprintf (stream, "\t.code_address %s\n", name);
9651 if (link->lkind == KIND_LOCAL)
9653 /* Local and used, build linkage pair. */
9654 fprintf (stream, "\t.quad %s..en\n", name);
9655 fprintf (stream, "\t.quad %s\n", name);
9659 /* External and used, request linkage pair. */
9660 fprintf (stream, "\t.linkage %s\n", name);
9668 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9670 splay_tree_node node;
9671 struct alpha_funcs *func;
9673 fprintf (stream, "\t.link\n");
9674 fprintf (stream, "\t.align 3\n");
9677 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9678 func = (struct alpha_funcs *) node->value;
  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
	   alpha_procedure_type == PT_STACK ? "stack"
	   : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (func->links)
    {
      splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
      /* splay_tree_delete (func->links); */
    }
}
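
/* For a stack-frame function FN that calls one external routine BAR,
   the directives emitted above would look roughly like this
   (illustrative only):

	.link
	.align 3
	.name FN..na
   FN:
	.pdesc FN..en,stack
   $1..BAR..lk:
	.linkage BAR
*/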

/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
		       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}
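
/* For instance, switching to a debug section named .debug_info with
   SECTION_DEBUG set in FLAGS emits (illustrative):

	.section	.debug_info,NOWRT
*/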

/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
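
/* Since UNITS_PER_WORD is 8 and BITS_PER_WORD is 64 on Alpha, a
   function marked __attribute__((constructor)) gets a full quadword
   entry in the constructor table, e.g. (illustrative):

	.quad init_fn
*/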

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
		    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
		   tree cfundecl ATTRIBUTE_UNUSED,
		   int lflag ATTRIBUTE_UNUSED,
		   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
      abort_libfunc = init_one_libfunc ("decc$abort");
      memcmp_libfunc = init_one_libfunc ("decc$memcmp");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}
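
/* With the optabs redirected as above, a 32-bit signed division such
   as "c = a / b;" compiled for VMS calls the VMS runtime entry
   OTS$DIV_I rather than the default libgcc routine (illustrative).  */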

/* On the Alpha, we use this to disable the floating-point registers
   when they don't exist.  */

static void
alpha_conditional_register_usage (void)
{
  int i;

  if (! TARGET_FPREGS)
    for (i = 32; i < 63; i++)
      fixed_regs[i] = call_used_regs[i] = 1;
}
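
/* Hard registers 32..62 are the floating-point registers $f0-$f30;
   register 63 ($f31) always reads as zero and is fixed unconditionally,
   so the loop above deliberately stops short of it.  */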

/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL alpha_builtin_decl
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
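
/* Illustrative note: on Alpha even data-dependent loads may be
   reordered, so after "p = shared_ptr; x = p->field;" the second load
   can observe older data than the first unless a memory barrier is
   issued; hence the compiler must not assume stronger ordering.  */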

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override

#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE alpha_option_optimization_table

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"