/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
#include "df.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */
struct alpha_compare alpha_compare;
/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type alpha_sr_alias_set;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};
static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one insn otherwise.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
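/* A worked example of the size-tuned entries above (illustrative
   arithmetic, not a statement about the hardware): COSTS_N_INSNS (n)
   expands to n * 4, so int_mult_di == COSTS_N_INSNS (1) + 2 == 6 sits
   strictly between one insn (4) and two insns (8).  A DImode multiply
   is therefore rewritten as shifts and adds only when the replacement
   costs at most one extra insn -- the "fractional" tie-breaking that
   the comment above describes.  */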
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
	target_flags |= MASK_SOFT_FP;
      break;

    case OPT_mieee:
    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;
      break;

    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
	error ("bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

void
override_options (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, 0 },
    { "21164",	PROCESSOR_EV5, 0 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX },
    { "21164a",	PROCESSOR_EV5, MASK_BWX },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int i;
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
	       (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;
  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SU;
	}
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SUI;
	}
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table [i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table [i].flags;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }
  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_tune = cpu_table [i].processor;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mcpu switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
     can be optimized to ap = __builtin_next_arg (0).  */
  if (TARGET_ABI_UNICOSMK)
    targetm.expand_builtin_va_start = NULL;
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
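/* Illustrative values: 0xffff00000000ffff is made up solely of 0x00 and
   0xff bytes, so zap_mask returns 1 (the constant is realizable with a
   single ZAPNOT); 0x0000000000ff0f00 contains the partial byte 0x0f, so
   zap_mask returns 0.  */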
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc[REGNO (tmp)];
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  On unicosmk, we have the situation that HImode
   doesn't map to any C type, but of course we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && crtl->outgoing_args_size == 0
	  && crtl->args.pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (JUMP_P (tmp)
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
	{
	  int count = 1;

	  for (j = i + 1; j < n_labels; j++)
	    if (XEXP (XVECEXP (jump_table, 1, i), 0)
		== XEXP (XVECEXP (jump_table, 1, j), 0))
	      count++;

	  if (count > best_count)
	    best_count = count, best_label = XVECEXP (jump_table, 1, i);
	}
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF   \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST                 \
       && GET_CODE (XEXP (X, 0)) == PLUS     \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */
bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x))
    return (strict
	    ? STRICT_REG_OK_FOR_BASE_P (x)
	    : NONSTRICT_REG_OK_FOR_BASE_P (x));

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && CONST_INT_P (ofs))
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }
  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
	   && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
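/* To summarize the cases above, these are the address shapes accepted
   (illustrative RTL; register numbers are arbitrary):

	(reg 16)				base register
	(plus (reg 16) (const_int 64))		base plus offset
	(and (plus (reg 16) (const_int 5))
	     (const_int -8))			DImode ldq_u form
	(lo_sum (reg 29) (symbol_ref ...))	explicit relocations  */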
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (rtx x, rtx scratch, enum machine_mode mode)
{
  HOST_WIDE_INT addend;
  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;
	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);
	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}
      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (can_create_pseudo_p ())
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;
 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
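/* Example of the split above (illustrative): for addend 0x1234ffff, LOW
   becomes ((0xffff ^ 0x8000) - 0x8000) == -1 and HIGH picks up the
   remaining 0x12350000, so the 32-bit constant folds into a single ldah
   of 0x1235 and the final address is (plus (reg) (const_int -1)).  */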
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
static int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }
  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts the number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
		 bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;
    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
			      (enum rtx_code) outer_code, speed)
		    + rtx_cost (XEXP (x, 1),
				(enum rtx_code) outer_code, speed)
		    + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;
    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;
    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  else
    offset *= 8;
  *pbitnum = GEN_INT (offset);
}
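/* An illustrative case: an HImode reference at (plus (reg) (const_int 6))
   with no declared alignment gives offset = 6 & 3 = 2, so *PALIGNED_MEM
   becomes the SImode word at displacement 4 and *PBITNUM is 16; on a
   little-endian target the halfword sits in bits 16-31 of that word.  */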
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_INT_P (x)
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
	return NO_REGS;
      if (rclass == ALL_REGS)
	return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static enum reg_class
alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
			enum machine_mode mode, secondary_reload_info *sri)
{
  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
	{
	  if (in_p)
	    {
	      if (!aligned_memory_operand (x, mode))
		sri->icode = reload_in_optab[mode];
	    }
	  else
	    sri->icode = reload_out_optab[mode];
	  return NO_REGS;
	}
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
	  && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (!MEM_P (x))
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
    else
      gcc_unreachable ();
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;
  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (!can_create_pseudo_p ())
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }
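  /* A concrete two-insn case (illustrative): c = 0x12345678 yields
     low = 0x5678, high = 0x1234, extra = 0, and we emit

	ldah $t,0x1234($31)
	lda  $r,0x5678($t)

     The extra = 0x4000 adjustment above fires for constants like
     c = 0x7fff8000, whose high part would otherwise be interpreted
     as negative.  */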
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}
      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp && c < 0)
	      {
		new_const = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }
      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
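/* An illustrative run of the ZAPNOT trick above: c = 0x1234000000005678
   has zero bytes in positions 2-5, so new_const becomes
   0x1234ffffffff5678; the closing AND mask c | ~new_const equals
   0xffff00000000ffff -- a zap_mask-style constant -- so the AND is a
   single ZAPNOT.  */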
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
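/* An illustrative decomposition (hypothetical constant): for
   c1 = 0x1234567887654321 we get d1 = 0x4321 and a sign-adjusted
   d2 = 0x87650000 - 0x100000000; the remaining upper half 0x12345679
   splits again into d3 = 0x5679 and d4 = 0x12340000.  The worst case
   is thus ldah+lda, a 32-bit shift, then ldah+lda again -- at most
   five instructions, with no searching.  */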
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   its value split into two HOST_WIDE_INT halves: the low part in *P0
   and the high part in *P1.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (CONST_INT_P (x))
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case CONST:
    case LABEL_REF:
    case HIGH:
      return true;

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      goto do_integer;

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      if (mode == QImode)
		seq = gen_reload_inqi_aligned (operands[0], operands[1]);
	      else
		seq = gen_reload_inhi_aligned (operands[0], operands[1]);
	      emit_insn (seq);
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (REG_P (subtarget))
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      if (mode == QImode)
		seq = gen_aligned_loadqi (subtarget, aligned_mem,
					  bitnum, scratch);
	      else
		seq = gen_aligned_loadhi (subtarget, aligned_mem,
					  bitnum, scratch);
	      emit_insn (seq);

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, subtarget, ua;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (REG_P (subtarget))
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  ua = get_unaligned_address (operands[1]);
	  if (mode == QImode)
	    seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
	  else
	    seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}
      return true;
    }
  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
	{
	  rtx aligned_mem, bitnum;
	  rtx temp1 = gen_reg_rtx (SImode);
	  rtx temp2 = gen_reg_rtx (SImode);

	  get_aligned_mem (operands[0], &aligned_mem, &bitnum);

	  emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
					temp1, temp2));
	}
      else
	{
	  rtx temp1 = gen_reg_rtx (DImode);
	  rtx temp2 = gen_reg_rtx (DImode);
	  rtx temp3 = gen_reg_rtx (DImode);
	  rtx ua = get_unaligned_address (operands[0]);

	  if (mode == QImode)
	    seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
	  else
	    seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

	  alpha_set_memflags (seq, operands[0]);
	  emit_insn (seq);
	}
      return true;
    }

  return false;
}
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
	tmp = operands[0];
      else
	tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
	emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
2331 /* Generate an unsigned DImode to FP conversion. This is the same code
2332 optabs would emit if we didn't have TFmode patterns.
2334 For SFmode, this is the only construction I've found that can pass
2335 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2336 intermediates will work, because you'll get intermediate rounding
2337 that ruins the end result. Some of this could be fixed by turning
2338 on round-to-positive-infinity, but that requires diddling the fpsr,
2339 which kills performance. I tried turning this around and converting
2340 to a negative number, so that I could turn on /m, but either I did
2341 it wrong or there's something else, because I wound up with the exact
2342 same single-bit error. There is a branch-less form of this same code:
2353 fcmoveq $f10,$f11,$f0
2355 I'm not using it because it's the same number of instructions as
2356 this branch-full form, and it has more serialized long latency
2357 instructions on the critical path.
2359 For DFmode, we can avoid rounding errors by breaking up the word
2360 into two pieces, converting them separately, and adding them back:
2362 LC0: .long 0,0x5f800000
2367 cpyse $f11,$f31,$f10
2368 cpyse $f31,$f11,$f11
2376 This doesn't seem to be a clear-cut win over the optabs form.
2377 It probably all depends on the distribution of numbers being
2378 converted -- in the optabs form, all but high-bit-set has a
2379 much lower minimum execution time. */
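/* A minimal C model of the sequence emitted below (a hypothetical
   helper for illustration only, not part of this file):

     double floatuns (unsigned long x)
     {
       if ((long) x >= 0)
         return (double) (long) x;     // plain signed conversion
       // Halve, fold the low bit back in so the final rounding
       // stays correct, convert, then double.
       long i = (long) ((x >> 1) | (x & 1));
       return (double) i + (double) i;
     }
*/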
2382 alpha_emit_floatuns (rtx operands[2])
2384 rtx neglab, donelab, i0, i1, f0, in, out;
2385 enum machine_mode mode;
2388 in = force_reg (DImode, operands[1]);
2389 mode = GET_MODE (out);
2390 neglab = gen_label_rtx ();
2391 donelab = gen_label_rtx ();
2392 i0 = gen_reg_rtx (DImode);
2393 i1 = gen_reg_rtx (DImode);
2394 f0 = gen_reg_rtx (mode);
2396 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2398 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2399 emit_jump_insn (gen_jump (donelab));
2402 emit_label (neglab);
2404 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2405 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2406 emit_insn (gen_iordi3 (i0, i0, i1));
2407 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2408 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2410 emit_label (donelab);
2413 /* Generate the comparison for a conditional branch. */
2416 alpha_emit_conditional_branch (enum rtx_code code)
2418 enum rtx_code cmp_code, branch_code;
2419 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2420 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2423 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2425 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2427 alpha_compare.fp_p = 0;
2430 /* The general case: fold the comparison code to the types of compares
2431 that we have, choosing the branch as necessary. */
2434 case EQ: case LE: case LT: case LEU: case LTU:
2436 /* We have these compares: */
2437 cmp_code = code, branch_code = NE;
2442 /* These must be reversed. */
2443 cmp_code = reverse_condition (code), branch_code = EQ;
2446 case GE: case GT: case GEU: case GTU:
2447 /* For FP, we swap them, for INT, we reverse them. */
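/* Example: for "a > b" the FP path emits "cmptlt b,a" and branches
   on a nonzero result, while the integer path emits "cmple a,b" and
   branches on zero (sketch; exact mnemonics depend on the operand
   mode).  */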
2448 if (alpha_compare.fp_p)
2450 cmp_code = swap_condition (code);
2452 tem = op0, op0 = op1, op1 = tem;
2456 cmp_code = reverse_condition (code);
2465 if (alpha_compare.fp_p)
2468 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2470 /* When we are not as concerned about non-finite values, and we
2471 are comparing against zero, we can branch directly. */
2472 if (op1 == CONST0_RTX (DFmode))
2473 cmp_code = UNKNOWN, branch_code = code;
2474 else if (op0 == CONST0_RTX (DFmode))
2476 /* Undo the swap we probably did just above. */
2477 tem = op0, op0 = op1, op1 = tem;
2478 branch_code = swap_condition (cmp_code);
2484 /* ??? We mark the branch mode to be CCmode to prevent the
2485 compare and branch from being combined, since the compare
2486 insn follows IEEE rules that the branch does not. */
2487 branch_mode = CCmode;
2494 /* The following optimizations are only for signed compares. */
2495 if (code != LEU && code != LTU && code != GEU && code != GTU)
2497 /* Whee. Compare and branch against 0 directly. */
2498 if (op1 == const0_rtx)
2499 cmp_code = UNKNOWN, branch_code = code;
2501 /* If the constant doesn't fit into an immediate, but can
2502 be generated by lda/ldah, we adjust the argument and
2503 compare against zero, so we can use beq/bne directly. */
2504 /* ??? Don't do this when comparing against symbols, otherwise
2505 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2506 be declared false out of hand (at least for non-weak). */
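/* Illustrative rewrite (constraint letters per alpha.md): for
   "x == 0x4000", 0x4000 does not satisfy the 8-bit constraint I,
   but -0x4000 satisfies K, so we emit "lda t,-0x4000(x)" followed
   by "beq t" rather than materializing 0x4000 in a register.  */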
2507 else if (CONST_INT_P (op1)
2508 && (code == EQ || code == NE)
2509 && !(symbolic_operand (op0, VOIDmode)
2510 || (REG_P (op0) && REG_POINTER (op0))))
2512 rtx n_op1 = GEN_INT (-INTVAL (op1));
2514 if (! satisfies_constraint_I (op1)
2515 && (satisfies_constraint_K (n_op1)
2516 || satisfies_constraint_L (n_op1)))
2517 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2521 if (!reg_or_0_operand (op0, DImode))
2522 op0 = force_reg (DImode, op0);
2523 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2524 op1 = force_reg (DImode, op1);
2527 /* Emit an initial compare instruction, if necessary. */
2529 if (cmp_code != UNKNOWN)
2531 tem = gen_reg_rtx (cmp_mode);
2532 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2535 /* Zero the operands. */
2536 memset (&alpha_compare, 0, sizeof (alpha_compare));
2538 /* Return the branch comparison. */
2539 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2542 /* Certain simplifications can be done to make invalid setcc operations
2543 valid. Return the final comparison, or NULL if we can't work. */
2546 alpha_emit_setcc (enum rtx_code code)
2548 enum rtx_code cmp_code;
2549 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2550 int fp_p = alpha_compare.fp_p;
2553 /* Zero the operands. */
2554 memset (&alpha_compare, 0, sizeof (alpha_compare));
2556 if (fp_p && GET_MODE (op0) == TFmode)
2558 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2563 if (fp_p && !TARGET_FIX)
2566 /* The general case: fold the comparison code to the types of compares
2567 that we have, choosing the branch as necessary. */
2572 case EQ: case LE: case LT: case LEU: case LTU:
2574 /* We have these compares. */
2576 cmp_code = code, code = NE;
2580 if (!fp_p && op1 == const0_rtx)
2585 cmp_code = reverse_condition (code);
2589 case GE: case GT: case GEU: case GTU:
2590 /* These normally need swapping, but for integer zero we have
2591 special patterns that recognize swapped operands. */
2592 if (!fp_p && op1 == const0_rtx)
2594 code = swap_condition (code);
2596 cmp_code = code, code = NE;
2597 tmp = op0, op0 = op1, op1 = tmp;
2606 if (!register_operand (op0, DImode))
2607 op0 = force_reg (DImode, op0);
2608 if (!reg_or_8bit_operand (op1, DImode))
2609 op1 = force_reg (DImode, op1);
2612 /* Emit an initial compare instruction, if necessary. */
2613 if (cmp_code != UNKNOWN)
2615 enum machine_mode mode = fp_p ? DFmode : DImode;
2617 tmp = gen_reg_rtx (mode);
2618 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2619 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2621 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2625 /* Return the setcc comparison. */
2626 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2630 /* Rewrite a comparison against zero CMP of the form
2631 (CODE (cc0) (const_int 0)) so it can be written validly in
2632 a conditional move (if_then_else CMP ...).
2633 If both of the operands that set cc0 are nonzero we must emit
2634 an insn to perform the compare (it can't be done within
2635 the conditional move). */
2638 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2640 enum rtx_code code = GET_CODE (cmp);
2641 enum rtx_code cmov_code = NE;
2642 rtx op0 = alpha_compare.op0;
2643 rtx op1 = alpha_compare.op1;
2644 int fp_p = alpha_compare.fp_p;
2645 enum machine_mode cmp_mode
2646 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2647 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2648 enum machine_mode cmov_mode = VOIDmode;
2649 int local_fast_math = flag_unsafe_math_optimizations;
2652 /* Zero the operands. */
2653 memset (&alpha_compare, 0, sizeof (alpha_compare));
2655 if (fp_p != FLOAT_MODE_P (mode))
2657 enum rtx_code cmp_code;
2662 /* If we have fp<->int register move instructions, do a cmov by
2663 performing the comparison in fp registers, and move the
2664 zero/nonzero value to integer registers, where we can then
2665 use a normal cmov, or vice-versa. */
2669 case EQ: case LE: case LT: case LEU: case LTU:
2670 /* We have these compares. */
2671 cmp_code = code, code = NE;
2675 /* This must be reversed. */
2676 cmp_code = EQ, code = EQ;
2679 case GE: case GT: case GEU: case GTU:
2680 /* These normally need swapping, but for integer zero we have
2681 special patterns that recognize swapped operands. */
2682 if (!fp_p && op1 == const0_rtx)
2683 cmp_code = code, code = NE;
2686 cmp_code = swap_condition (code);
2688 tem = op0, op0 = op1, op1 = tem;
2696 tem = gen_reg_rtx (cmp_op_mode);
2697 emit_insn (gen_rtx_SET (VOIDmode, tem,
2698 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2701 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2702 op0 = gen_lowpart (cmp_op_mode, tem);
2703 op1 = CONST0_RTX (cmp_op_mode);
2705 local_fast_math = 1;
2708 /* We may be able to use a conditional move directly.
2709 This avoids emitting spurious compares. */
2710 if (signed_comparison_operator (cmp, VOIDmode)
2711 && (!fp_p || local_fast_math)
2712 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2713 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2715 /* We can't put the comparison inside the conditional move;
2716 emit a compare instruction and put that inside the
2717 conditional move. Make sure we emit only comparisons we have;
2718 swap or reverse as necessary. */
2720 if (!can_create_pseudo_p ())
2725 case EQ: case LE: case LT: case LEU: case LTU:
2726 /* We have these compares: */
2730 /* This must be reversed. */
2731 code = reverse_condition (code);
2735 case GE: case GT: case GEU: case GTU:
2736 /* These must be swapped. */
2737 if (op1 != CONST0_RTX (cmp_mode))
2739 code = swap_condition (code);
2740 tem = op0, op0 = op1, op1 = tem;
2750 if (!reg_or_0_operand (op0, DImode))
2751 op0 = force_reg (DImode, op0);
2752 if (!reg_or_8bit_operand (op1, DImode))
2753 op1 = force_reg (DImode, op1);
2756 /* ??? We mark the branch mode to be CCmode to prevent the compare
2757 and cmov from being combined, since the compare insn follows IEEE
2758 rules that the cmov does not. */
2759 if (fp_p && !local_fast_math)
2762 tem = gen_reg_rtx (cmp_op_mode);
2763 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2764 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2767 /* Simplify a conditional move of two constants into a setcc with
2768 arithmetic. This is done with a splitter since combine would
2769 just undo the work if done during code generation. It also catches
2770 cases we wouldn't have before cse. */
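/* Two illustrative splits (assuming the shift case is viable on the
   tuned CPU, as tested below):
     r = c ? 8 : 0   becomes   setcc t,c ; sll t,3,r
     r = c ? 5 : 1   becomes   setcc t,c ; s4addq t,1,r
   where "setcc" stands for whichever comparison insn this function
   selects.  */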
2773 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2774 rtx t_rtx, rtx f_rtx)
2776 HOST_WIDE_INT t, f, diff;
2777 enum machine_mode mode;
2778 rtx target, subtarget, tmp;
2780 mode = GET_MODE (dest);
2785 if (((code == NE || code == EQ) && diff < 0)
2786 || (code == GE || code == GT))
2788 code = reverse_condition (code);
2789 diff = t, t = f, f = diff;
2793 subtarget = target = dest;
2796 target = gen_lowpart (DImode, dest);
2797 if (can_create_pseudo_p ())
2798 subtarget = gen_reg_rtx (DImode);
2802 /* Below, we must be careful to use copy_rtx on target and subtarget
2803 in intermediate insns, as they may be a subreg rtx, which may not
be considered a copy of each other. */
2806 if (f == 0 && exact_log2 (diff) > 0
2807 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2808 viable over a longer latency cmove. On EV5, the E0 slot is a
2809 scarce resource, and on EV4 shift has the same latency as a cmove. */
2810 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2812 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2813 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2815 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2816 GEN_INT (exact_log2 (t)));
2817 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2819 else if (f == 0 && t == -1)
2821 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2822 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2824 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2826 else if (diff == 1 || diff == 4 || diff == 8)
2830 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2831 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2834 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2837 add_op = GEN_INT (f);
2838 if (sext_add_operand (add_op, mode))
2840 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2842 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2843 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2855 /* Look up the X_floating library function name for the given
operation. */
2858 struct GTY(()) xfloating_op
2860 const enum rtx_code code;
2861 const char *const GTY((skip)) osf_func;
2862 const char *const GTY((skip)) vms_func;
2866 static GTY(()) struct xfloating_op xfloating_ops[] =
2868 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2869 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2870 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2871 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2872 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2873 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2874 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2875 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2876 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2877 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2878 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2879 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2880 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2881 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2882 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2885 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2887 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2888 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2892 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2894 struct xfloating_op *ops = xfloating_ops;
2895 long n = ARRAY_SIZE (xfloating_ops);
2898 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2900 /* How irritating. Nothing to key off for the main table. */
2901 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2904 n = ARRAY_SIZE (vax_cvt_ops);
2907 for (i = 0; i < n; ++i, ++ops)
2908 if (ops->code == code)
2910 rtx func = ops->libcall;
2913 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2914 ? ops->vms_func : ops->osf_func);
2915 ops->libcall = func;
2923 /* Most X_floating operations take the rounding mode as an argument.
2924 Compute that here. */
2927 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2928 enum alpha_fp_rounding_mode round)
2934 case ALPHA_FPRM_NORM:
2937 case ALPHA_FPRM_MINF:
2940 case ALPHA_FPRM_CHOP:
2943 case ALPHA_FPRM_DYN:
2949 /* XXX For reference, round to +inf is mode = 3. */
2952 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2958 /* Emit an X_floating library function call.
2960 Note that these functions do not follow normal calling conventions:
2961 TFmode arguments are passed in two integer registers (as opposed to
2962 indirect); TFmode return values appear in R16+R17.
2964 FUNC is the function to call.
2965 TARGET is where the output belongs.
2966 OPERANDS are the inputs.
2967 NOPERANDS is the count of inputs.
2968 EQUIV is the expression equivalent for the function.
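/* Example (register assignment inferred from the loop below, so
   treat it as a sketch): a TFmode add through _OtsAddX passes
   operand one in $16:$17, operand two in $18:$19, and the
   rounding-mode constant in $20; the TFmode result comes back in
   $16:$17.  */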
2972 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2973 int noperands, rtx equiv)
2975 rtx usage = NULL_RTX, tmp, reg;
2980 for (i = 0; i < noperands; ++i)
2982 switch (GET_MODE (operands[i]))
2985 reg = gen_rtx_REG (TFmode, regno);
2990 reg = gen_rtx_REG (DFmode, regno + 32);
2995 gcc_assert (CONST_INT_P (operands[i]));
2998 reg = gen_rtx_REG (DImode, regno);
3006 emit_move_insn (reg, operands[i]);
3007 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3010 switch (GET_MODE (target))
3013 reg = gen_rtx_REG (TFmode, 16);
3016 reg = gen_rtx_REG (DFmode, 32);
3019 reg = gen_rtx_REG (DImode, 0);
3025 tmp = gen_rtx_MEM (QImode, func);
3026 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3027 const0_rtx, const0_rtx));
3028 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3029 RTL_CONST_CALL_P (tmp) = 1;
3034 emit_libcall_block (tmp, target, reg, equiv);
3037 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3040 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3044 rtx out_operands[3];
3046 func = alpha_lookup_xfloating_lib_func (code);
3047 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3049 out_operands[0] = operands[1];
3050 out_operands[1] = operands[2];
3051 out_operands[2] = GEN_INT (mode);
3052 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3053 gen_rtx_fmt_ee (code, TFmode, operands[1],
3057 /* Emit an X_floating library function call for a comparison. */
3060 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3062 enum rtx_code cmp_code, res_code;
3063 rtx func, out, operands[2], note;
3065 /* X_floating library comparison functions return
	-1  unordered
	 0  false
	 1  true
3069 Convert the compare against the raw return value. */
3097 func = alpha_lookup_xfloating_lib_func (cmp_code);
3101 out = gen_reg_rtx (DImode);
3103 /* What's actually returned is -1,0,1, not a proper boolean value,
3104 so use an EXPR_LIST as with a generic libcall instead of a
3105 comparison type expression. */
3106 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3107 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3108 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3109 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3114 /* Emit an X_floating library function call for a conversion. */
3117 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3119 int noperands = 1, mode;
3120 rtx out_operands[2];
3122 enum rtx_code code = orig_code;
3124 if (code == UNSIGNED_FIX)
3127 func = alpha_lookup_xfloating_lib_func (code);
3129 out_operands[0] = operands[1];
3134 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3135 out_operands[1] = GEN_INT (mode);
3138 case FLOAT_TRUNCATE:
3139 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3140 out_operands[1] = GEN_INT (mode);
3147 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3148 gen_rtx_fmt_e (orig_code,
3149 GET_MODE (operands[0]),
3153 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3154 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3155 guarantee that the sequence
3158 is valid. Naturally, output operand ordering is little-endian.
3159 This is used by *movtf_internal and *movti_internal. */
3162 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3165 switch (GET_CODE (operands[1]))
3168 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3169 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3173 operands[3] = adjust_address (operands[1], DImode, 8);
3174 operands[2] = adjust_address (operands[1], DImode, 0);
3179 gcc_assert (operands[1] == CONST0_RTX (mode));
3180 operands[2] = operands[3] = const0_rtx;
3187 switch (GET_CODE (operands[0]))
3190 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3191 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3195 operands[1] = adjust_address (operands[0], DImode, 8);
3196 operands[0] = adjust_address (operands[0], DImode, 0);
3203 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3206 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3207 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3211 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3212 op2 is a register containing the sign bit, operation is the
3213 logical operation to be performed. */
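/* For example, negtf2 resolves to an XOR of bit 63 of the upper
   DImode half (the TFmode sign bit) while the lower half is copied
   through unchanged; roughly:
     dest_lo = src_lo;
     dest_hi = src_hi ^ ((unsigned HOST_WIDE_INT) 1 << 63);  */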
3216 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3218 rtx high_bit = operands[2];
3222 alpha_split_tmode_pair (operands, TFmode, false);
3224 /* Detect three flavors of operand overlap. */
3226 if (rtx_equal_p (operands[0], operands[2]))
3228 else if (rtx_equal_p (operands[1], operands[2]))
3230 if (rtx_equal_p (operands[0], high_bit))
3237 emit_move_insn (operands[0], operands[2]);
3239 /* ??? If the destination overlaps both source tf and high_bit, then
3240 assume source tf is dead in its entirety and use the other half
3241 for a scratch register. Otherwise "scratch" is just the proper
3242 destination register. */
3243 scratch = operands[move < 2 ? 1 : 3];
3245 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3249 emit_move_insn (operands[0], operands[2]);
3251 emit_move_insn (operands[1], scratch);
3255 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
unaligned data (left column below: unsigned, right column: signed):
3259 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3260 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3261 lda r3,X(r11) lda r3,X+2(r11)
3262 extwl r1,r3,r1 extql r1,r3,r1
3263 extwh r2,r3,r2 extqh r2,r3,r2
3264 or r1,r2,r1 or r1,r2,r1
3267 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3268 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3269 lda r3,X(r11) lda r3,X(r11)
3270 extll r1,r3,r1 extll r1,r3,r1
3271 extlh r2,r3,r2 extlh r2,r3,r2
3272 or r1,r2,r1 addl r1,r2,r1
3274 quad: ldq_u r1,X(r11)
3283 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3284 HOST_WIDE_INT ofs, int sign)
3286 rtx meml, memh, addr, extl, exth, tmp, mema;
3287 enum machine_mode mode;
3289 if (TARGET_BWX && size == 2)
3291 meml = adjust_address (mem, QImode, ofs);
3292 memh = adjust_address (mem, QImode, ofs+1);
3293 if (BYTES_BIG_ENDIAN)
3294 tmp = meml, meml = memh, memh = tmp;
3295 extl = gen_reg_rtx (DImode);
3296 exth = gen_reg_rtx (DImode);
3297 emit_insn (gen_zero_extendqidi2 (extl, meml));
3298 emit_insn (gen_zero_extendqidi2 (exth, memh));
3299 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3300 NULL, 1, OPTAB_LIB_WIDEN);
3301 addr = expand_simple_binop (DImode, IOR, extl, exth,
3302 NULL, 1, OPTAB_LIB_WIDEN);
3304 if (sign && GET_MODE (tgt) != HImode)
3306 addr = gen_lowpart (HImode, addr);
3307 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3311 if (GET_MODE (tgt) != DImode)
3312 addr = gen_lowpart (GET_MODE (tgt), addr);
3313 emit_move_insn (tgt, addr);
3318 meml = gen_reg_rtx (DImode);
3319 memh = gen_reg_rtx (DImode);
3320 addr = gen_reg_rtx (DImode);
3321 extl = gen_reg_rtx (DImode);
3322 exth = gen_reg_rtx (DImode);
3324 mema = XEXP (mem, 0);
3325 if (GET_CODE (mema) == LO_SUM)
3326 mema = force_reg (Pmode, mema);
3328 /* AND addresses cannot be in any alias set, since they may implicitly
3329 alias surrounding code. Ideally we'd have some alias set that
3330 covered all types except those with alignment 8 or higher. */
3332 tmp = change_address (mem, DImode,
3333 gen_rtx_AND (DImode,
3334 plus_constant (mema, ofs),
3336 set_mem_alias_set (tmp, 0);
3337 emit_move_insn (meml, tmp);
3339 tmp = change_address (mem, DImode,
3340 gen_rtx_AND (DImode,
3341 plus_constant (mema, ofs + size - 1),
3343 set_mem_alias_set (tmp, 0);
3344 emit_move_insn (memh, tmp);
3346 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3348 emit_move_insn (addr, plus_constant (mema, -1));
3350 emit_insn (gen_extqh_be (extl, meml, addr));
3351 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3353 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3354 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3355 addr, 1, OPTAB_WIDEN);
3357 else if (sign && size == 2)
3359 emit_move_insn (addr, plus_constant (mema, ofs+2));
3361 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3362 emit_insn (gen_extqh_le (exth, memh, addr));
3364 /* We must use tgt here for the target. Alpha-vms port fails if we use
3365 addr for the target, because addr is marked as a pointer and combine
3366 knows that pointers are always sign-extended 32-bit values. */
3367 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3368 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3369 addr, 1, OPTAB_WIDEN);
3373 if (WORDS_BIG_ENDIAN)
3375 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3379 emit_insn (gen_extwh_be (extl, meml, addr));
3384 emit_insn (gen_extlh_be (extl, meml, addr));
3389 emit_insn (gen_extqh_be (extl, meml, addr));
3396 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3400 emit_move_insn (addr, plus_constant (mema, ofs));
3401 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3405 emit_insn (gen_extwh_le (exth, memh, addr));
3410 emit_insn (gen_extlh_le (exth, memh, addr));
3415 emit_insn (gen_extqh_le (exth, memh, addr));
3424 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3425 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3430 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3433 /* Similarly, use ins and msk instructions to perform unaligned stores. */
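/* Sketch of the non-BWX sequence built below for a 2-byte store to
   an arbitrary address X (illustrative registers):

	ldq_u	r1,X(r11)	// low enclosing quadword
	ldq_u	r2,X+1(r11)	// high enclosing quadword
	lda	r3,X(r11)
	inswl	r4,r3,r5	// shift the data into the low word
	inswh	r4,r3,r6	// ... and into the high word
	mskwl	r1,r3,r1	// clear the bytes being stored
	mskwh	r2,r3,r2
	or	r1,r5,r1
	or	r2,r6,r2
	stq_u	r2,X+1(r11)	// store high first for the
	stq_u	r1,X(r11)	// degenerate aligned case
*/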
3436 alpha_expand_unaligned_store (rtx dst, rtx src,
3437 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3439 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3441 if (TARGET_BWX && size == 2)
3443 if (src != const0_rtx)
3445 dstl = gen_lowpart (QImode, src);
3446 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3447 NULL, 1, OPTAB_LIB_WIDEN);
3448 dsth = gen_lowpart (QImode, dsth);
3451 dstl = dsth = const0_rtx;
3453 meml = adjust_address (dst, QImode, ofs);
3454 memh = adjust_address (dst, QImode, ofs+1);
3455 if (BYTES_BIG_ENDIAN)
3456 addr = meml, meml = memh, memh = addr;
3458 emit_move_insn (meml, dstl);
3459 emit_move_insn (memh, dsth);
3463 dstl = gen_reg_rtx (DImode);
3464 dsth = gen_reg_rtx (DImode);
3465 insl = gen_reg_rtx (DImode);
3466 insh = gen_reg_rtx (DImode);
3468 dsta = XEXP (dst, 0);
3469 if (GET_CODE (dsta) == LO_SUM)
3470 dsta = force_reg (Pmode, dsta);
3472 /* AND addresses cannot be in any alias set, since they may implicitly
3473 alias surrounding code. Ideally we'd have some alias set that
3474 covered all types except those with alignment 8 or higher. */
3476 meml = change_address (dst, DImode,
3477 gen_rtx_AND (DImode,
3478 plus_constant (dsta, ofs),
3480 set_mem_alias_set (meml, 0);
3482 memh = change_address (dst, DImode,
3483 gen_rtx_AND (DImode,
3484 plus_constant (dsta, ofs + size - 1),
3486 set_mem_alias_set (memh, 0);
3488 emit_move_insn (dsth, memh);
3489 emit_move_insn (dstl, meml);
3490 if (WORDS_BIG_ENDIAN)
3492 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3494 if (src != const0_rtx)
3499 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3502 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3505 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3508 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3509 GEN_INT (size*8), addr));
3515 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3519 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3520 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3524 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3528 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3532 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3534 if (src != CONST0_RTX (GET_MODE (src)))
3536 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3537 GEN_INT (size*8), addr));
3542 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3545 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3548 emit_insn (gen_insql_le (insl, src, addr));
3553 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3558 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3562 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3563 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3567 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3572 if (src != CONST0_RTX (GET_MODE (src)))
3574 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3575 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3578 if (WORDS_BIG_ENDIAN)
3580 emit_move_insn (meml, dstl);
3581 emit_move_insn (memh, dsth);
3585 /* Must store high before low for degenerate case of aligned. */
3586 emit_move_insn (memh, dsth);
3587 emit_move_insn (meml, dstl);
3591 /* The block move code tries to maximize speed by separating loads and
3592 stores at the expense of register pressure: we load all of the data
3593 before we store it back out. There are two secondary effects worth
3594 mentioning: it speeds copying to/from aligned and unaligned
3595 buffers, and it makes the code significantly easier to write. */
3597 #define MAX_MOVE_WORDS 8
3599 /* Load an integral number of consecutive unaligned quadwords. */
3602 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3603 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3605 rtx const im8 = GEN_INT (-8);
3606 rtx const i64 = GEN_INT (64);
3607 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3608 rtx sreg, areg, tmp, smema;
3611 smema = XEXP (smem, 0);
3612 if (GET_CODE (smema) == LO_SUM)
3613 smema = force_reg (Pmode, smema);
3615 /* Generate all the tmp registers we need. */
3616 for (i = 0; i < words; ++i)
3618 data_regs[i] = out_regs[i];
3619 ext_tmps[i] = gen_reg_rtx (DImode);
3621 data_regs[words] = gen_reg_rtx (DImode);
3624 smem = adjust_address (smem, GET_MODE (smem), ofs);
3626 /* Load up all of the source data. */
3627 for (i = 0; i < words; ++i)
3629 tmp = change_address (smem, DImode,
3630 gen_rtx_AND (DImode,
3631 plus_constant (smema, 8*i),
3633 set_mem_alias_set (tmp, 0);
3634 emit_move_insn (data_regs[i], tmp);
3637 tmp = change_address (smem, DImode,
3638 gen_rtx_AND (DImode,
3639 plus_constant (smema, 8*words - 1),
3641 set_mem_alias_set (tmp, 0);
3642 emit_move_insn (data_regs[words], tmp);
3644 /* Extract the half-word fragments. Unfortunately DEC decided to make
3645 extxh with offset zero a noop instead of zeroing the register, so
3646 we must take care of that edge condition ourselves with cmov. */
3648 sreg = copy_addr_to_reg (smema);
3649 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3651 if (WORDS_BIG_ENDIAN)
3652 emit_move_insn (sreg, plus_constant (sreg, 7));
3653 for (i = 0; i < words; ++i)
3655 if (WORDS_BIG_ENDIAN)
3657 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3658 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3662 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3663 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3665 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3666 gen_rtx_IF_THEN_ELSE (DImode,
3667 gen_rtx_EQ (DImode, areg,
3669 const0_rtx, ext_tmps[i])));
3672 /* Merge the half-words into whole words. */
3673 for (i = 0; i < words; ++i)
3675 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3676 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3680 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3681 may be NULL to store zeros. */
3684 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3685 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3687 rtx const im8 = GEN_INT (-8);
3688 rtx const i64 = GEN_INT (64);
3689 rtx ins_tmps[MAX_MOVE_WORDS];
3690 rtx st_tmp_1, st_tmp_2, dreg;
3691 rtx st_addr_1, st_addr_2, dmema;
3694 dmema = XEXP (dmem, 0);
3695 if (GET_CODE (dmema) == LO_SUM)
3696 dmema = force_reg (Pmode, dmema);
3698 /* Generate all the tmp registers we need. */
3699 if (data_regs != NULL)
3700 for (i = 0; i < words; ++i)
3701 ins_tmps[i] = gen_reg_rtx(DImode);
3702 st_tmp_1 = gen_reg_rtx(DImode);
3703 st_tmp_2 = gen_reg_rtx(DImode);
3706 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3708 st_addr_2 = change_address (dmem, DImode,
3709 gen_rtx_AND (DImode,
3710 plus_constant (dmema, words*8 - 1),
3712 set_mem_alias_set (st_addr_2, 0);
3714 st_addr_1 = change_address (dmem, DImode,
3715 gen_rtx_AND (DImode, dmema, im8));
3716 set_mem_alias_set (st_addr_1, 0);
3718 /* Load up the destination end bits. */
3719 emit_move_insn (st_tmp_2, st_addr_2);
3720 emit_move_insn (st_tmp_1, st_addr_1);
3722 /* Shift the input data into place. */
3723 dreg = copy_addr_to_reg (dmema);
3724 if (WORDS_BIG_ENDIAN)
3725 emit_move_insn (dreg, plus_constant (dreg, 7));
3726 if (data_regs != NULL)
3728 for (i = words-1; i >= 0; --i)
3730 if (WORDS_BIG_ENDIAN)
3732 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3733 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3737 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3738 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3741 for (i = words-1; i > 0; --i)
3743 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3744 ins_tmps[i-1], ins_tmps[i-1], 1,
3749 /* Split and merge the ends with the destination data. */
3750 if (WORDS_BIG_ENDIAN)
3752 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3753 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3757 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3758 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3761 if (data_regs != NULL)
3763 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3764 st_tmp_2, 1, OPTAB_WIDEN);
3765 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3766 st_tmp_1, 1, OPTAB_WIDEN);
3770 if (WORDS_BIG_ENDIAN)
3771 emit_move_insn (st_addr_1, st_tmp_1);
3773 emit_move_insn (st_addr_2, st_tmp_2);
3774 for (i = words-1; i > 0; --i)
3776 rtx tmp = change_address (dmem, DImode,
3777 gen_rtx_AND (DImode,
3778 plus_constant(dmema,
3779 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3781 set_mem_alias_set (tmp, 0);
3782 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3784 if (WORDS_BIG_ENDIAN)
3785 emit_move_insn (st_addr_2, st_tmp_2);
3787 emit_move_insn (st_addr_1, st_tmp_1);
3791 /* Expand string/block move operations.
3793 operands[0] is the pointer to the destination.
3794 operands[1] is the pointer to the source.
3795 operands[2] is the number of bytes to move.
3796 operands[3] is the alignment. */
3799 alpha_expand_block_move (rtx operands[])
3801 rtx bytes_rtx = operands[2];
3802 rtx align_rtx = operands[3];
3803 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3804 HOST_WIDE_INT bytes = orig_bytes;
3805 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3806 HOST_WIDE_INT dst_align = src_align;
3807 rtx orig_src = operands[1];
3808 rtx orig_dst = operands[0];
3809 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3811 unsigned int i, words, ofs, nregs = 0;
3813 if (orig_bytes <= 0)
3815 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3818 /* Look for additional alignment information from recorded register info. */
3820 tmp = XEXP (orig_src, 0);
3822 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3823 else if (GET_CODE (tmp) == PLUS
3824 && REG_P (XEXP (tmp, 0))
3825 && CONST_INT_P (XEXP (tmp, 1)))
3827 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3828 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3832 if (a >= 64 && c % 8 == 0)
3834 else if (a >= 32 && c % 4 == 0)
3836 else if (a >= 16 && c % 2 == 0)
3841 tmp = XEXP (orig_dst, 0);
3843 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3844 else if (GET_CODE (tmp) == PLUS
3845 && REG_P (XEXP (tmp, 0))
3846 && CONST_INT_P (XEXP (tmp, 1)))
3848 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3849 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3853 if (a >= 64 && c % 8 == 0)
3855 else if (a >= 32 && c % 4 == 0)
3857 else if (a >= 16 && c % 2 == 0)
3863 if (src_align >= 64 && bytes >= 8)
3867 for (i = 0; i < words; ++i)
3868 data_regs[nregs + i] = gen_reg_rtx (DImode);
3870 for (i = 0; i < words; ++i)
3871 emit_move_insn (data_regs[nregs + i],
3872 adjust_address (orig_src, DImode, ofs + i * 8));
3879 if (src_align >= 32 && bytes >= 4)
3883 for (i = 0; i < words; ++i)
3884 data_regs[nregs + i] = gen_reg_rtx (SImode);
3886 for (i = 0; i < words; ++i)
3887 emit_move_insn (data_regs[nregs + i],
3888 adjust_address (orig_src, SImode, ofs + i * 4));
3899 for (i = 0; i < words+1; ++i)
3900 data_regs[nregs + i] = gen_reg_rtx (DImode);
3902 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3910 if (! TARGET_BWX && bytes >= 4)
3912 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3913 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3920 if (src_align >= 16)
3923 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3924 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3927 } while (bytes >= 2);
3929 else if (! TARGET_BWX)
3931 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3932 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3940 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3941 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3946 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3948 /* Now save it back out again. */
3952 /* Write out the data in whatever chunks reading the source allowed. */
3953 if (dst_align >= 64)
3955 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3957 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3964 if (dst_align >= 32)
3966 /* If the source has remaining DImode regs, write them out in
two pieces. */
3968 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3970 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3971 NULL_RTX, 1, OPTAB_WIDEN);
3973 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3974 gen_lowpart (SImode, data_regs[i]));
3975 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3976 gen_lowpart (SImode, tmp));
3981 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3983 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3990 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3992 /* Write out a remaining block of words using unaligned methods. */
3994 for (words = 1; i + words < nregs; words++)
3995 if (GET_MODE (data_regs[i + words]) != DImode)
3999 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4001 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4008 /* Due to the above, this won't be aligned. */
4009 /* ??? If we have more than one of these, consider constructing full
4010 words in registers and using alpha_expand_unaligned_store_words. */
4011 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4013 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4018 if (dst_align >= 16)
4019 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4021 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4026 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4028 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4033 /* The remainder must be byte copies. */
4036 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4037 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4046 alpha_expand_block_clear (rtx operands[])
4048 rtx bytes_rtx = operands[1];
4049 rtx align_rtx = operands[3];
4050 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4051 HOST_WIDE_INT bytes = orig_bytes;
4052 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4053 HOST_WIDE_INT alignofs = 0;
4054 rtx orig_dst = operands[0];
4056 int i, words, ofs = 0;
4058 if (orig_bytes <= 0)
4060 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4063 /* Look for stricter alignment. */
4064 tmp = XEXP (orig_dst, 0);
4066 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4067 else if (GET_CODE (tmp) == PLUS
4068 && REG_P (XEXP (tmp, 0))
4069 && CONST_INT_P (XEXP (tmp, 1)))
4071 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4072 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4077 align = a, alignofs = 8 - c % 8;
4079 align = a, alignofs = 4 - c % 4;
4081 align = a, alignofs = 2 - c % 2;
4085 /* Handle an unaligned prefix first. */
4089 #if HOST_BITS_PER_WIDE_INT >= 64
4090 /* Given that alignofs is bounded by align, the only time BWX could
4091 generate three stores is for a 7 byte fill. Prefer two individual
4092 stores over a load/mask/store sequence. */
4093 if ((!TARGET_BWX || alignofs == 7)
4095 && !(alignofs == 4 && bytes >= 4))
4097 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4098 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4102 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4103 set_mem_alias_set (mem, 0);
4105 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4106 if (bytes < alignofs)
4108 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4119 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4120 NULL_RTX, 1, OPTAB_WIDEN);
4122 emit_move_insn (mem, tmp);
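/* Worked example of the mask above: clearing a two-byte prefix of a
   quadword-aligned run gives alignofs == 2 and inv_alignofs == 6,
   so mask == 0x0000ffffffffffff and the AND keeps the low six bytes
   of the enclosing quadword while zeroing the two prefix bytes.  */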
4126 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4128 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4133 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4135 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4140 if (alignofs == 4 && bytes >= 4)
4142 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4148 /* If we've not used the extra lead alignment information by now,
4149 we won't be able to. Downgrade align to match what's left over. */
4152 alignofs = alignofs & -alignofs;
4153 align = MIN (align, alignofs * BITS_PER_UNIT);
4157 /* Handle a block of contiguous quadwords. */
4159 if (align >= 64 && bytes >= 8)
4163 for (i = 0; i < words; ++i)
4164 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4171 /* If the block is large and appropriately aligned, emit a single
4172 store followed by a sequence of stq_u insns. */
4174 if (align >= 32 && bytes > 16)
4178 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4182 orig_dsta = XEXP (orig_dst, 0);
4183 if (GET_CODE (orig_dsta) == LO_SUM)
4184 orig_dsta = force_reg (Pmode, orig_dsta);
4187 for (i = 0; i < words; ++i)
4190 = change_address (orig_dst, DImode,
4191 gen_rtx_AND (DImode,
4192 plus_constant (orig_dsta, ofs + i*8),
4194 set_mem_alias_set (mem, 0);
4195 emit_move_insn (mem, const0_rtx);
4198 /* Depending on the alignment, the first stq_u may have overlapped
4199 with the initial stl, which means that the last stq_u didn't
4200 write as much as it would appear. Leave those questionable bytes
unaccounted for. */
4202 bytes -= words * 8 - 4;
4203 ofs += words * 8 - 4;
4206 /* Handle a smaller block of aligned words. */
4208 if ((align >= 64 && bytes == 4)
4209 || (align == 32 && bytes >= 4))
4213 for (i = 0; i < words; ++i)
4214 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4221 /* An unaligned block uses stq_u stores for as many as possible. */
4227 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4233 /* Next clean up any trailing pieces. */
4235 #if HOST_BITS_PER_WIDE_INT >= 64
4236 /* Count the number of bits set in BYTES for which aligned stores could
be issued. */
4239 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4243 /* If we have appropriate alignment (and it wouldn't take too many
4244 instructions otherwise), mask out the bytes we need. */
4245 if (TARGET_BWX ? words > 2 : bytes > 0)
4252 mem = adjust_address (orig_dst, DImode, ofs);
4253 set_mem_alias_set (mem, 0);
4255 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4257 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4258 NULL_RTX, 1, OPTAB_WIDEN);
4260 emit_move_insn (mem, tmp);
4263 else if (align >= 32 && bytes < 4)
4268 mem = adjust_address (orig_dst, SImode, ofs);
4269 set_mem_alias_set (mem, 0);
4271 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4273 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4274 NULL_RTX, 1, OPTAB_WIDEN);
4276 emit_move_insn (mem, tmp);
4282 if (!TARGET_BWX && bytes >= 4)
4284 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4294 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4298 } while (bytes >= 2);
4300 else if (! TARGET_BWX)
4302 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4310 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4318 /* Returns a mask so that zap(x, value) == x & mask. */
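/* For example, value == 0x0f marks bytes 0-3 for zapping, so the
   mask returned is 0xffffffff00000000 and zap (x, 0x0f) == x & mask
   clears the low four bytes.  */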
4321 alpha_expand_zap_mask (HOST_WIDE_INT value)
4326 if (HOST_BITS_PER_WIDE_INT >= 64)
4328 HOST_WIDE_INT mask = 0;
4330 for (i = 7; i >= 0; --i)
4333 if (!((value >> i) & 1))
4337 result = gen_int_mode (mask, DImode);
4341 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4343 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4345 for (i = 7; i >= 4; --i)
4348 if (!((value >> i) & 1))
4352 for (i = 3; i >= 0; --i)
4355 if (!((value >> i) & 1))
4359 result = immed_double_const (mask_lo, mask_hi, DImode);
4366 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4367 enum machine_mode mode,
4368 rtx op0, rtx op1, rtx op2)
4370 op0 = gen_lowpart (mode, op0);
4372 if (op1 == const0_rtx)
4373 op1 = CONST0_RTX (mode);
4375 op1 = gen_lowpart (mode, op1);
4377 if (op2 == const0_rtx)
4378 op2 = CONST0_RTX (mode);
4380 op2 = gen_lowpart (mode, op2);
4382 emit_insn ((*gen) (op0, op1, op2));
4385 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4386 COND is true. Mark the jump as unlikely to be taken. */
4389 emit_unlikely_jump (rtx cond, rtx label)
4391 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4394 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4395 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4396 add_reg_note (x, REG_BR_PROB, very_unlikely);
4399 /* A subroutine of the atomic operation splitters. Emit a load-locked
4400 instruction in MODE. */
4403 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4405 rtx (*fn) (rtx, rtx) = NULL;
4407 fn = gen_load_locked_si;
4408 else if (mode == DImode)
4409 fn = gen_load_locked_di;
4410 emit_insn (fn (reg, mem));
4413 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4414 instruction in MODE. */
4417 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4419 rtx (*fn) (rtx, rtx, rtx) = NULL;
4421 fn = gen_store_conditional_si;
4422 else if (mode == DImode)
4423 fn = gen_store_conditional_di;
4424 emit_insn (fn (res, mem, val));
4427 /* A subroutine of the atomic operation splitters. Emit an insxl
4428 instruction in MODE. */
4431 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4433 rtx ret = gen_reg_rtx (DImode);
4434 rtx (*fn) (rtx, rtx, rtx);
4436 if (WORDS_BIG_ENDIAN)
4450 /* The insbl and inswl patterns require a register operand. */
4451 op1 = force_reg (mode, op1);
4452 emit_insn (fn (ret, op1, op2));
4457 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4458 to perform. MEM is the memory on which to operate. VAL is the second
4459 operand of the binary operator. BEFORE and AFTER are optional locations to
4460 return the value of MEM either before or after the operation. SCRATCH is
4461 a scratch register. */
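/* Shape of the emitted sequence for, say, an atomic add on a
   quadword ("1:" is a local label for exposition only):

	mb
   1:	ldq_l	before,0(mem)
	addq	before,val,scratch
	stq_c	scratch,0(mem)	// scratch <- 1 on success, else 0
	beq	scratch,1b	// lost the reservation: retry
	mb
*/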
4464 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4465 rtx before, rtx after, rtx scratch)
4467 enum machine_mode mode = GET_MODE (mem);
4468 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4470 emit_insn (gen_memory_barrier ());
4472 label = gen_label_rtx ();
4474 label = gen_rtx_LABEL_REF (DImode, label);
4478 emit_load_locked (mode, before, mem);
4482 x = gen_rtx_AND (mode, before, val);
4483 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4485 x = gen_rtx_NOT (mode, val);
4488 x = gen_rtx_fmt_ee (code, mode, before, val);
4490 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4491 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4493 emit_store_conditional (mode, cond, mem, scratch);
4495 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4496 emit_unlikely_jump (x, label);
4498 emit_insn (gen_memory_barrier ());
4501 /* Expand a compare and swap operation. */
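/* Sketch of the loop built below (local labels for exposition;
   "tmp" stands for the tied cond/scratch register):

	mb
   1:	ldq_l	retval,0(mem)
	cmpeq	retval,oldval,tmp
	beq	tmp,2f		// current value differs: give up
	mov	newval,tmp
	stq_c	tmp,0(mem)
	beq	tmp,1b		// store-conditional failed: retry
	mb
   2:
*/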
4504 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4507 enum machine_mode mode = GET_MODE (mem);
4508 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4510 emit_insn (gen_memory_barrier ());
4512 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4513 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4514 emit_label (XEXP (label1, 0));
4516 emit_load_locked (mode, retval, mem);
4518 x = gen_lowpart (DImode, retval);
4519 if (oldval == const0_rtx)
4520 x = gen_rtx_NE (DImode, x, const0_rtx);
4523 x = gen_rtx_EQ (DImode, x, oldval);
4524 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4525 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4527 emit_unlikely_jump (x, label2);
4529 emit_move_insn (scratch, newval);
4530 emit_store_conditional (mode, cond, mem, scratch);
4532 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4533 emit_unlikely_jump (x, label1);
4535 emit_insn (gen_memory_barrier ());
4536 emit_label (XEXP (label2, 0));
4540 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4542 enum machine_mode mode = GET_MODE (mem);
4543 rtx addr, align, wdst;
4544 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4546 addr = force_reg (DImode, XEXP (mem, 0));
4547 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4548 NULL_RTX, 1, OPTAB_DIRECT);
4550 oldval = convert_modes (DImode, mode, oldval, 1);
4551 newval = emit_insxl (mode, newval, addr);
4553 wdst = gen_reg_rtx (DImode);
4555 fn5 = gen_sync_compare_and_swapqi_1;
4557 fn5 = gen_sync_compare_and_swaphi_1;
4558 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4560 emit_move_insn (dst, gen_lowpart (mode, wdst));
4564 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4565 rtx oldval, rtx newval, rtx align,
4566 rtx scratch, rtx cond)
4568 rtx label1, label2, mem, width, mask, x;
4570 mem = gen_rtx_MEM (DImode, align);
4571 MEM_VOLATILE_P (mem) = 1;
4573 emit_insn (gen_memory_barrier ());
4574 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4575 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4576 emit_label (XEXP (label1, 0));
4578 emit_load_locked (DImode, scratch, mem);
4580 width = GEN_INT (GET_MODE_BITSIZE (mode));
4581 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4582 if (WORDS_BIG_ENDIAN)
4583 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4585 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4587 if (oldval == const0_rtx)
4588 x = gen_rtx_NE (DImode, dest, const0_rtx);
4591 x = gen_rtx_EQ (DImode, dest, oldval);
4592 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4593 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4595 emit_unlikely_jump (x, label2);
4597 if (WORDS_BIG_ENDIAN)
4598 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4600 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4601 emit_insn (gen_iordi3 (scratch, scratch, newval));
4603 emit_store_conditional (DImode, scratch, mem, scratch);
4605 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4606 emit_unlikely_jump (x, label1);
4608 emit_insn (gen_memory_barrier ());
4609 emit_label (XEXP (label2, 0));
4612 /* Expand an atomic exchange operation. */
4615 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4617 enum machine_mode mode = GET_MODE (mem);
4618 rtx label, x, cond = gen_lowpart (DImode, scratch);
4620 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4621 emit_label (XEXP (label, 0));
4623 emit_load_locked (mode, retval, mem);
4624 emit_move_insn (scratch, val);
4625 emit_store_conditional (mode, cond, mem, scratch);
4627 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4628 emit_unlikely_jump (x, label);
4630 emit_insn (gen_memory_barrier ());
4634 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4636 enum machine_mode mode = GET_MODE (mem);
4637 rtx addr, align, wdst;
4638 rtx (*fn4) (rtx, rtx, rtx, rtx);
4640 /* Force the address into a register. */
4641 addr = force_reg (DImode, XEXP (mem, 0));
4643 /* Align it to a multiple of 8. */
4644 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4645 NULL_RTX, 1, OPTAB_DIRECT);
4647 /* Insert val into the correct byte location within the word. */
4648 val = emit_insxl (mode, val, addr);
4650 wdst = gen_reg_rtx (DImode);
4652 fn4 = gen_sync_lock_test_and_setqi_1;
4654 fn4 = gen_sync_lock_test_and_sethi_1;
4655 emit_insn (fn4 (wdst, addr, val, align));
4657 emit_move_insn (dst, gen_lowpart (mode, wdst));
4661 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4662 rtx val, rtx align, rtx scratch)
4664 rtx label, mem, width, mask, x;
4666 mem = gen_rtx_MEM (DImode, align);
4667 MEM_VOLATILE_P (mem) = 1;
4669 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4670 emit_label (XEXP (label, 0));
4672 emit_load_locked (DImode, scratch, mem);
4674 width = GEN_INT (GET_MODE_BITSIZE (mode));
4675 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4676 if (WORDS_BIG_ENDIAN)
4678 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4679 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4683 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4684 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4686 emit_insn (gen_iordi3 (scratch, scratch, val));
4688 emit_store_conditional (DImode, scratch, mem, scratch);
4690 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4691 emit_unlikely_jump (x, label);
4693 emit_insn (gen_memory_barrier ());
4696 /* Adjust the cost of a scheduling dependency. Return the new cost of
4697 a dependency LINK of INSN on DEP_INSN. COST is the current cost. */
4700 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4702 enum attr_type insn_type, dep_insn_type;
4704 /* If the dependence is an anti-dependence, there is no cost. For an
4705 output dependence, there is sometimes a cost, but it doesn't seem
4706 worth handling those few cases. */
4707 if (REG_NOTE_KIND (link) != 0)
4710 /* If we can't recognize the insns, we can't really do anything. */
4711 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4714 insn_type = get_attr_type (insn);
4715 dep_insn_type = get_attr_type (dep_insn);
4717 /* Bring in the user-defined memory latency. */
4718 if (dep_insn_type == TYPE_ILD
4719 || dep_insn_type == TYPE_FLD
4720 || dep_insn_type == TYPE_LDSYM)
4721 cost += alpha_memory_latency-1;
4723 /* Everything else handled in DFA bypasses now. */
4728 /* The number of instructions that can be issued per cycle. */
4731 alpha_issue_rate (void)
4733 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4736 /* How many alternative schedules to try. This should be as wide as the
4737 scheduling freedom in the DFA, but no wider. Making this value too
4738 large results in extra work for the scheduler.
4740 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4741 alternative schedules. For EV5, we can choose between E0/E1 and
4742 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4745 alpha_multipass_dfa_lookahead (void)
4747 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4750 /* Machine-specific function data. */
4752 struct GTY(()) machine_function
4755 /* List of call information words for calls from this function. */
4756 struct rtx_def *first_ciw;
4757 struct rtx_def *last_ciw;
4760 /* List of deferred case vectors. */
4761 struct rtx_def *addr_list;
4764 const char *some_ld_name;
4766 /* For TARGET_LD_BUGGY_LDGP. */
4767 struct rtx_def *gp_save_rtx;
4770 /* How to allocate a 'struct machine_function'. */
4772 static struct machine_function *
4773 alpha_init_machine_status (void)
4775 return ((struct machine_function *)
4776 ggc_alloc_cleared (sizeof (struct machine_function)));
4779 /* Functions to save and restore alpha_return_addr_rtx. */
4781 /* Start the ball rolling with RETURN_ADDR_RTX. */
4784 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4789 return get_hard_reg_initial_val (Pmode, REG_RA);
4792 /* Return or create a memory slot containing the gp value for the current
4793 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4796 alpha_gp_save_rtx (void)
4798 rtx seq, m = cfun->machine->gp_save_rtx;
4804 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4805 m = validize_mem (m);
4806 emit_move_insn (m, pic_offset_table_rtx);
4811 /* We used to simply emit the sequence after entry_of_function.
4812 However this breaks the CFG if the first instruction in the
4813 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4814 label. Emit the sequence properly on the edge. We are only
4815 invoked from dw2_build_landing_pads and finish_eh_generation
4816 will call commit_edge_insertions thanks to a kludge. */
4817 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4819 cfun->machine->gp_save_rtx = m;
4826 alpha_ra_ever_killed (void)
4830 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4831 return (int)df_regs_ever_live_p (REG_RA);
4833 push_topmost_sequence ();
4835 pop_topmost_sequence ();
4837 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4841 /* Return the trap mode suffix applicable to the current
4842 instruction, or NULL. */
4845 get_trap_mode_suffix (void)
4847 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4851 case TRAP_SUFFIX_NONE:
4854 case TRAP_SUFFIX_SU:
4855 if (alpha_fptm >= ALPHA_FPTM_SU)
4859 case TRAP_SUFFIX_SUI:
4860 if (alpha_fptm >= ALPHA_FPTM_SUI)
4864 case TRAP_SUFFIX_V_SV:
4872 case ALPHA_FPTM_SUI:
4878 case TRAP_SUFFIX_V_SV_SVI:
4887 case ALPHA_FPTM_SUI:
4894 case TRAP_SUFFIX_U_SU_SUI:
4903 case ALPHA_FPTM_SUI:
4916 /* Return the rounding mode suffix applicable to the current
4917 instruction, or NULL. */
4920 get_round_mode_suffix (void)
4922 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4926 case ROUND_SUFFIX_NONE:
4928 case ROUND_SUFFIX_NORMAL:
4931 case ALPHA_FPRM_NORM:
4933 case ALPHA_FPRM_MINF:
4935 case ALPHA_FPRM_CHOP:
4937 case ALPHA_FPRM_DYN:
4944 case ROUND_SUFFIX_C:
4953 /* Locate some local-dynamic symbol still in use by this function
4954 so that we can print its name in some movdi_er_tlsldm pattern. */
4957 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4961 if (GET_CODE (x) == SYMBOL_REF
4962 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4964 cfun->machine->some_ld_name = XSTR (x, 0);
4972 get_some_local_dynamic_name (void)
4976 if (cfun->machine->some_ld_name)
4977 return cfun->machine->some_ld_name;
4979 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4981 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4982 return cfun->machine->some_ld_name;
4987 /* Print an operand. Recognize special options, documented below. */
4990 print_operand (FILE *file, rtx x, int code)
4997 /* Print the assembler name of the current function. */
4998 assemble_name (file, alpha_fnname);
5002 assemble_name (file, get_some_local_dynamic_name ());
5007 const char *trap = get_trap_mode_suffix ();
5008 const char *round = get_round_mode_suffix ();
5011 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5012 (trap ? trap : ""), (round ? round : ""));
5017 /* Generate the single-precision instruction suffix. */
5018 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5022 /* Generate the double-precision instruction suffix. */
5023 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5027 if (alpha_this_literal_sequence_number == 0)
5028 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5029 fprintf (file, "%d", alpha_this_literal_sequence_number);
5033 if (alpha_this_gpdisp_sequence_number == 0)
5034 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5035 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5039 if (GET_CODE (x) == HIGH)
5040 output_addr_const (file, XEXP (x, 0));
5042 output_operand_lossage ("invalid %%H value");
5049 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5051 x = XVECEXP (x, 0, 0);
5052 lituse = "lituse_tlsgd";
5054 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5056 x = XVECEXP (x, 0, 0);
5057 lituse = "lituse_tlsldm";
5059 else if (CONST_INT_P (x))
5060 lituse = "lituse_jsr";
5063 output_operand_lossage ("invalid %%J value");
5067 if (x != const0_rtx)
5068 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5076 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5077 lituse = "lituse_jsrdirect";
5079 lituse = "lituse_jsr";
5082 gcc_assert (INTVAL (x) != 0);
5083 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5087 /* If this operand is the constant zero, write it as "$31". */
5089 fprintf (file, "%s", reg_names[REGNO (x)]);
5090 else if (x == CONST0_RTX (GET_MODE (x)))
5091 fprintf (file, "$31");
5093 output_operand_lossage ("invalid %%r value");
5097 /* Similar, but for floating-point. */
5099 fprintf (file, "%s", reg_names[REGNO (x)]);
5100 else if (x == CONST0_RTX (GET_MODE (x)))
5101 fprintf (file, "$f31");
5103 output_operand_lossage ("invalid %%R value");
5107 /* Write the 1's complement of a constant. */
5108 if (!CONST_INT_P (x))
5109 output_operand_lossage ("invalid %%N value");
5111 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5115 /* Write 1 << C, for a constant C. */
5116 if (!CONST_INT_P (x))
5117 output_operand_lossage ("invalid %%P value");
5119 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5123 /* Write the high-order 16 bits of a constant, sign-extended. */
5124 if (!CONST_INT_P (x))
5125 output_operand_lossage ("invalid %%h value");
5127 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5131 /* Write the low-order 16 bits of a constant, sign-extended. */
5132 if (!CONST_INT_P (x))
5133 output_operand_lossage ("invalid %%L value");
5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5136 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
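/* A worked example of the sign-extension trick above (an illustrative
   note, not part of the original source): for INTVAL (x) == 0x18000,
   (0x18000 & 0xffff) == 0x8000 and 2 * (0x18000 & 0x8000) == 0x10000,
   so the printed value is 0x8000 - 0x10000 == -0x8000, i.e. the low
   16 bits reinterpreted as a signed 16-bit quantity.  */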
5140 /* Write mask for ZAP insn. */
5141 if (GET_CODE (x) == CONST_DOUBLE)
5143 HOST_WIDE_INT mask = 0;
5144 HOST_WIDE_INT value;
5146 value = CONST_DOUBLE_LOW (x);
5147 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5152 value = CONST_DOUBLE_HIGH (x);
5153 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5156 mask |= (1 << (i + sizeof (int)));
5158 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5161 else if (CONST_INT_P (x))
5163 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5165 for (i = 0; i < 8; i++, value >>= 8)
5169 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5172 output_operand_lossage ("invalid %%m value");
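/* Illustrative example (not in the original source): in the CONST_INT
   case above, each nonzero byte of VALUE sets the corresponding bit of
   the 8-bit ZAP mask.  E.g. the value 0x00ff00ff00ff00ff has nonzero
   bytes 0, 2, 4 and 6, so the printed mask is 0x55.  */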
5176 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5177 if (!CONST_INT_P (x)
5178 || (INTVAL (x) != 8 && INTVAL (x) != 16
5179 && INTVAL (x) != 32 && INTVAL (x) != 64))
5180 output_operand_lossage ("invalid %%M value");
5182 fprintf (file, "%s",
5183 (INTVAL (x) == 8 ? "b"
5184 : INTVAL (x) == 16 ? "w"
5185 : INTVAL (x) == 32 ? "l"
5190 /* Similar, except do it from the mask. */
5191 if (CONST_INT_P (x))
5193 HOST_WIDE_INT value = INTVAL (x);
5200 if (value == 0xffff)
5205 if (value == 0xffffffff)
5216 else if (HOST_BITS_PER_WIDE_INT == 32
5217 && GET_CODE (x) == CONST_DOUBLE
5218 && CONST_DOUBLE_LOW (x) == 0xffffffff
5219 && CONST_DOUBLE_HIGH (x) == 0)
5224 output_operand_lossage ("invalid %%U value");
5228 /* Write the constant value divided by 8 for little-endian mode or
5229 (56 - value) / 8 for big-endian mode. */
5231 if (!CONST_INT_P (x)
5232 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5235 || (INTVAL (x) & 7) != 0)
5236 output_operand_lossage ("invalid %%s value");
5238 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5240 ? (56 - INTVAL (x)) / 8
5245 /* Same, except compute (64 - c) / 8 */
5247 if (!CONST_INT_P (x)
5248 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5249 || (INTVAL (x) & 7) != 0)
5250 output_operand_lossage ("invalid %%s value");
5252 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5257 /* On Unicos/Mk systems: use a DEX expression if the symbol
5258 clashes with a register name. */
5259 int dex = unicosmk_need_dex (x);
5261 fprintf (file, "DEX(%d)", dex);
5263 output_addr_const (file, x);
5267 case 'C': case 'D': case 'c': case 'd':
5268 /* Write out comparison name. */
5270 enum rtx_code c = GET_CODE (x);
5272 if (!COMPARISON_P (x))
5273 output_operand_lossage ("invalid %%C value");
5275 else if (code == 'D')
5276 c = reverse_condition (c);
5277 else if (code == 'c')
5278 c = swap_condition (c);
5279 else if (code == 'd')
5280 c = swap_condition (reverse_condition (c));
5283 fprintf (file, "ule");
5285 fprintf (file, "ult");
5286 else if (c == UNORDERED)
5287 fprintf (file, "un");
5289 fprintf (file, "%s", GET_RTX_NAME (c));
5294 /* Write the divide or modulus operator. */
5295 switch (GET_CODE (x))
5298 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5301 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5304 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5307 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5310 output_operand_lossage ("invalid %%E value");
5316 /* Write "_u" for unaligned access. */
5317 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5318 fprintf (file, "_u");
5323 fprintf (file, "%s", reg_names[REGNO (x)]);
5325 output_address (XEXP (x, 0));
5326 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5328 switch (XINT (XEXP (x, 0), 1))
5332 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5335 output_operand_lossage ("unknown relocation unspec");
5340 output_addr_const (file, x);
5344 output_operand_lossage ("invalid %%xn code");
5349 print_operand_address (FILE *file, rtx addr)
5352 HOST_WIDE_INT offset = 0;
5354 if (GET_CODE (addr) == AND)
5355 addr = XEXP (addr, 0);
5357 if (GET_CODE (addr) == PLUS
5358 && CONST_INT_P (XEXP (addr, 1)))
5360 offset = INTVAL (XEXP (addr, 1));
5361 addr = XEXP (addr, 0);
5364 if (GET_CODE (addr) == LO_SUM)
5366 const char *reloc16, *reloclo;
5367 rtx op1 = XEXP (addr, 1);
5369 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5371 op1 = XEXP (op1, 0);
5372 switch (XINT (op1, 1))
5376 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5380 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5383 output_operand_lossage ("unknown relocation unspec");
5387 output_addr_const (file, XVECEXP (op1, 0, 0));
5392 reloclo = "gprellow";
5393 output_addr_const (file, op1);
5397 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5399 addr = XEXP (addr, 0);
5400 switch (GET_CODE (addr))
5403 basereg = REGNO (addr);
5407 basereg = subreg_regno (addr);
5414 fprintf (file, "($%d)\t\t!%s", basereg,
5415 (basereg == 29 ? reloc16 : reloclo));
5419 switch (GET_CODE (addr))
5422 basereg = REGNO (addr);
5426 basereg = subreg_regno (addr);
5430 offset = INTVAL (addr);
5433 #if TARGET_ABI_OPEN_VMS
5435 fprintf (file, "%s", XSTR (addr, 0));
5439 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5440 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5441 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5442 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5443 INTVAL (XEXP (XEXP (addr, 0), 1)));
5451 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5454 /* Emit RTL insns to initialize the variable parts of a trampoline at
5455 TRAMP. FNADDR is an RTX for the address of the function's pure
5456 code. CXT is an RTX for the static chain value for the function.
5458 The three offset parameters are for the individual template's
5459 layout. A JMPOFS < 0 indicates that the trampoline does not
5460 contain instructions at all.
5462 We assume here that a function will be called many more times than
5463 its address is taken (e.g., it might be passed to qsort), so we
5464 take the trouble to initialize the "hint" field in the JMP insn.
5465 Note that the hint field is PC (new) + 4 * bits 13:0. */
5468 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5469 int fnofs, int cxtofs, int jmpofs)
5472 /* VMS really uses DImode pointers in memory at this point. */
5473 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5475 #ifdef POINTERS_EXTEND_UNSIGNED
5476 fnaddr = convert_memory_address (mode, fnaddr);
5477 cxt = convert_memory_address (mode, cxt);
5480 /* Store function address and CXT. */
5481 addr = memory_address (mode, plus_constant (tramp, fnofs));
5482 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5483 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5484 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5486 #ifdef ENABLE_EXECUTE_STACK
5487 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5488 0, VOIDmode, 1, tramp, Pmode);
5492 emit_insn (gen_imb ());
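/* Illustrative sketch of the hint computation mentioned above
   (assuming the standard Alpha JMP/JSR hint encoding; not part of the
   original source).  Since the predicted target is PC (new) + 4 *
   hint<13:0>, and PC (new) is the address just past the jump, the
   hint for a known target would be:

     hint = ((fnaddr - (jmp_pc + 4)) >> 2) & 0x3fff;

   so the prediction lands on the function entry whenever the
   displacement fits in 14 bits of longwords.  */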
5495 /* Determine where to put an argument to a function.
5496 Value is zero to push the argument on the stack,
5497 or a hard register in which to store the argument.
5499 MODE is the argument's machine mode.
5500 TYPE is the data type of the argument (as a tree).
5501 This is null for libcalls where that information may not be available.
5503 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5504 the preceding args and about the function being called.
5505 NAMED is nonzero if this argument is a named parameter
5506 (otherwise it is an extra parameter matching an ellipsis).
5508 On Alpha the first 6 words of args are normally in registers
5509 and the rest are pushed. */
5512 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5513 int named ATTRIBUTE_UNUSED)
5518 /* Don't get confused and pass small structures in FP registers. */
5519 if (type && AGGREGATE_TYPE_P (type))
5523 #ifdef ENABLE_CHECKING
5524 /* With alpha_split_complex_arg, we shouldn't see any raw complex arguments here. */
5526 gcc_assert (!COMPLEX_MODE_P (mode));
5529 /* Set up defaults for FP operands passed in FP registers, and
5530 integral operands passed in integer registers. */
5531 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5537 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5538 the three platforms, so we can't avoid conditional compilation. */
5539 #if TARGET_ABI_OPEN_VMS
5541 if (mode == VOIDmode)
5542 return alpha_arg_info_reg_val (cum);
5544 num_args = cum.num_args;
5546 || targetm.calls.must_pass_in_stack (mode, type))
5549 #elif TARGET_ABI_UNICOSMK
5553 /* If this is the last argument, generate the call info word (CIW). */
5554 /* ??? We don't include the caller's line number in the CIW because
5555 I don't know how to determine it if debug info is turned off. */
5556 if (mode == VOIDmode)
5565 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5566 if (cum.reg_args_type[i])
5567 lo |= (1 << (7 - i));
5569 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5572 lo |= cum.num_reg_words;
5574 #if HOST_BITS_PER_WIDE_INT == 32
5575 hi = (cum.num_args << 20) | cum.num_arg_words;
5577 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5578 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5581 ciw = immed_double_const (lo, hi, DImode);
5583 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5584 UNSPEC_UMK_LOAD_CIW);
5587 size = ALPHA_ARG_SIZE (mode, type, named);
5588 num_args = cum.num_reg_words;
5590 || cum.num_reg_words + size > 6
5591 || targetm.calls.must_pass_in_stack (mode, type))
5593 else if (type && TYPE_MODE (type) == BLKmode)
5597 reg1 = gen_rtx_REG (DImode, num_args + 16);
5598 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5600 /* The argument fits in two registers. Note that we still need to
5601 reserve a register for empty structures. */
5605 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5608 reg2 = gen_rtx_REG (DImode, num_args + 17);
5609 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5610 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5614 #elif TARGET_ABI_OSF
5620 /* VOID is passed as a special flag for "last argument". */
5621 if (type == void_type_node)
5623 else if (targetm.calls.must_pass_in_stack (mode, type))
5627 #error Unhandled ABI
5630 return gen_rtx_REG (mode, num_args + basereg);
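/* Illustrative example for the OSF ABI (hypothetical prototype, not
   from the original source): given

     void f (long a, double b, long c);

   slot 0 is passed in $16, slot 1 in $f17 (FP data uses the FP
   register with the same slot number), and slot 2 in $18; a seventh
   argument word would be pushed on the stack.  */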
5634 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5635 enum machine_mode mode ATTRIBUTE_UNUSED,
5636 tree type ATTRIBUTE_UNUSED,
5637 bool named ATTRIBUTE_UNUSED)
5641 #if TARGET_ABI_OPEN_VMS
5642 if (cum->num_args < 6
5643 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5644 words = 6 - cum->num_args;
5645 #elif TARGET_ABI_UNICOSMK
5646 /* Never any split arguments. */
5647 #elif TARGET_ABI_OSF
5648 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5651 #error Unhandled ABI
5654 return words * UNITS_PER_WORD;
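/* Worked example (illustrative): if five argument words are already
   in registers and the next argument needs three words, only
   6 - 5 = 1 word stays in a register, so this returns
   1 * UNITS_PER_WORD = 8 bytes; the remaining two words go on the
   stack.  */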
5658 /* Return true if TYPE must be returned in memory, instead of in registers. */
5661 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5663 enum machine_mode mode = VOIDmode;
5668 mode = TYPE_MODE (type);
5670 /* All aggregates are returned in memory. */
5671 if (AGGREGATE_TYPE_P (type))
5675 size = GET_MODE_SIZE (mode);
5676 switch (GET_MODE_CLASS (mode))
5678 case MODE_VECTOR_FLOAT:
5679 /* Pass all float vectors in memory, like an aggregate. */
5682 case MODE_COMPLEX_FLOAT:
5683 /* We judge complex floats on the size of their element,
5684 not the size of the whole type. */
5685 size = GET_MODE_UNIT_SIZE (mode);
5690 case MODE_COMPLEX_INT:
5691 case MODE_VECTOR_INT:
5695 /* ??? We get called on all sorts of random stuff from
5696 aggregate_value_p. We must return something, but it's not
5697 clear what's safe to return. Pretend it's a struct I know nothing about. */
5702 /* Otherwise types must fit in one register. */
5703 return size > UNITS_PER_WORD;
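/* Example (illustrative): a complex float is judged by its 4-byte
   element size, so it satisfies the one-register rule and is returned
   in registers; any aggregate, by contrast, is always returned in
   memory regardless of size.  */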
5706 /* Return true if TYPE should be passed by invisible reference. */
5709 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5710 enum machine_mode mode,
5711 const_tree type ATTRIBUTE_UNUSED,
5712 bool named ATTRIBUTE_UNUSED)
5714 return mode == TFmode || mode == TCmode;
5717 /* Define how to find the value returned by a function. VALTYPE is the
5718 data type of the value (as a tree). If the precise function being
5719 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5720 MODE is set instead of VALTYPE for libcalls.
5722 On Alpha the value is found in $0 for integer functions and
5723 $f0 for floating-point functions. */
5726 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5727 enum machine_mode mode)
5729 unsigned int regnum, dummy;
5730 enum mode_class mclass;
5732 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5735 mode = TYPE_MODE (valtype);
5737 mclass = GET_MODE_CLASS (mode);
5741 PROMOTE_MODE (mode, dummy, valtype);
5744 case MODE_COMPLEX_INT:
5745 case MODE_VECTOR_INT:
5753 case MODE_COMPLEX_FLOAT:
5755 enum machine_mode cmode = GET_MODE_INNER (mode);
5757 return gen_rtx_PARALLEL
5760 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5762 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5763 GEN_INT (GET_MODE_SIZE (cmode)))));
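/* Illustrative note: for a complex double return value this builds a
   PARALLEL placing the real part in $f0 (hard register 32) at offset
   0 and the imaginary part in $f1 (register 33) at offset 8.  */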
5770 return gen_rtx_REG (mode, regnum);
5773 /* TCmode complex values are passed by invisible reference. We
5774 should not split these values. */
5777 alpha_split_complex_arg (const_tree type)
5779 return TYPE_MODE (type) != TCmode;
5783 alpha_build_builtin_va_list (void)
5785 tree base, ofs, space, record, type_decl;
5787 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5788 return ptr_type_node;
5790 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5791 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5792 TREE_CHAIN (record) = type_decl;
5793 TYPE_NAME (record) = type_decl;
5795 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5797 /* Dummy field to prevent alignment warnings. */
5798 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5799 DECL_FIELD_CONTEXT (space) = record;
5800 DECL_ARTIFICIAL (space) = 1;
5801 DECL_IGNORED_P (space) = 1;
5803 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5805 DECL_FIELD_CONTEXT (ofs) = record;
5806 TREE_CHAIN (ofs) = space;
5808 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5810 DECL_FIELD_CONTEXT (base) = record;
5811 TREE_CHAIN (base) = ofs;
5813 TYPE_FIELDS (record) = base;
5814 layout_type (record);
5816 va_list_gpr_counter_field = ofs;
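/* The record built above corresponds roughly to this C sketch (an
   approximation; the exact field types come from the tree nodes used
   above, and the dummy alignment field is omitted):

     typedef struct {
       char *__base;    /- start of the argument save area -/
       int __offset;    /- byte offset of the next argument -/
     } __va_list_tag;
*/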
5821 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5822 and constant additions. */
5825 va_list_skip_additions (tree lhs)
5831 enum tree_code code;
5833 stmt = SSA_NAME_DEF_STMT (lhs);
5835 if (gimple_code (stmt) == GIMPLE_PHI)
5838 if (!is_gimple_assign (stmt)
5839 || gimple_assign_lhs (stmt) != lhs)
5842 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5844 code = gimple_assign_rhs_code (stmt);
5845 if (!CONVERT_EXPR_CODE_P (code)
5846 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5847 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5848 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5851 lhs = gimple_assign_rhs1 (stmt);
5855 /* Check if LHS = RHS statement is
5856 LHS = *(ap.__base + ap.__offset + cst)
5859 or LHS = *(ap.__base + ((ap.__offset + cst <= 47)
5860 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5861 If the former, indicate that GPR registers are needed,
5862 if the latter, indicate that FPR registers are needed.
5864 Also look for LHS = (*ptr).field, where ptr is one of the forms
5867 On alpha, cfun->va_list_gpr_size is used as size of the needed
5868 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5869 registers are needed and bit 1 set if FPR registers are needed.
5870 Return true if va_list references should not be scanned for the
5871 current statement. */
5874 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5876 tree base, offset, rhs;
5880 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5881 != GIMPLE_SINGLE_RHS)
5884 rhs = gimple_assign_rhs1 (stmt);
5885 while (handled_component_p (rhs))
5886 rhs = TREE_OPERAND (rhs, 0);
5887 if (TREE_CODE (rhs) != INDIRECT_REF
5888 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5891 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5893 || !is_gimple_assign (stmt)
5894 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5897 base = gimple_assign_rhs1 (stmt);
5898 if (TREE_CODE (base) == SSA_NAME)
5900 base_stmt = va_list_skip_additions (base);
5902 && is_gimple_assign (base_stmt)
5903 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5904 base = gimple_assign_rhs1 (base_stmt);
5907 if (TREE_CODE (base) != COMPONENT_REF
5908 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5910 base = gimple_assign_rhs2 (stmt);
5911 if (TREE_CODE (base) == SSA_NAME)
5913 base_stmt = va_list_skip_additions (base);
5915 && is_gimple_assign (base_stmt)
5916 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5917 base = gimple_assign_rhs1 (base_stmt);
5920 if (TREE_CODE (base) != COMPONENT_REF
5921 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5927 base = get_base_address (base);
5928 if (TREE_CODE (base) != VAR_DECL
5929 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5932 offset = gimple_op (stmt, 1 + offset_arg);
5933 if (TREE_CODE (offset) == SSA_NAME)
5935 gimple offset_stmt = va_list_skip_additions (offset);
5938 && gimple_code (offset_stmt) == GIMPLE_PHI)
5941 gimple arg1_stmt, arg2_stmt;
5943 enum tree_code code1, code2;
5945 if (gimple_phi_num_args (offset_stmt) != 2)
5949 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5951 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5952 if (arg1_stmt == NULL
5953 || !is_gimple_assign (arg1_stmt)
5954 || arg2_stmt == NULL
5955 || !is_gimple_assign (arg2_stmt))
5958 code1 = gimple_assign_rhs_code (arg1_stmt);
5959 code2 = gimple_assign_rhs_code (arg2_stmt);
5960 if (code1 == COMPONENT_REF
5961 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5963 else if (code2 == COMPONENT_REF
5964 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5966 gimple tem = arg1_stmt;
5968 arg1_stmt = arg2_stmt;
5974 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5977 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5978 if (code2 == MINUS_EXPR)
5980 if (sub < -48 || sub > -32)
5983 arg1 = gimple_assign_rhs1 (arg1_stmt);
5984 arg2 = gimple_assign_rhs1 (arg2_stmt);
5985 if (TREE_CODE (arg2) == SSA_NAME)
5987 arg2_stmt = va_list_skip_additions (arg2);
5988 if (arg2_stmt == NULL
5989 || !is_gimple_assign (arg2_stmt)
5990 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5992 arg2 = gimple_assign_rhs1 (arg2_stmt);
5997 if (TREE_CODE (arg1) != COMPONENT_REF
5998 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5999 || get_base_address (arg1) != base)
6002 /* Need floating point regs. */
6003 cfun->va_list_fpr_size |= 2;
6007 && is_gimple_assign (offset_stmt)
6008 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6009 offset = gimple_assign_rhs1 (offset_stmt);
6011 if (TREE_CODE (offset) != COMPONENT_REF
6012 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6013 || get_base_address (offset) != base)
6016 /* Need general regs. */
6017 cfun->va_list_fpr_size |= 1;
6021 si->va_list_escapes = true;
6026 /* Perform any actions needed for a function that is receiving a
6027 variable number of arguments. */
6030 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6031 tree type, int *pretend_size, int no_rtl)
6033 CUMULATIVE_ARGS cum = *pcum;
6035 /* Skip the current argument. */
6036 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6038 #if TARGET_ABI_UNICOSMK
6039 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6040 arguments on the stack. Unfortunately, it doesn't always store the first
6041 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6042 with stdargs as we always have at least one named argument there. */
6043 if (cum.num_reg_words < 6)
6047 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6048 emit_insn (gen_arg_home_umk ());
6052 #elif TARGET_ABI_OPEN_VMS
6053 /* For VMS, we allocate space for all 6 arg registers plus a count.
6055 However, if NO registers need to be saved, don't allocate any space.
6056 This is not only because we won't need the space, but because AP
6057 includes the current_pretend_args_size and we don't want to mess up
6058 any ap-relative addresses already made. */
6059 if (cum.num_args < 6)
6063 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6064 emit_insn (gen_arg_home ());
6066 *pretend_size = 7 * UNITS_PER_WORD;
6069 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6070 only push those that are remaining. However, if NO registers need to
6071 be saved, don't allocate any space. This is not only because we won't
6072 need the space, but because AP includes the current_pretend_args_size
6073 and we don't want to mess up any ap-relative addresses already made.
6075 If we are not to use the floating-point registers, save the integer
6076 registers where we would put the floating-point registers. This is
6077 not the most efficient way to implement varargs with just one register
6078 class, but it isn't worth doing anything more efficient in this rare case. */
6086 alias_set_type set = get_varargs_alias_set ();
6089 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6090 if (count > 6 - cum)
6093 /* Detect whether integer registers or floating-point registers
6094 are needed by the detected va_arg statements. See above for
6095 how these values are computed. Note that the "escape" value
6096 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6098 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6100 if (cfun->va_list_fpr_size & 1)
6102 tmp = gen_rtx_MEM (BLKmode,
6103 plus_constant (virtual_incoming_args_rtx,
6104 (cum + 6) * UNITS_PER_WORD));
6105 MEM_NOTRAP_P (tmp) = 1;
6106 set_mem_alias_set (tmp, set);
6107 move_block_from_reg (16 + cum, tmp, count);
6110 if (cfun->va_list_fpr_size & 2)
6112 tmp = gen_rtx_MEM (BLKmode,
6113 plus_constant (virtual_incoming_args_rtx,
6114 cum * UNITS_PER_WORD));
6115 MEM_NOTRAP_P (tmp) = 1;
6116 set_mem_alias_set (tmp, set);
6117 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6120 *pretend_size = 12 * UNITS_PER_WORD;
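/* Illustrative layout of the 12-word (96-byte) save area on OSF, with
   CUM named argument words already consumed: the FP registers
   $f16+CUM onward land in the first 48 bytes and the integer
   registers $16+CUM onward in the second 48 bytes, matching the
   biasing described in alpha_va_start below.  */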
6125 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6127 HOST_WIDE_INT offset;
6128 tree t, offset_field, base_field;
6130 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6133 if (TARGET_ABI_UNICOSMK)
6134 std_expand_builtin_va_start (valist, nextarg);
6136 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6137 up by 48, storing fp arg registers in the first 48 bytes, and the
6138 integer arg registers in the next 48 bytes. This is only done,
6139 however, if any integer registers need to be stored.
6141 If no integer registers need be stored, then we must subtract 48
6142 in order to account for the integer arg registers which are counted
6143 in argsize above, but which are not actually stored on the stack.
6144 Must further be careful here about structures straddling the last
6145 integer argument register; that futzes with pretend_args_size,
6146 which changes the meaning of AP. */
6149 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6151 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6153 if (TARGET_ABI_OPEN_VMS)
6155 nextarg = plus_constant (nextarg, offset);
6156 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6157 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6158 make_tree (ptr_type_node, nextarg));
6159 TREE_SIDE_EFFECTS (t) = 1;
6161 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6165 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6166 offset_field = TREE_CHAIN (base_field);
6168 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6169 valist, base_field, NULL_TREE);
6170 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6171 valist, offset_field, NULL_TREE);
6173 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6174 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6176 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6177 TREE_SIDE_EFFECTS (t) = 1;
6178 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6180 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6181 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6182 TREE_SIDE_EFFECTS (t) = 1;
6183 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
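/* Worked example (illustrative): for a varargs function with two
   named argument words on OSF, __base is set to the incoming argument
   pointer plus OFFSET and __offset to NUM_ARGS * UNITS_PER_WORD = 16,
   so the first va_arg fetch starts just past the named arguments.  */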
6188 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6191 tree type_size, ptr_type, addend, t, addr;
6192 gimple_seq internal_post;
6194 /* If the type could not be passed in registers, skip the block
6195 reserved for the registers. */
6196 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6198 t = build_int_cst (TREE_TYPE (offset), 6*8);
6199 gimplify_assign (offset,
6200 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6205 ptr_type = build_pointer_type (type);
6207 if (TREE_CODE (type) == COMPLEX_TYPE)
6209 tree real_part, imag_part, real_temp;
6211 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6214 /* Copy the value into a new temporary, lest the formal temporary
6215 be reused out from under us. */
6216 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6218 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6221 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6223 else if (TREE_CODE (type) == REAL_TYPE)
6225 tree fpaddend, cond, fourtyeight;
6227 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6228 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6229 addend, fourtyeight);
6230 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6231 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6235 /* Build the final address and force that value into a temporary. */
6236 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6237 fold_convert (sizetype, addend));
6238 internal_post = NULL;
6239 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6240 gimple_seq_add_seq (pre_p, internal_post);
6242 /* Update the offset field. */
6243 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6244 if (type_size == NULL || TREE_OVERFLOW (type_size))
6248 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6249 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6250 t = size_binop (MULT_EXPR, t, size_int (8));
6252 t = fold_convert (TREE_TYPE (offset), t);
6253 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6256 return build_va_arg_indirect_ref (addr);
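/* Worked example (illustrative): fetching a double with __offset == 8
   takes the conditional above (8 < 48), so the address used is
   __base + 8 - 48, i.e. into the FP register save area that precedes
   __base; once __offset >= 48, the unbiased address is used and the
   value comes from the integer/stack area.  */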
6260 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6263 tree offset_field, base_field, offset, base, t, r;
6266 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6267 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6269 base_field = TYPE_FIELDS (va_list_type_node);
6270 offset_field = TREE_CHAIN (base_field);
6271 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6272 valist, base_field, NULL_TREE);
6273 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6274 valist, offset_field, NULL_TREE);
6276 /* Pull the fields of the structure out into temporaries. Since we never
6277 modify the base field, we can use a formal temporary. Sign-extend the
6278 offset field so that it's the proper width for pointer arithmetic. */
6279 base = get_formal_tmp_var (base_field, pre_p);
6281 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6282 offset = get_initialized_tmp_var (t, pre_p, NULL);
6284 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6286 type = build_pointer_type (type);
6288 /* Find the value. Note that this will be a stable indirection, or
6289 a composite of stable indirections in the case of complex. */
6290 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6292 /* Stuff the offset temporary back into its field. */
6293 gimplify_assign (unshare_expr (offset_field),
6294 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6297 r = build_va_arg_indirect_ref (r);
6306 ALPHA_BUILTIN_CMPBGE,
6307 ALPHA_BUILTIN_EXTBL,
6308 ALPHA_BUILTIN_EXTWL,
6309 ALPHA_BUILTIN_EXTLL,
6310 ALPHA_BUILTIN_EXTQL,
6311 ALPHA_BUILTIN_EXTWH,
6312 ALPHA_BUILTIN_EXTLH,
6313 ALPHA_BUILTIN_EXTQH,
6314 ALPHA_BUILTIN_INSBL,
6315 ALPHA_BUILTIN_INSWL,
6316 ALPHA_BUILTIN_INSLL,
6317 ALPHA_BUILTIN_INSQL,
6318 ALPHA_BUILTIN_INSWH,
6319 ALPHA_BUILTIN_INSLH,
6320 ALPHA_BUILTIN_INSQH,
6321 ALPHA_BUILTIN_MSKBL,
6322 ALPHA_BUILTIN_MSKWL,
6323 ALPHA_BUILTIN_MSKLL,
6324 ALPHA_BUILTIN_MSKQL,
6325 ALPHA_BUILTIN_MSKWH,
6326 ALPHA_BUILTIN_MSKLH,
6327 ALPHA_BUILTIN_MSKQH,
6328 ALPHA_BUILTIN_UMULH,
6330 ALPHA_BUILTIN_ZAPNOT,
6331 ALPHA_BUILTIN_AMASK,
6332 ALPHA_BUILTIN_IMPLVER,
6334 ALPHA_BUILTIN_THREAD_POINTER,
6335 ALPHA_BUILTIN_SET_THREAD_POINTER,
6338 ALPHA_BUILTIN_MINUB8,
6339 ALPHA_BUILTIN_MINSB8,
6340 ALPHA_BUILTIN_MINUW4,
6341 ALPHA_BUILTIN_MINSW4,
6342 ALPHA_BUILTIN_MAXUB8,
6343 ALPHA_BUILTIN_MAXSB8,
6344 ALPHA_BUILTIN_MAXUW4,
6345 ALPHA_BUILTIN_MAXSW4,
6349 ALPHA_BUILTIN_UNPKBL,
6350 ALPHA_BUILTIN_UNPKBW,
6355 ALPHA_BUILTIN_CTPOP,
6360 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6361 CODE_FOR_builtin_cmpbge,
6362 CODE_FOR_builtin_extbl,
6363 CODE_FOR_builtin_extwl,
6364 CODE_FOR_builtin_extll,
6365 CODE_FOR_builtin_extql,
6366 CODE_FOR_builtin_extwh,
6367 CODE_FOR_builtin_extlh,
6368 CODE_FOR_builtin_extqh,
6369 CODE_FOR_builtin_insbl,
6370 CODE_FOR_builtin_inswl,
6371 CODE_FOR_builtin_insll,
6372 CODE_FOR_builtin_insql,
6373 CODE_FOR_builtin_inswh,
6374 CODE_FOR_builtin_inslh,
6375 CODE_FOR_builtin_insqh,
6376 CODE_FOR_builtin_mskbl,
6377 CODE_FOR_builtin_mskwl,
6378 CODE_FOR_builtin_mskll,
6379 CODE_FOR_builtin_mskql,
6380 CODE_FOR_builtin_mskwh,
6381 CODE_FOR_builtin_msklh,
6382 CODE_FOR_builtin_mskqh,
6383 CODE_FOR_umuldi3_highpart,
6384 CODE_FOR_builtin_zap,
6385 CODE_FOR_builtin_zapnot,
6386 CODE_FOR_builtin_amask,
6387 CODE_FOR_builtin_implver,
6388 CODE_FOR_builtin_rpcc,
6393 CODE_FOR_builtin_minub8,
6394 CODE_FOR_builtin_minsb8,
6395 CODE_FOR_builtin_minuw4,
6396 CODE_FOR_builtin_minsw4,
6397 CODE_FOR_builtin_maxub8,
6398 CODE_FOR_builtin_maxsb8,
6399 CODE_FOR_builtin_maxuw4,
6400 CODE_FOR_builtin_maxsw4,
6401 CODE_FOR_builtin_perr,
6402 CODE_FOR_builtin_pklb,
6403 CODE_FOR_builtin_pkwb,
6404 CODE_FOR_builtin_unpkbl,
6405 CODE_FOR_builtin_unpkbw,
6410 CODE_FOR_popcountdi2
6413 struct alpha_builtin_def
6416 enum alpha_builtin code;
6417 unsigned int target_mask;
6421 static struct alpha_builtin_def const zero_arg_builtins[] = {
6422 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6423 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6426 static struct alpha_builtin_def const one_arg_builtins[] = {
6427 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6428 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6429 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6430 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6431 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6432 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6433 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6434 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6437 static struct alpha_builtin_def const two_arg_builtins[] = {
6438 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6439 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6440 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6441 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6442 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6443 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6444 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6445 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6446 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6447 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6448 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6449 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6450 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6451 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6452 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6453 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6454 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6455 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6456 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6457 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6458 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6459 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6460 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6461 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6462 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6463 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6464 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6465 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6466 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6467 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6468 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6469 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6470 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6471 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6474 static GTY(()) tree alpha_v8qi_u;
6475 static GTY(()) tree alpha_v8qi_s;
6476 static GTY(()) tree alpha_v4hi_u;
6477 static GTY(()) tree alpha_v4hi_s;
6479 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6480 functions pointed to by P, with function type FTYPE. */
6483 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6489 for (i = 0; i < count; ++i, ++p)
6490 if ((target_flags & p->target_mask) == p->target_mask)
6492 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6495 TREE_READONLY (decl) = 1;
6496 TREE_NOTHROW (decl) = 1;
6502 alpha_init_builtins (void)
6504 tree dimode_integer_type_node;
6507 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6509 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6510 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6513 ftype = build_function_type_list (dimode_integer_type_node,
6514 dimode_integer_type_node, NULL_TREE);
6515 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6518 ftype = build_function_type_list (dimode_integer_type_node,
6519 dimode_integer_type_node,
6520 dimode_integer_type_node, NULL_TREE);
6521 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6524 ftype = build_function_type (ptr_type_node, void_list_node);
6525 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6526 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6528 TREE_NOTHROW (decl) = 1;
6530 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6531 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6532 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6534 TREE_NOTHROW (decl) = 1;
6536 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6537 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6538 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6539 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6542 /* Expand an expression EXP that calls a built-in function,
6543 with result going to TARGET if that's convenient
6544 (and in mode MODE if that's convenient).
6545 SUBTARGET may be used as the target for computing one of EXP's operands.
6546 IGNORE is nonzero if the value is to be ignored. */
6549 alpha_expand_builtin (tree exp, rtx target,
6550 rtx subtarget ATTRIBUTE_UNUSED,
6551 enum machine_mode mode ATTRIBUTE_UNUSED,
6552 int ignore ATTRIBUTE_UNUSED)
6556 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6557 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6559 call_expr_arg_iterator iter;
6560 enum insn_code icode;
6561 rtx op[MAX_ARGS], pat;
6565 if (fcode >= ALPHA_BUILTIN_max)
6566 internal_error ("bad builtin fcode");
6567 icode = code_for_builtin[fcode];
6569 internal_error ("bad builtin fcode");
6571 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6574 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6576 const struct insn_operand_data *insn_op;
6578 if (arg == error_mark_node)
6580 if (arity > MAX_ARGS)
6583 insn_op = &insn_data[icode].operand[arity + nonvoid];
6585 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6587 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6588 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6594 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6596 || GET_MODE (target) != tmode
6597 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6598 target = gen_reg_rtx (tmode);
6604 pat = GEN_FCN (icode) (target);
6608 pat = GEN_FCN (icode) (target, op[0]);
6610 pat = GEN_FCN (icode) (op[0]);
6613 pat = GEN_FCN (icode) (target, op[0], op[1]);
6629 /* Several bits below assume HWI >= 64 bits. This should be enforced at configure time. */
6631 #if HOST_BITS_PER_WIDE_INT < 64
6632 # error "HOST_WIDE_INT too small"
6635 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6636 with an 8-bit output vector. OPINT contains the integer operands; bit N
6637 of OP_CONST is set if OPINT[N] is valid. */
6640 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6645 for (i = 0, val = 0; i < 8; ++i)
6647 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6648 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6652 return build_int_cst (long_integer_type_node, val);
6654 else if (op_const == 2 && opint[1] == 0)
6655 return build_int_cst (long_integer_type_node, 0xff);
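/* Illustrative note: when OPINT[1] is 0, every unsigned byte
   comparison c0 >= 0 holds, so all eight result bits are set and the
   fold yields 0xff; e.g. __builtin_alpha_cmpbge (x, 0) is always 0xff
   regardless of x.  */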
6659 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6660 specialized form of an AND operation. Other byte manipulation instructions
6661 are defined in terms of this instruction, so this is also used as a
6662 subroutine for other builtins.
6664 OP contains the tree operands; OPINT contains the extracted integer values.
6665 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6666 OPINT may be considered. */
6669 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6674 unsigned HOST_WIDE_INT mask = 0;
6677 for (i = 0; i < 8; ++i)
6678 if ((opint[1] >> i) & 1)
6679 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6682 return build_int_cst (long_integer_type_node, opint[0] & mask);
6685 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6686 build_int_cst (long_integer_type_node, mask));
6688 else if ((op_const & 1) && opint[0] == 0)
6689 return build_int_cst (long_integer_type_node, 0);
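/* Worked example (illustrative): a byte-select of 0x0f expands to the
   64-bit mask 0x00000000ffffffff, so __builtin_alpha_zapnot (x, 0x0f)
   folds to x & 0xffffffff when x is constant, or to the corresponding
   BIT_AND_EXPR otherwise.  */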
6693 /* Fold the builtins for the EXT family of instructions. */
6696 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6697 long op_const, unsigned HOST_WIDE_INT bytemask,
6701 tree *zap_op = NULL;
6705 unsigned HOST_WIDE_INT loc;
6708 if (BYTES_BIG_ENDIAN)
6716 unsigned HOST_WIDE_INT temp = opint[0];
6729 opint[1] = bytemask;
6730 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6733 /* Fold the builtins for the INS family of instructions. */
6736 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6737 long op_const, unsigned HOST_WIDE_INT bytemask,
6740 if ((op_const & 1) && opint[0] == 0)
6741 return build_int_cst (long_integer_type_node, 0);
6745 unsigned HOST_WIDE_INT temp, loc, byteloc;
6746 tree *zap_op = NULL;
6749 if (BYTES_BIG_ENDIAN)
6756 byteloc = (64 - (loc * 8)) & 0x3f;
6773 opint[1] = bytemask;
6774 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6781 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6782 long op_const, unsigned HOST_WIDE_INT bytemask,
6787 unsigned HOST_WIDE_INT loc;
6790 if (BYTES_BIG_ENDIAN)
6797 opint[1] = bytemask ^ 0xff;
6800 return alpha_fold_builtin_zapnot (op, opint, op_const);
6804 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6810 unsigned HOST_WIDE_INT l;
6813 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6815 #if HOST_BITS_PER_WIDE_INT > 64
6819 return build_int_cst (long_integer_type_node, h);
6823 opint[1] = opint[0];
6826 /* Note that (X*1) >> 64 == 0. */
6827 if (opint[1] == 0 || opint[1] == 1)
6828 return build_int_cst (long_integer_type_node, 0);
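/* Example (illustrative): umulh returns the high 64 bits of the
   128-bit product, e.g. umulh (1UL << 32, 1UL << 32) == 1; a
   multiplier of 0 or 1 can never produce high bits, hence the fold to
   0 above.  */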
6835 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6837 tree op0 = fold_convert (vtype, op[0]);
6838 tree op1 = fold_convert (vtype, op[1]);
6839 tree val = fold_build2 (code, vtype, op0, op1);
6840 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6844 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6846 unsigned HOST_WIDE_INT temp = 0;
6852 for (i = 0; i < 8; ++i)
6854 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6855 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6862 return build_int_cst (long_integer_type_node, temp);
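/* Illustrative note: PERR sums the absolute differences of the eight
   byte lanes.  E.g. with opint[0] == 0x0302 and opint[1] == 0x0105,
   the byte differences are |2 - 5| = 3 and |3 - 1| = 2 (all other
   lanes 0), so the fold yields 5.  */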
6866 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6868 unsigned HOST_WIDE_INT temp;
6873 temp = opint[0] & 0xff;
6874 temp |= (opint[0] >> 24) & 0xff00;
6876 return build_int_cst (long_integer_type_node, temp);
6880 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6882 unsigned HOST_WIDE_INT temp;
6887 temp = opint[0] & 0xff;
6888 temp |= (opint[0] >> 8) & 0xff00;
6889 temp |= (opint[0] >> 16) & 0xff0000;
6890 temp |= (opint[0] >> 24) & 0xff000000;
6892 return build_int_cst (long_integer_type_node, temp);
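/* Worked example (illustrative): PKWB packs the low byte of each of
   the four 16-bit lanes into the low 32 bits, so 0x00dd00cc00bb00aa
   folds to 0xddccbbaa.  */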
6896 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6898 unsigned HOST_WIDE_INT temp;
6903 temp = opint[0] & 0xff;
6904 temp |= (opint[0] & 0xff00) << 24;
6906 return build_int_cst (long_integer_type_node, temp);
6910 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6912 unsigned HOST_WIDE_INT temp;
6917 temp = opint[0] & 0xff;
6918 temp |= (opint[0] & 0x0000ff00) << 8;
6919 temp |= (opint[0] & 0x00ff0000) << 16;
6920 temp |= (opint[0] & 0xff000000) << 24;
6922 return build_int_cst (long_integer_type_node, temp);
6926 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6928 unsigned HOST_WIDE_INT temp;
6936 temp = exact_log2 (opint[0] & -opint[0]);
6938 return build_int_cst (long_integer_type_node, temp);
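/* Illustrative note: opint[0] & -opint[0] isolates the lowest set
   bit, whose exact_log2 is the count of trailing zeros.  E.g. for
   0b101000 the isolated bit is 0b1000, giving cttz == 3.  */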
6942 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6944 unsigned HOST_WIDE_INT temp;
6952 temp = 64 - floor_log2 (opint[0]) - 1;
6954 return build_int_cst (long_integer_type_node, temp);
6958 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6960 unsigned HOST_WIDE_INT temp, op;
6968 temp++, op &= op - 1;
6970 return build_int_cst (long_integer_type_node, temp);
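/* Illustrative note: the loop above is Kernighan's population-count
   trick; each "op &= op - 1" clears the lowest set bit, so the loop
   iterates once per set bit.  E.g. 0b1011 takes three iterations,
   giving ctpop == 3.  */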
6973 /* Fold one of our builtin functions. */
6976 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6978 tree op[MAX_ARGS], t;
6979 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6980 long op_const = 0, arity = 0;
6982 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6984 tree arg = TREE_VALUE (t);
6985 if (arg == error_mark_node)
6987 if (arity >= MAX_ARGS)
6992 if (TREE_CODE (arg) == INTEGER_CST)
6994 op_const |= 1L << arity;
6995 opint[arity] = int_cst_value (arg);
6999 switch (DECL_FUNCTION_CODE (fndecl))
7001 case ALPHA_BUILTIN_CMPBGE:
7002 return alpha_fold_builtin_cmpbge (opint, op_const);
7004 case ALPHA_BUILTIN_EXTBL:
7005 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7006 case ALPHA_BUILTIN_EXTWL:
7007 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7008 case ALPHA_BUILTIN_EXTLL:
7009 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7010 case ALPHA_BUILTIN_EXTQL:
7011 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7012 case ALPHA_BUILTIN_EXTWH:
7013 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7014 case ALPHA_BUILTIN_EXTLH:
7015 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7016 case ALPHA_BUILTIN_EXTQH:
7017 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7019 case ALPHA_BUILTIN_INSBL:
7020 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7021 case ALPHA_BUILTIN_INSWL:
7022 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7023 case ALPHA_BUILTIN_INSLL:
7024 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7025 case ALPHA_BUILTIN_INSQL:
7026 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7027 case ALPHA_BUILTIN_INSWH:
7028 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7029 case ALPHA_BUILTIN_INSLH:
7030 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7031 case ALPHA_BUILTIN_INSQH:
7032 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7034 case ALPHA_BUILTIN_MSKBL:
7035 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7036 case ALPHA_BUILTIN_MSKWL:
7037 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7038 case ALPHA_BUILTIN_MSKLL:
7039 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7040 case ALPHA_BUILTIN_MSKQL:
7041 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7042 case ALPHA_BUILTIN_MSKWH:
7043 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7044 case ALPHA_BUILTIN_MSKLH:
7045 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7046 case ALPHA_BUILTIN_MSKQH:
7047 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7049 case ALPHA_BUILTIN_UMULH:
7050 return alpha_fold_builtin_umulh (opint, op_const);
7052 case ALPHA_BUILTIN_ZAP:
7055 case ALPHA_BUILTIN_ZAPNOT:
7056 return alpha_fold_builtin_zapnot (op, opint, op_const);
7058 case ALPHA_BUILTIN_MINUB8:
7059 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7060 case ALPHA_BUILTIN_MINSB8:
7061 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7062 case ALPHA_BUILTIN_MINUW4:
7063 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7064 case ALPHA_BUILTIN_MINSW4:
7065 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7066 case ALPHA_BUILTIN_MAXUB8:
7067 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7068 case ALPHA_BUILTIN_MAXSB8:
7069 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7070 case ALPHA_BUILTIN_MAXUW4:
7071 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7072 case ALPHA_BUILTIN_MAXSW4:
7073 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7075 case ALPHA_BUILTIN_PERR:
7076 return alpha_fold_builtin_perr (opint, op_const);
7077 case ALPHA_BUILTIN_PKLB:
7078 return alpha_fold_builtin_pklb (opint, op_const);
7079 case ALPHA_BUILTIN_PKWB:
7080 return alpha_fold_builtin_pkwb (opint, op_const);
7081 case ALPHA_BUILTIN_UNPKBL:
7082 return alpha_fold_builtin_unpkbl (opint, op_const);
7083 case ALPHA_BUILTIN_UNPKBW:
7084 return alpha_fold_builtin_unpkbw (opint, op_const);
7086 case ALPHA_BUILTIN_CTTZ:
7087 return alpha_fold_builtin_cttz (opint, op_const);
7088 case ALPHA_BUILTIN_CTLZ:
7089 return alpha_fold_builtin_ctlz (opint, op_const);
7090 case ALPHA_BUILTIN_CTPOP:
7091 return alpha_fold_builtin_ctpop (opint, op_const);
7093 case ALPHA_BUILTIN_AMASK:
7094 case ALPHA_BUILTIN_IMPLVER:
7095 case ALPHA_BUILTIN_RPCC:
7096 case ALPHA_BUILTIN_THREAD_POINTER:
7097 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7098 /* None of these are foldable at compile-time. */
7104 /* This page contains routines that are used to determine what the function
7105 prologue and epilogue code will do and write them out. */
7107 /* Compute the size of the save area in the stack. */
7109 /* These variables are used for communication between the following functions.
7110 They indicate various things about the current function being compiled
7111 that are used to tell what kind of prologue, epilogue and procedure
7112 descriptor to generate. */
7114 /* Nonzero if we need a stack procedure. */
7115 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7116 static enum alpha_procedure_types alpha_procedure_type;
7118 /* Register number (either FP or SP) that is used to unwind the frame. */
7119 static int vms_unwind_regno;
7121 /* Register number used to save FP. We need not have one for RA since
7122 we don't modify it for register procedures. This is only defined
7123 for register frame procedures. */
7124 static int vms_save_fp_regno;
7126 /* Register number used to reference objects off our PV. */
7127 static int vms_base_regno;
7129 /* Compute register masks for saved registers. */
7132 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7134 unsigned long imask = 0;
7135 unsigned long fmask = 0;
7138 /* When outputting a thunk, we don't have valid register life info,
7139 but assemble_start_function wants to output .frame and .mask
7148 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7149 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7151 /* One for every register we have to save. */
7152 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7153 if (! fixed_regs[i] && ! call_used_regs[i]
7154 && df_regs_ever_live_p (i) && i != REG_RA
7155 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7158 imask |= (1UL << i);
7160 fmask |= (1UL << (i - 32));
7163 /* We need to restore these for the handler. */
7164 if (crtl->calls_eh_return)
7168 unsigned regno = EH_RETURN_DATA_REGNO (i);
7169 if (regno == INVALID_REGNUM)
7171 imask |= 1UL << regno;
7175 /* If any register spilled, then spill the return address also. */
7176 /* ??? This is required by the Digital stack unwind specification
7177 and isn't needed if we're doing Dwarf2 unwinding. */
7178 if (imask || fmask || alpha_ra_ever_killed ())
7179 imask |= (1UL << REG_RA);
7186 alpha_sa_size (void)
7188 unsigned long mask[2];
7192 alpha_sa_mask (&mask[0], &mask[1]);
7194 if (TARGET_ABI_UNICOSMK)
7196 if (mask[0] || mask[1])
7201 for (j = 0; j < 2; ++j)
7202 for (i = 0; i < 32; ++i)
7203 if ((mask[j] >> i) & 1)
7207 if (TARGET_ABI_UNICOSMK)
7209 /* We might not need to generate a frame if we don't make any calls
7210 (including calls to __T3E_MISMATCH if this is a vararg function),
7211 don't have any local variables which require stack slots, don't
7212 use alloca and have not determined that we need a frame for other reasons. */
7215 alpha_procedure_type
7216 = (sa_size || get_frame_size() != 0
7217 || crtl->outgoing_args_size
7218 || cfun->stdarg || cfun->calls_alloca
7219 || frame_pointer_needed)
7220 ? PT_STACK : PT_REGISTER;
7222 /* Always reserve space for saving callee-saved registers if we
7223 need a frame as required by the calling convention. */
7224 if (alpha_procedure_type == PT_STACK)
7227 else if (TARGET_ABI_OPEN_VMS)
7229 /* Start by assuming we can use a register procedure if we don't
7230 make any calls (REG_RA not used) or need to save any
7231 registers and a stack procedure if we do. */
7232 if ((mask[0] >> REG_RA) & 1)
7233 alpha_procedure_type = PT_STACK;
7234 else if (get_frame_size() != 0)
7235 alpha_procedure_type = PT_REGISTER;
7237 alpha_procedure_type = PT_NULL;
7239 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7240 made the final decision on stack procedure vs register procedure. */
7241 if (alpha_procedure_type == PT_STACK)
7244 /* Decide whether to refer to objects off our PV via FP or PV.
7245 If we need FP for something else or if we receive a nonlocal
7246 goto (which expects PV to contain the value), we must use PV.
7247 Otherwise, start by assuming we can use FP. */
7250 = (frame_pointer_needed
7251 || cfun->has_nonlocal_label
7252 || alpha_procedure_type == PT_STACK
7253 || crtl->outgoing_args_size)
7254 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7256 /* If we want to copy PV into FP, we need to find some register
7257 in which to save FP. */
7259 vms_save_fp_regno = -1;
7260 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7261 for (i = 0; i < 32; i++)
7262 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7263 vms_save_fp_regno = i;
7265 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7266 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7267 else if (alpha_procedure_type == PT_NULL)
7268 vms_base_regno = REG_PV;
7270 /* Stack unwinding should be done via FP unless we use it for PV. */
7271 vms_unwind_regno = (vms_base_regno == REG_PV
7272 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7274 /* If this is a stack procedure, allow space for saving FP and RA. */
7275 if (alpha_procedure_type == PT_STACK)
7276 sa_size += 2;
7280 /* Our size must be even (multiple of 16 bytes). */
7281 if (sa_size & 1)
7282 sa_size++;
7284 return sa_size * 8;
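/* Editor's note: sa_size counts 8-byte register slots, so forcing it
   even keeps the save area a multiple of 16 bytes.  Byte counts
   elsewhere in this file get the matching treatment through
   ALPHA_ROUND from alpha.h, essentially (((X) + 15) & ~15), e.g.
   ALPHA_ROUND (24) == 32.  */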
7288 /* Define the offset between two registers, one to be eliminated,
7289 and the other its replacement, at the start of a routine. */
7292 alpha_initial_elimination_offset (unsigned int from,
7293 unsigned int to ATTRIBUTE_UNUSED)
7297 ret = alpha_sa_size ();
7298 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7302 case FRAME_POINTER_REGNUM:
7303 break;
7305 case ARG_POINTER_REGNUM:
7306 ret += (ALPHA_ROUND (get_frame_size ()
7307 + crtl->args.pretend_args_size)
7308 - crtl->args.pretend_args_size);
7319 alpha_pv_save_size (void)
7322 return alpha_procedure_type == PT_STACK ? 8 : 0;
7326 alpha_using_fp (void)
7329 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7332 #if TARGET_ABI_OPEN_VMS
7334 const struct attribute_spec vms_attribute_table[] =
7336 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7337 { "overlaid", 0, 0, true, false, false, NULL },
7338 { "global", 0, 0, true, false, false, NULL },
7339 { "initialize", 0, 0, true, false, false, NULL },
7340 { NULL, 0, 0, false, false, false, NULL }
7346 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7348 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7352 alpha_find_lo_sum_using_gp (rtx insn)
7354 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
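/* Editor's note: for_each_rtx walks every sub-rtx of PATTERN (insn) and
   stops as soon as the callback returns nonzero, so the predicate above
   holds exactly when the insn contains a LO_SUM whose base operand is
   the GP (pic_offset_table_rtx).  */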
7358 alpha_does_function_need_gp (void)
7362 /* The GP being variable is an OSF abi thing. */
7363 if (! TARGET_ABI_OSF)
7366 /* We need the gp to load the address of __mcount. */
7367 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7370 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7374 /* The nonlocal receiver pattern assumes that the gp is valid for
7375 the nested function. Reasonable because it's almost always set
7376 correctly already. For the cases where that's wrong, make sure
7377 the nested function loads its gp on entry. */
7378 if (crtl->has_nonlocal_goto)
7381 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7382 Even if we are a static function, we still need to do this in case
7383 our address is taken and passed to something like qsort. */
7385 push_topmost_sequence ();
7386 insn = get_insns ();
7387 pop_topmost_sequence ();
7389 for (; insn; insn = NEXT_INSN (insn))
7391 && ! JUMP_TABLE_DATA_P (insn)
7392 && GET_CODE (PATTERN (insn)) != USE
7393 && GET_CODE (PATTERN (insn)) != CLOBBER
7394 && get_attr_usegp (insn))
7401 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7402 sequences. */
7405 set_frame_related_p (void)
7407 rtx seq = get_insns ();
7418 while (insn != NULL_RTX)
7420 RTX_FRAME_RELATED_P (insn) = 1;
7421 insn = NEXT_INSN (insn);
7423 seq = emit_insn (seq);
7427 seq = emit_insn (seq);
7428 RTX_FRAME_RELATED_P (seq) = 1;
7433 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
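/* Usage sketch for the macro above: bracketing an emitter call with FRP
   runs it inside its own sequence and then marks every insn it emitted
   as frame-related, e.g.

     FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

   as the prologue code below does when establishing the frame pointer.  */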
7435 /* Generates a store with the proper unwind info attached. VALUE is
7436 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7437 contains SP+FRAME_BIAS, and that is the unwind info that should be
7438 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7439 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7442 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7443 HOST_WIDE_INT base_ofs, rtx frame_reg)
7445 rtx addr, mem, insn;
7447 addr = plus_constant (base_reg, base_ofs);
7448 mem = gen_rtx_MEM (DImode, addr);
7449 set_mem_alias_set (mem, alpha_sr_alias_set);
7451 insn = emit_move_insn (mem, value);
7452 RTX_FRAME_RELATED_P (insn) = 1;
7454 if (frame_bias || value != frame_reg)
7458 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7459 mem = gen_rtx_MEM (DImode, addr);
7462 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7463 gen_rtx_SET (VOIDmode, mem, frame_reg));
7468 emit_frame_store (unsigned int regno, rtx base_reg,
7469 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7471 rtx reg = gen_rtx_REG (DImode, regno);
7472 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
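/* Usage sketch: the prologue below saves the return address with

     emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);

   storing $26 at SA_REG+REG_OFFSET, while the attached unwind note
   describes the slot relative to SP whenever SA_REG is a biased copy of
   the stack pointer.  */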
7475 /* Write function prologue. */
7477 /* On vms we have two kinds of functions:
7479 - stack frame (PROC_STACK)
7480 these are 'normal' functions with local vars and which are
7481 calling other functions
7482 - register frame (PROC_REGISTER)
7483 keeps all data in registers, needs no stack
7485 We must pass this to the assembler so it can generate the
7486 proper pdsc (procedure descriptor).
7487 This is done with the '.pdesc' command.
7489 On non-VMS targets, we don't really differentiate between the two, as we can
7490 simply allocate stack without saving registers. */
7493 alpha_expand_prologue (void)
7495 /* Registers to save. */
7496 unsigned long imask = 0;
7497 unsigned long fmask = 0;
7498 /* Stack space needed for pushing registers clobbered by us. */
7499 HOST_WIDE_INT sa_size;
7500 /* Complete stack size needed. */
7501 HOST_WIDE_INT frame_size;
7502 /* Offset from base reg to register save area. */
7503 HOST_WIDE_INT reg_offset;
7507 sa_size = alpha_sa_size ();
7509 frame_size = get_frame_size ();
7510 if (TARGET_ABI_OPEN_VMS)
7511 frame_size = ALPHA_ROUND (sa_size
7512 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7514 + crtl->args.pretend_args_size);
7515 else if (TARGET_ABI_UNICOSMK)
7516 /* We have to allocate space for the DSIB if we generate a frame. */
7517 frame_size = ALPHA_ROUND (sa_size
7518 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7519 + ALPHA_ROUND (frame_size
7520 + crtl->outgoing_args_size);
7522 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7524 + ALPHA_ROUND (frame_size
7525 + crtl->args.pretend_args_size));
7527 if (TARGET_ABI_OPEN_VMS)
7530 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7532 alpha_sa_mask (&imask, &fmask);
7534 /* Emit an insn to reload GP, if needed. */
7537 alpha_function_needs_gp = alpha_does_function_need_gp ();
7538 if (alpha_function_needs_gp)
7539 emit_insn (gen_prologue_ldgp ());
7542 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7543 the call to mcount ourselves, rather than having the linker do it
7544 magically in response to -pg. Since _mcount has special linkage,
7545 don't represent the call as a call. */
7546 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7547 emit_insn (gen_prologue_mcount ());
7549 if (TARGET_ABI_UNICOSMK)
7550 unicosmk_gen_dsib (&imask);
7552 /* Adjust the stack by the frame size. If the frame size is > 4096
7553 bytes, we need to be sure we probe somewhere in the first and last
7554 4096 bytes (we can probably get away without the latter test) and
7555 every 8192 bytes in between. If the frame size is > 32768, we
7556 do this in a loop. Otherwise, we generate the explicit probe
7559 Note that we are only allowed to adjust sp once in the prologue. */
7561 if (frame_size <= 32768)
7563 if (frame_size > 4096)
7567 for (probed = 4096; probed < frame_size; probed += 8192)
7568 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7572 /* We only have to do this probe if we aren't saving registers. */
7573 if (sa_size == 0 && frame_size > probed - 4096)
7574 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7577 if (frame_size != 0)
7578 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7579 GEN_INT (TARGET_ABI_UNICOSMK
7585 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7586 number of 8192 byte blocks to probe. We then probe each block
7587 in the loop and then set SP to the proper location. If the
7588 amount remaining is > 4096, we have to do one more probe if we
7589 are not saving any registers. */
7591 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7592 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7593 rtx ptr = gen_rtx_REG (DImode, 22);
7594 rtx count = gen_rtx_REG (DImode, 23);
7597 emit_move_insn (count, GEN_INT (blocks));
7598 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7599 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7601 /* Because of the difficulty in emitting a new basic block this
7602 late in the compilation, generate the loop as a single insn. */
7603 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7605 if (leftover > 4096 && sa_size == 0)
7607 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7608 MEM_VOLATILE_P (last) = 1;
7609 emit_move_insn (last, const0_rtx);
7612 if (TARGET_ABI_WINDOWS_NT)
7614 /* For NT stack unwind (done by 'reverse execution'), it's
7615 not OK to take the result of a loop, even though the value
7616 is already in ptr, so we reload it via a single operation
7617 and subtract it from sp.
7619 Yes, that's correct -- we have to reload the whole constant
7620 into a temporary via ldah+lda then subtract from sp. */
7622 HOST_WIDE_INT lo, hi;
7623 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7624 hi = frame_size - lo;
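/* Editor's worked example of the split above: frame_size == 0x12345678
   gives lo == 0x5678 and hi == 0x12340000, each half fitting the
   sign-extended 16-bit immediates of lda and ldah.  The "^ 0x8000,
   - 0x8000" idiom sign-extends the low half, so frame_size == 0x18000
   splits into lo == -0x8000 and hi == 0x20000.  */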
7626 emit_move_insn (ptr, GEN_INT (hi));
7627 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7628 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7633 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7634 GEN_INT (-leftover)));
7637 /* This alternative is special, because the DWARF code cannot
7638 possibly intuit through the loop above. So we invent this
7639 note for it to look at instead. */
7640 RTX_FRAME_RELATED_P (seq) = 1;
7641 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7642 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7643 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7644 GEN_INT (TARGET_ABI_UNICOSMK
7649 if (!TARGET_ABI_UNICOSMK)
7651 HOST_WIDE_INT sa_bias = 0;
7653 /* Cope with very large offsets to the register save area. */
7654 sa_reg = stack_pointer_rtx;
7655 if (reg_offset + sa_size > 0x8000)
7657 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7660 if (low + sa_size <= 0x8000)
7661 sa_bias = reg_offset - low, reg_offset = low;
7662 else
7663 sa_bias = reg_offset, reg_offset = 0;
7665 sa_reg = gen_rtx_REG (DImode, 24);
7666 sa_bias_rtx = GEN_INT (sa_bias);
7668 if (add_operand (sa_bias_rtx, DImode))
7669 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7670 else
7671 {
7672 emit_move_insn (sa_reg, sa_bias_rtx);
7673 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7674 }
7677 /* Save regs in stack order, beginning with the VMS PV. */
7678 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7679 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7681 /* Save register RA next. */
7682 if (imask & (1UL << REG_RA))
7684 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7685 imask &= ~(1UL << REG_RA);
7689 /* Now save any other registers required to be saved. */
7690 for (i = 0; i < 31; i++)
7691 if (imask & (1UL << i))
7693 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7697 for (i = 0; i < 31; i++)
7698 if (fmask & (1UL << i))
7700 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7704 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7706 /* The standard frame on the T3E includes space for saving registers.
7707 We just have to use it. We don't have to save the return address and
7708 the old frame pointer here - they are saved in the DSIB. */
7711 for (i = 9; i < 15; i++)
7712 if (imask & (1UL << i))
7714 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7717 for (i = 2; i < 10; i++)
7718 if (fmask & (1UL << i))
7720 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7725 if (TARGET_ABI_OPEN_VMS)
7727 if (alpha_procedure_type == PT_REGISTER)
7728 /* Register frame procedures save the fp.
7729 ??? Ought to have a dwarf2 save for this. */
7730 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7731 hard_frame_pointer_rtx);
7733 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7734 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7735 gen_rtx_REG (DImode, REG_PV)));
7737 if (alpha_procedure_type != PT_NULL
7738 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7739 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7741 /* If we have to allocate space for outgoing args, do it now. */
7742 if (crtl->outgoing_args_size != 0)
7745 = emit_move_insn (stack_pointer_rtx,
7747 (hard_frame_pointer_rtx,
7749 (crtl->outgoing_args_size))));
7751 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7752 if ! frame_pointer_needed. Setting the bit will change the CFA
7753 computation rule to use sp again, which would be wrong if we had
7754 frame_pointer_needed, as this means sp might move unpredictably
7758 frame_pointer_needed
7759 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7761 crtl->outgoing_args_size != 0
7762 => alpha_procedure_type != PT_NULL,
7764 so when we are not setting the bit here, we are guaranteed to
7765 have emitted an FRP frame pointer update just before. */
7766 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7769 else if (!TARGET_ABI_UNICOSMK)
7771 /* If we need a frame pointer, set it from the stack pointer. */
7772 if (frame_pointer_needed)
7774 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7775 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7777 /* This must always be the last instruction in the
7778 prologue, thus we emit a special move + clobber. */
7779 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7780 stack_pointer_rtx, sa_reg)));
7784 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7785 the prologue, for exception handling reasons, we cannot do this for
7786 any insn that might fault. We could prevent this for mems with a
7787 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7788 have to prevent all such scheduling with a blockage.
7790 Linux, on the other hand, never bothered to implement OSF/1's
7791 exception handling, and so doesn't care about such things. Anyone
7792 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7794 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7795 emit_insn (gen_blockage ());
7798 /* Count the number of .file directives, so that .loc is up to date. */
7799 int num_source_filenames = 0;
7801 /* Output the textual info surrounding the prologue. */
7804 alpha_start_function (FILE *file, const char *fnname,
7805 tree decl ATTRIBUTE_UNUSED)
7807 unsigned long imask = 0;
7808 unsigned long fmask = 0;
7809 /* Stack space needed for pushing registers clobbered by us. */
7810 HOST_WIDE_INT sa_size;
7811 /* Complete stack size needed. */
7812 unsigned HOST_WIDE_INT frame_size;
7813 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7814 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7817 /* Offset from base reg to register save area. */
7818 HOST_WIDE_INT reg_offset;
7819 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7822 /* Don't emit an extern directive for functions defined in the same file. */
7823 if (TARGET_ABI_UNICOSMK)
7826 name_tree = get_identifier (fnname);
7827 TREE_ASM_WRITTEN (name_tree) = 1;
7830 alpha_fnname = fnname;
7831 sa_size = alpha_sa_size ();
7833 frame_size = get_frame_size ();
7834 if (TARGET_ABI_OPEN_VMS)
7835 frame_size = ALPHA_ROUND (sa_size
7836 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7838 + crtl->args.pretend_args_size);
7839 else if (TARGET_ABI_UNICOSMK)
7840 frame_size = ALPHA_ROUND (sa_size
7841 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7842 + ALPHA_ROUND (frame_size
7843 + crtl->outgoing_args_size);
7845 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7847 + ALPHA_ROUND (frame_size
7848 + crtl->args.pretend_args_size));
7850 if (TARGET_ABI_OPEN_VMS)
7853 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7855 alpha_sa_mask (&imask, &fmask);
7857 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7858 We have to do that before the .ent directive as we cannot switch
7859 files within procedures with native ecoff because line numbers are
7860 linked to procedure descriptors.
7861 Outputting the lineno helps debugging of one line functions as they
7862 would otherwise get no line number at all. Please note that we would
7863 like to put out last_linenum from final.c, but it is not accessible. */
7865 if (write_symbols == SDB_DEBUG)
7867 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7868 ASM_OUTPUT_SOURCE_FILENAME (file,
7869 DECL_SOURCE_FILE (current_function_decl));
7871 #ifdef SDB_OUTPUT_SOURCE_LINE
7872 if (debug_info_level != DINFO_LEVEL_TERSE)
7873 SDB_OUTPUT_SOURCE_LINE (file,
7874 DECL_SOURCE_LINE (current_function_decl));
7878 /* Issue function start and label. */
7879 if (TARGET_ABI_OPEN_VMS
7880 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7882 fputs ("\t.ent ", file);
7883 assemble_name (file, fnname);
7886 /* If the function needs GP, we'll write the "..ng" label there.
7887 Otherwise, do it here. */
7889 && ! alpha_function_needs_gp
7890 && ! cfun->is_thunk)
7893 assemble_name (file, fnname);
7894 fputs ("..ng:\n", file);
7898 strcpy (entry_label, fnname);
7899 if (TARGET_ABI_OPEN_VMS)
7900 strcat (entry_label, "..en");
7902 /* For public functions, the label must be globalized by appending an
7903 additional colon. */
7904 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7905 strcat (entry_label, ":");
7907 ASM_OUTPUT_LABEL (file, entry_label);
7908 inside_function = TRUE;
7910 if (TARGET_ABI_OPEN_VMS)
7911 fprintf (file, "\t.base $%d\n", vms_base_regno);
7913 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7914 && !flag_inhibit_size_directive)
7916 /* Set flags in procedure descriptor to request IEEE-conformant
7917 math-library routines. The value we set it to is PDSC_EXC_IEEE
7918 (/usr/include/pdsc.h). */
7919 fputs ("\t.eflag 48\n", file);
7922 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7923 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7924 alpha_arg_offset = -frame_size + 48;
7926 /* Describe our frame. If the frame size is larger than an integer,
7927 print it as zero to avoid an assembler error. We won't be
7928 properly describing such a frame, but that's the best we can do. */
7929 if (TARGET_ABI_UNICOSMK)
7931 else if (TARGET_ABI_OPEN_VMS)
7932 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7933 HOST_WIDE_INT_PRINT_DEC "\n",
7935 frame_size >= (1UL << 31) ? 0 : frame_size,
7937 else if (!flag_inhibit_size_directive)
7938 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7939 (frame_pointer_needed
7940 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7941 frame_size >= max_frame_size ? 0 : frame_size,
7942 crtl->args.pretend_args_size);
7944 /* Describe which registers were spilled. */
7945 if (TARGET_ABI_UNICOSMK)
7947 else if (TARGET_ABI_OPEN_VMS)
7950 /* ??? Does VMS care if mask contains ra? The old code didn't
7951 set it, so I don't here. */
7952 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7954 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7955 if (alpha_procedure_type == PT_REGISTER)
7956 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7958 else if (!flag_inhibit_size_directive)
7962 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7963 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7965 for (i = 0; i < 32; ++i)
7966 if (imask & (1UL << i))
7971 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7972 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7975 #if TARGET_ABI_OPEN_VMS
7976 /* Ifdef'ed because link_section is only available then. */
7977 switch_to_section (readonly_data_section);
7978 fprintf (file, "\t.align 3\n");
7979 assemble_name (file, fnname); fputs ("..na:\n", file);
7980 fputs ("\t.ascii \"", file);
7981 assemble_name (file, fnname);
7982 fputs ("\\0\"\n", file);
7983 alpha_need_linkage (fnname, 1);
7984 switch_to_section (text_section);
7988 /* Emit the .prologue note at the scheduled end of the prologue. */
7991 alpha_output_function_end_prologue (FILE *file)
7993 if (TARGET_ABI_UNICOSMK)
7995 else if (TARGET_ABI_OPEN_VMS)
7996 fputs ("\t.prologue\n", file);
7997 else if (TARGET_ABI_WINDOWS_NT)
7998 fputs ("\t.prologue 0\n", file);
7999 else if (!flag_inhibit_size_directive)
8000 fprintf (file, "\t.prologue %d\n",
8001 alpha_function_needs_gp || cfun->is_thunk);
8004 /* Write function epilogue. */
8006 /* ??? At some point we will want to support full unwind, and so will
8007 need to mark the epilogue as well. At the moment, we just confuse
8008 dwarf2out. */
8010 #define FRP(exp) exp
8013 alpha_expand_epilogue (void)
8015 /* Registers to save. */
8016 unsigned long imask = 0;
8017 unsigned long fmask = 0;
8018 /* Stack space needed for pushing registers clobbered by us. */
8019 HOST_WIDE_INT sa_size;
8020 /* Complete stack size needed. */
8021 HOST_WIDE_INT frame_size;
8022 /* Offset from base reg to register save area. */
8023 HOST_WIDE_INT reg_offset;
8024 int fp_is_frame_pointer, fp_offset;
8025 rtx sa_reg, sa_reg_exp = NULL;
8026 rtx sp_adj1, sp_adj2, mem;
8030 sa_size = alpha_sa_size ();
8032 frame_size = get_frame_size ();
8033 if (TARGET_ABI_OPEN_VMS)
8034 frame_size = ALPHA_ROUND (sa_size
8035 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8037 + crtl->args.pretend_args_size);
8038 else if (TARGET_ABI_UNICOSMK)
8039 frame_size = ALPHA_ROUND (sa_size
8040 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8041 + ALPHA_ROUND (frame_size
8042 + crtl->outgoing_args_size);
8044 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
8046 + ALPHA_ROUND (frame_size
8047 + crtl->args.pretend_args_size));
8049 if (TARGET_ABI_OPEN_VMS)
8051 if (alpha_procedure_type == PT_STACK)
8057 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8059 alpha_sa_mask (&imask, &fmask);
8061 fp_is_frame_pointer
8062 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8063 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8065 sa_reg = stack_pointer_rtx;
8067 if (crtl->calls_eh_return)
8068 eh_ofs = EH_RETURN_STACKADJ_RTX;
8072 if (!TARGET_ABI_UNICOSMK && sa_size)
8074 /* If we have a frame pointer, restore SP from it. */
8075 if ((TARGET_ABI_OPEN_VMS
8076 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8077 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8078 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8080 /* Cope with very large offsets to the register save area. */
8081 if (reg_offset + sa_size > 0x8000)
8083 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8086 if (low + sa_size <= 0x8000)
8087 bias = reg_offset - low, reg_offset = low;
8088 else
8089 bias = reg_offset, reg_offset = 0;
8091 sa_reg = gen_rtx_REG (DImode, 22);
8092 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8094 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8097 /* Restore registers in order, excepting a true frame pointer. */
8099 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8101 set_mem_alias_set (mem, alpha_sr_alias_set);
8102 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8105 imask &= ~(1UL << REG_RA);
8107 for (i = 0; i < 31; ++i)
8108 if (imask & (1UL << i))
8110 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8111 fp_offset = reg_offset;
8114 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8115 set_mem_alias_set (mem, alpha_sr_alias_set);
8116 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8121 for (i = 0; i < 31; ++i)
8122 if (fmask & (1UL << i))
8124 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8125 set_mem_alias_set (mem, alpha_sr_alias_set);
8126 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8130 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8132 /* Restore callee-saved general-purpose registers. */
8136 for (i = 9; i < 15; i++)
8137 if (imask & (1UL << i))
8139 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8141 set_mem_alias_set (mem, alpha_sr_alias_set);
8142 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8146 for (i = 2; i < 10; i++)
8147 if (fmask & (1UL << i))
8149 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8151 set_mem_alias_set (mem, alpha_sr_alias_set);
8152 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8156 /* Restore the return address from the DSIB. */
8158 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8159 set_mem_alias_set (mem, alpha_sr_alias_set);
8160 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8163 if (frame_size || eh_ofs)
8165 sp_adj1 = stack_pointer_rtx;
8169 sp_adj1 = gen_rtx_REG (DImode, 23);
8170 emit_move_insn (sp_adj1,
8171 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8174 /* If the stack size is large, begin computation into a temporary
8175 register so as not to interfere with a potential fp restore,
8176 which must be consecutive with an SP restore. */
8177 if (frame_size < 32768
8178 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8179 sp_adj2 = GEN_INT (frame_size);
8180 else if (TARGET_ABI_UNICOSMK)
8182 sp_adj1 = gen_rtx_REG (DImode, 23);
8183 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8184 sp_adj2 = const0_rtx;
8186 else if (frame_size < 0x40007fffL)
8188 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8190 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8191 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8195 sp_adj1 = gen_rtx_REG (DImode, 23);
8196 FRP (emit_move_insn (sp_adj1, sp_adj2));
8198 sp_adj2 = GEN_INT (low);
8202 rtx tmp = gen_rtx_REG (DImode, 23);
8203 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8207 /* We can't drop new things to memory this late, afaik,
8208 so build it up by pieces. */
8209 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8210 -(frame_size < 0)));
8211 gcc_assert (sp_adj2);
8215 /* From now on, things must be in order. So emit blockages. */
8217 /* Restore the frame pointer. */
8218 if (TARGET_ABI_UNICOSMK)
8220 emit_insn (gen_blockage ());
8221 mem = gen_rtx_MEM (DImode,
8222 plus_constant (hard_frame_pointer_rtx, -16));
8223 set_mem_alias_set (mem, alpha_sr_alias_set);
8224 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8226 else if (fp_is_frame_pointer)
8228 emit_insn (gen_blockage ());
8229 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8230 set_mem_alias_set (mem, alpha_sr_alias_set);
8231 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8233 else if (TARGET_ABI_OPEN_VMS)
8235 emit_insn (gen_blockage ());
8236 FRP (emit_move_insn (hard_frame_pointer_rtx,
8237 gen_rtx_REG (DImode, vms_save_fp_regno)));
8240 /* Restore the stack pointer. */
8241 emit_insn (gen_blockage ());
8242 if (sp_adj2 == const0_rtx)
8243 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8245 FRP (emit_move_insn (stack_pointer_rtx,
8246 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8250 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8252 emit_insn (gen_blockage ());
8253 FRP (emit_move_insn (hard_frame_pointer_rtx,
8254 gen_rtx_REG (DImode, vms_save_fp_regno)));
8256 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8258 /* Decrement the frame pointer if the function does not have a
8259 frame. */
8261 emit_insn (gen_blockage ());
8262 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8263 hard_frame_pointer_rtx, constm1_rtx)));
8268 /* Output the rest of the textual info surrounding the epilogue. */
8271 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8275 /* We output a nop after noreturn calls at the very end of the function to
8276 ensure that the return address always remains in the caller's code range,
8277 as not doing so might confuse unwinding engines. */
8278 insn = get_last_insn ();
8280 insn = prev_active_insn (insn);
8282 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8286 free_after_compilation (cfun);
8289 #if TARGET_ABI_OPEN_VMS
8290 alpha_write_linkage (file, fnname, decl);
8293 /* End the function. */
8294 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8296 fputs ("\t.end ", file);
8297 assemble_name (file, fnname);
8300 inside_function = FALSE;
8302 /* Output jump tables and the static subroutine information block. */
8303 if (TARGET_ABI_UNICOSMK)
8305 unicosmk_output_ssib (file, fnname);
8306 unicosmk_output_deferred_case_vectors (file);
8311 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8313 In order to avoid the hordes of differences between generated code
8314 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8315 lots of code loading up large constants, generate rtl and emit it
8316 instead of going straight to text.
8318 Not sure why this idea hasn't been explored before... */
8321 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8322 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8325 HOST_WIDE_INT hi, lo;
8326 rtx this_rtx, insn, funexp;
8328 gcc_assert (cfun->is_thunk);
8330 /* We always require a valid GP. */
8331 emit_insn (gen_prologue_ldgp ());
8332 emit_note (NOTE_INSN_PROLOGUE_END);
8334 /* Find the "this" pointer. If the function returns a structure,
8335 the structure return pointer is in $16. */
8336 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8337 this_rtx = gen_rtx_REG (Pmode, 17);
8339 this_rtx = gen_rtx_REG (Pmode, 16);
8341 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8342 entire constant for the add. */
8343 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8344 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8345 if (hi + lo == delta)
8348 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8350 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8354 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8355 delta, -(delta < 0));
8356 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
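/* Editor's worked example of the fast-path test above: delta ==
   0x7fff8000 gives lo == -0x8000 and, after the 32-bit wrap in the hi
   computation, hi == -0x80000000, so hi + lo != delta and the ldah+lda
   pair is correctly rejected in favor of building the full 64-bit
   constant via alpha_emit_set_long_const.  */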
8359 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8364 tmp = gen_rtx_REG (Pmode, 0);
8365 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8367 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8368 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8369 if (hi + lo == vcall_offset)
8372 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8376 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8377 vcall_offset, -(vcall_offset < 0));
8378 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8382 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8385 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8387 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8390 /* Generate a tail call to the target function. */
8391 if (! TREE_USED (function))
8393 assemble_external (function);
8394 TREE_USED (function) = 1;
8396 funexp = XEXP (DECL_RTL (function), 0);
8397 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8398 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8399 SIBLING_CALL_P (insn) = 1;
8401 /* Run just enough of rest_of_compilation to get the insns emitted.
8402 There's not really enough bulk here to make other passes such as
8403 instruction scheduling worthwhile. Note that use_thunk calls
8404 assemble_start_function and assemble_end_function. */
8405 insn = get_insns ();
8406 insn_locators_alloc ();
8407 shorten_branches (insn);
8408 final_start_function (insn, file, 1);
8409 final (insn, file, 1);
8410 final_end_function ();
8412 #endif /* TARGET_ABI_OSF */
8414 /* Debugging support. */
8418 /* Count the number of sdb-related labels that are generated (to find block
8419 start and end boundaries). */
8421 int sdb_label_count = 0;
8423 /* Name of the file containing the current function. */
8425 static const char *current_function_file = "";
8427 /* Offsets to alpha virtual arg/local debugging pointers. */
8429 long alpha_arg_offset;
8430 long alpha_auto_offset;
8432 /* Emit a new filename to a stream. */
8435 alpha_output_filename (FILE *stream, const char *name)
8437 static int first_time = TRUE;
8442 ++num_source_filenames;
8443 current_function_file = name;
8444 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8445 output_quoted_string (stream, name);
8446 fprintf (stream, "\n");
8447 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8448 fprintf (stream, "\t#@stabs\n");
8451 else if (write_symbols == DBX_DEBUG)
8452 /* dbxout.c will emit an appropriate .stabs directive. */
8455 else if (name != current_function_file
8456 && strcmp (name, current_function_file) != 0)
8458 if (inside_function && ! TARGET_GAS)
8459 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8462 ++num_source_filenames;
8463 current_function_file = name;
8464 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8467 output_quoted_string (stream, name);
8468 fprintf (stream, "\n");
8472 /* Structure to show the current status of registers and memory. */
8474 struct shadow_summary
8475 {
8476 struct {
8477 unsigned int i : 31; /* Mask of int regs */
8478 unsigned int fp : 31; /* Mask of fp regs */
8479 unsigned int mem : 1; /* mem == imem | fpmem */
8480 } used, defd;
8481 };
8483 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8484 to the summary structure. SET is nonzero if the insn is setting the
8485 object, otherwise zero. */
8488 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8490 const char *format_ptr;
8496 switch (GET_CODE (x))
8498 /* ??? Note that this case would be incorrect if the Alpha had a
8499 ZERO_EXTRACT in SET_DEST. */
8501 summarize_insn (SET_SRC (x), sum, 0);
8502 summarize_insn (SET_DEST (x), sum, 1);
8506 summarize_insn (XEXP (x, 0), sum, 1);
8510 summarize_insn (XEXP (x, 0), sum, 0);
8514 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8515 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8519 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8520 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8524 summarize_insn (SUBREG_REG (x), sum, 0);
8529 int regno = REGNO (x);
8530 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8532 if (regno == 31 || regno == 63)
8538 sum->defd.i |= mask;
8540 sum->defd.fp |= mask;
8545 sum->used.i |= mask;
8547 sum->used.fp |= mask;
8558 /* Find the regs used in memory address computation: */
8559 summarize_insn (XEXP (x, 0), sum, 0);
8562 case CONST_INT: case CONST_DOUBLE:
8563 case SYMBOL_REF: case LABEL_REF: case CONST:
8564 case SCRATCH: case ASM_INPUT:
8567 /* Handle common unary and binary ops for efficiency. */
8568 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8569 case MOD: case UDIV: case UMOD: case AND: case IOR:
8570 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8571 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8572 case NE: case EQ: case GE: case GT: case LE:
8573 case LT: case GEU: case GTU: case LEU: case LTU:
8574 summarize_insn (XEXP (x, 0), sum, 0);
8575 summarize_insn (XEXP (x, 1), sum, 0);
8578 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8579 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8580 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8581 case SQRT: case FFS:
8582 summarize_insn (XEXP (x, 0), sum, 0);
8586 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8587 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8588 switch (format_ptr[i])
8591 summarize_insn (XEXP (x, i), sum, 0);
8595 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8596 summarize_insn (XVECEXP (x, i, j), sum, 0);
8608 /* Ensure a sufficient number of `trapb' insns are in the code when
8609 the user requests code with a trap precision of functions or
8610 calls.
8612 In naive mode, when the user requests a trap-precision of
8613 "instruction", a trapb is needed after every instruction that may
8614 generate a trap. This ensures that the code is resumption safe but
8615 it is also slow.
8617 When optimizations are turned on, we delay issuing a trapb as long
8618 as possible. In this context, a trap shadow is the sequence of
8619 instructions that starts with a (potentially) trap generating
8620 instruction and extends to the next trapb or call_pal instruction
8621 (but GCC never generates call_pal by itself). We can delay (and
8622 therefore sometimes omit) a trapb subject to the following
8623 conditions:
8625 (a) On entry to the trap shadow, if any Alpha register or memory
8626 location contains a value that is used as an operand value by some
8627 instruction in the trap shadow (live on entry), then no instruction
8628 in the trap shadow may modify the register or memory location.
8630 (b) Within the trap shadow, the computation of the base register
8631 for a memory load or store instruction may not involve using the
8632 result of an instruction that might generate an UNPREDICTABLE
8635 (c) Within the trap shadow, no register may be used more than once
8636 as a destination register. (This is to make life easier for the
8637 trap-handler.)
8639 (d) The trap shadow may not include any branch instructions. */
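/* Editor's note: in the code below these conditions become bitmask
   overlap tests on the shadow summaries; condition (c) for instance is
   detected as

     (sum.defd.i & shadow.defd.i) || (sum.defd.fp & shadow.defd.fp)

   and condition (a) as an overlap of sum.defd with shadow.used.  When a
   test fires, a trapb is emitted and the shadow is restarted.  */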
8642 alpha_handle_trap_shadows (void)
8644 struct shadow_summary shadow;
8645 int trap_pending, exception_nesting;
8649 exception_nesting = 0;
8650 shadow.used.i = 0;
8651 shadow.used.fp = 0;
8652 shadow.used.mem = 0;
8653 shadow.defd = shadow.used;
8655 for (i = get_insns (); i ; i = NEXT_INSN (i))
8659 switch (NOTE_KIND (i))
8661 case NOTE_INSN_EH_REGION_BEG:
8662 exception_nesting++;
8667 case NOTE_INSN_EH_REGION_END:
8668 exception_nesting--;
8673 case NOTE_INSN_EPILOGUE_BEG:
8674 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8679 else if (trap_pending)
8681 if (alpha_tp == ALPHA_TP_FUNC)
8684 && GET_CODE (PATTERN (i)) == RETURN)
8687 else if (alpha_tp == ALPHA_TP_INSN)
8691 struct shadow_summary sum;
8693 sum.used.i = 0;
8694 sum.used.fp = 0;
8695 sum.used.mem = 0;
8696 sum.defd = sum.used;
8698 switch (GET_CODE (i))
8701 /* Annoyingly, get_attr_trap will die on these. */
8702 if (GET_CODE (PATTERN (i)) == USE
8703 || GET_CODE (PATTERN (i)) == CLOBBER)
8706 summarize_insn (PATTERN (i), &sum, 0);
8708 if ((sum.defd.i & shadow.defd.i)
8709 || (sum.defd.fp & shadow.defd.fp))
8711 /* (c) would be violated */
8715 /* Combine shadow with summary of current insn: */
8716 shadow.used.i |= sum.used.i;
8717 shadow.used.fp |= sum.used.fp;
8718 shadow.used.mem |= sum.used.mem;
8719 shadow.defd.i |= sum.defd.i;
8720 shadow.defd.fp |= sum.defd.fp;
8721 shadow.defd.mem |= sum.defd.mem;
8723 if ((sum.defd.i & shadow.used.i)
8724 || (sum.defd.fp & shadow.used.fp)
8725 || (sum.defd.mem & shadow.used.mem))
8727 /* (a) would be violated (also takes care of (b)) */
8728 gcc_assert (get_attr_trap (i) != TRAP_YES
8729 || (!(sum.defd.i & sum.used.i)
8730 && !(sum.defd.fp & sum.used.fp)));
8748 n = emit_insn_before (gen_trapb (), i);
8749 PUT_MODE (n, TImode);
8750 PUT_MODE (i, TImode);
8751 trap_pending = 0;
8752 shadow.used.i = 0;
8753 shadow.used.fp = 0;
8754 shadow.used.mem = 0;
8755 shadow.defd = shadow.used;
8760 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8761 && NONJUMP_INSN_P (i)
8762 && GET_CODE (PATTERN (i)) != USE
8763 && GET_CODE (PATTERN (i)) != CLOBBER
8764 && get_attr_trap (i) == TRAP_YES)
8766 if (optimize && !trap_pending)
8767 summarize_insn (PATTERN (i), &shadow, 0);
8773 /* Alpha can only issue instruction groups simultaneously if they are
8774 suitably aligned. This is very processor-specific. */
8775 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8776 that are marked "fake". These instructions do not exist on that target,
8777 but it is possible to see these insns with deranged combinations of
8778 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8779 choose a result at random. */
8781 enum alphaev4_pipe {
8788 enum alphaev5_pipe {
8799 static enum alphaev4_pipe
8800 alphaev4_insn_pipe (rtx insn)
8802 if (recog_memoized (insn) < 0)
8804 if (get_attr_length (insn) != 4)
8807 switch (get_attr_type (insn))
8823 case TYPE_MVI: /* fake */
8838 case TYPE_FSQRT: /* fake */
8839 case TYPE_FTOI: /* fake */
8840 case TYPE_ITOF: /* fake */
8848 static enum alphaev5_pipe
8849 alphaev5_insn_pipe (rtx insn)
8851 if (recog_memoized (insn) < 0)
8853 if (get_attr_length (insn) != 4)
8856 switch (get_attr_type (insn))
8876 case TYPE_FTOI: /* fake */
8877 case TYPE_ITOF: /* fake */
8892 case TYPE_FSQRT: /* fake */
8903 /* IN_USE is a mask of the slots currently filled within the insn group.
8904 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8905 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8907 LEN is, of course, the length of the group in bytes. */
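/* Editor's example: if the group so far holds one dual-issueable insn
   (EV4_IB0 and EV4_IBX both set), a later IB0-only insn can still join
   the group because the hardware can swap the earlier insn into IB1;
   the EV4_IB0 case below admits it only while EV4_IBX is set and
   EV4_IB1 is still free.  */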
8910 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8917 || GET_CODE (PATTERN (insn)) == CLOBBER
8918 || GET_CODE (PATTERN (insn)) == USE)
8923 enum alphaev4_pipe pipe;
8925 pipe = alphaev4_insn_pipe (insn);
8929 /* Force complex instructions to start new groups. */
8933 /* If this is a completely unrecognized insn, it's an asm.
8934 We don't know how long it is, so record length as -1 to
8935 signal a needed realignment. */
8936 if (recog_memoized (insn) < 0)
8939 len = get_attr_length (insn);
8943 if (in_use & EV4_IB0)
8945 if (in_use & EV4_IB1)
8950 in_use |= EV4_IB0 | EV4_IBX;
8954 if (in_use & EV4_IB0)
8956 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8964 if (in_use & EV4_IB1)
8974 /* Haifa doesn't do well scheduling branches. */
8979 insn = next_nonnote_insn (insn);
8981 if (!insn || ! INSN_P (insn))
8984 /* Let Haifa tell us where it thinks insn group boundaries are. */
8985 if (GET_MODE (insn) == TImode)
8988 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8993 insn = next_nonnote_insn (insn);
9001 /* IN_USE is a mask of the slots currently filled within the insn group.
9002 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9003 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9005 LEN is, of course, the length of the group in bytes. */
9008 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9015 || GET_CODE (PATTERN (insn)) == CLOBBER
9016 || GET_CODE (PATTERN (insn)) == USE)
9021 enum alphaev5_pipe pipe;
9023 pipe = alphaev5_insn_pipe (insn);
9027 /* Force complex instructions to start new groups. */
9031 /* If this is a completely unrecognized insn, it's an asm.
9032 We don't know how long it is, so record length as -1 to
9033 signal a needed realignment. */
9034 if (recog_memoized (insn) < 0)
9037 len = get_attr_length (insn);
9040 /* ??? Most of the places below are cases we would like to assert can never
9041 happen, as they would indicate an error either in Haifa, or
9042 in the scheduling description. Unfortunately, Haifa never
9043 schedules the last instruction of the BB, so we don't have
9044 an accurate TI bit to go off. */
9046 if (in_use & EV5_E0)
9048 if (in_use & EV5_E1)
9053 in_use |= EV5_E0 | EV5_E01;
9057 if (in_use & EV5_E0)
9059 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9067 if (in_use & EV5_E1)
9073 if (in_use & EV5_FA)
9075 if (in_use & EV5_FM)
9080 in_use |= EV5_FA | EV5_FAM;
9084 if (in_use & EV5_FA)
9090 if (in_use & EV5_FM)
9103 /* Haifa doesn't do well scheduling branches. */
9104 /* ??? If this is predicted not-taken, slotting continues, except
9105 that no more IBR, FBR, or JSR insns may be slotted. */
9110 insn = next_nonnote_insn (insn);
9112 if (!insn || ! INSN_P (insn))
9115 /* Let Haifa tell us where it thinks insn group boundaries are. */
9116 if (GET_MODE (insn) == TImode)
9119 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9124 insn = next_nonnote_insn (insn);
9133 alphaev4_next_nop (int *pin_use)
9135 int in_use = *pin_use;
9138 if (!(in_use & EV4_IB0))
9143 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9148 else if (TARGET_FP && !(in_use & EV4_IB1))
9161 alphaev5_next_nop (int *pin_use)
9163 int in_use = *pin_use;
9166 if (!(in_use & EV5_E1))
9171 else if (TARGET_FP && !(in_use & EV5_FA))
9176 else if (TARGET_FP && !(in_use & EV5_FM))
9188 /* The instruction group alignment main loop. */
9191 alpha_align_insns (unsigned int max_align,
9192 rtx (*next_group) (rtx, int *, int *),
9193 rtx (*next_nop) (int *))
9195 /* ALIGN is the known alignment for the insn group. */
9197 /* OFS is the offset of the current insn in the insn group. */
9199 int prev_in_use, in_use, len, ldgp;
9202 /* Let shorten_branches take care of assigning alignments to code labels. */
9203 shorten_branches (get_insns ());
9205 if (align_functions < 4)
9207 else if ((unsigned int) align_functions < max_align)
9208 align = align_functions;
9212 ofs = prev_in_use = 0;
9215 i = next_nonnote_insn (i);
9217 ldgp = alpha_function_needs_gp ? 8 : 0;
9221 next = (*next_group) (i, &in_use, &len);
9223 /* When we see a label, resync alignment etc. */
9226 unsigned int new_align = 1 << label_to_alignment (i);
9228 if (new_align >= align)
9230 align = new_align < max_align ? new_align : max_align;
9234 else if (ofs & (new_align-1))
9235 ofs = (ofs | (new_align-1)) + 1;
9239 /* Handle complex instructions specially. */
9240 else if (in_use == 0)
9242 /* Asms will have length < 0. This is a signal that we have
9243 lost alignment knowledge. Assume, however, that the asm
9244 will not mis-align instructions. */
9253 /* If the known alignment is smaller than the recognized insn group,
9254 realign the output. */
9255 else if ((int) align < len)
9257 unsigned int new_log_align = len > 8 ? 4 : 3;
9260 where = prev = prev_nonnote_insn (i);
9261 if (!where || !LABEL_P (where))
9264 /* Can't realign between a call and its gp reload. */
9265 if (! (TARGET_EXPLICIT_RELOCS
9266 && prev && CALL_P (prev)))
9268 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9269 align = 1 << new_log_align;
9274 /* We may not insert padding inside the initial ldgp sequence. */
9278 /* If the group won't fit in the same INT16 as the previous,
9279 we need to add padding to keep the group together. Rather
9280 than simply leaving the insn filling to the assembler, we
9281 can make use of the knowledge of what sorts of instructions
9282 were issued in the previous group to make sure that all of
9283 the added nops are really free. */
9284 else if (ofs + len > (int) align)
9286 int nop_count = (align - ofs) / 4;
9289 /* Insert nops before labels, branches, and calls to truly merge
9290 the execution of the nops with the previous instruction group. */
9291 where = prev_nonnote_insn (i);
9294 if (LABEL_P (where))
9296 rtx where2 = prev_nonnote_insn (where);
9297 if (where2 && JUMP_P (where2))
9300 else if (NONJUMP_INSN_P (where))
9307 emit_insn_before ((*next_nop)(&prev_in_use), where);
9308 while (--nop_count);
9312 ofs = (ofs + len) & (align - 1);
9313 prev_in_use = in_use;
9318 /* Insert an unop between a noreturn function call and GP load. */
9321 alpha_pad_noreturn (void)
9325 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9328 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9331 next = next_active_insn (insn);
9335 rtx pat = PATTERN (next);
9337 if (GET_CODE (pat) == SET
9338 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9339 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9340 emit_insn_after (gen_unop (), insn);
9345 /* Machine dependent reorg pass. */
9350 /* Workaround for a linker error that triggers when an
9351 exception handler immediately follows a noreturn function.
9353 The instruction stream from an object file:
9355 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9356 58: 00 00 ba 27 ldah gp,0(ra)
9357 5c: 00 00 bd 23 lda gp,0(gp)
9358 60: 00 00 7d a7 ldq t12,0(gp)
9359 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9361 was converted in the final link pass to:
9363 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9364 fdb28: 00 00 fe 2f unop
9365 fdb2c: 00 00 fe 2f unop
9366 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9367 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9369 GP load instructions were wrongly cleared by the linker relaxation
9370 pass. This workaround prevents removal of GP loads by inserting
9371 an unop instruction between a noreturn function call and
9372 exception handler prologue. */
9374 if (current_function_has_exception_handlers ())
9375 alpha_pad_noreturn ();
9377 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9378 alpha_handle_trap_shadows ();
9380 /* Due to the number of extra trapb insns, don't bother fixing up
9381 alignment when trap precision is instruction. Moreover, we can
9382 only do our job when sched2 is run. */
9383 if (optimize && !optimize_size
9384 && alpha_tp != ALPHA_TP_INSN
9385 && flag_schedule_insns_after_reload)
9387 if (alpha_tune == PROCESSOR_EV4)
9388 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9389 else if (alpha_tune == PROCESSOR_EV5)
9390 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9394 #if !TARGET_ABI_UNICOSMK
9401 alpha_file_start (void)
9403 #ifdef OBJECT_FORMAT_ELF
9404 /* If emitting dwarf2 debug information, we cannot generate a .file
9405 directive to start the file, as it will conflict with dwarf2out
9406 file numbers. So it's only useful when emitting mdebug output. */
9407 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9410 default_file_start ();
9412 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9415 fputs ("\t.set noreorder\n", asm_out_file);
9416 fputs ("\t.set volatile\n", asm_out_file);
9417 if (!TARGET_ABI_OPEN_VMS)
9418 fputs ("\t.set noat\n", asm_out_file);
9419 if (TARGET_EXPLICIT_RELOCS)
9420 fputs ("\t.set nomacro\n", asm_out_file);
9421 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9422 {
9423 const char *arch;
9425 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9426 arch = "ev6";
9427 else if (TARGET_MAX)
9428 arch = "pca56";
9429 else if (TARGET_BWX)
9430 arch = "ev56";
9431 else if (alpha_cpu == PROCESSOR_EV5)
9432 arch = "ev5";
9433 else
9434 arch = "ev4";
9436 fprintf (asm_out_file, "\t.arch %s\n", arch);
9437 }
9441 #ifdef OBJECT_FORMAT_ELF
9442 /* Since we don't have a .dynbss section, we should not allow global
9443 relocations in the .rodata section. */
9446 alpha_elf_reloc_rw_mask (void)
9448 return flag_pic ? 3 : 2;
9451 /* Return a section for X. The only special thing we do here is to
9452 honor small data. */
9455 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9456 unsigned HOST_WIDE_INT align)
9458 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9459 /* ??? Consider using mergeable sdata sections. */
9460 return sdata_section;
9462 return default_elf_select_rtx_section (mode, x, align);
9466 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9468 unsigned int flags = 0;
9470 if (strcmp (name, ".sdata") == 0
9471 || strncmp (name, ".sdata.", 7) == 0
9472 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9473 || strcmp (name, ".sbss") == 0
9474 || strncmp (name, ".sbss.", 6) == 0
9475 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9476 flags = SECTION_SMALL;
9478 flags |= default_section_type_flags (decl, name, reloc);
9481 #endif /* OBJECT_FORMAT_ELF */
9483 /* Structure to collect function names for final output in link section. */
9484 /* Note that items marked with GTY can't be ifdef'ed out. */
9486 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9487 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9489 struct GTY(()) alpha_links
9493 enum links_kind lkind;
9494 enum reloc_kind rkind;
9497 struct GTY(()) alpha_funcs
9500 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9504 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9505 splay_tree alpha_links_tree;
9506 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9507 splay_tree alpha_funcs_tree;
9509 static GTY(()) int alpha_funcs_num;
9511 #if TARGET_ABI_OPEN_VMS
9513 /* Return the VMS argument type corresponding to MODE. */
9516 alpha_arg_type (enum machine_mode mode)
9517 {
9518 switch (mode)
9519 {
9520 case SFmode:
9521 return TARGET_FLOAT_VAX ? FF : FS;
9522 case DFmode:
9523 return TARGET_FLOAT_VAX ? FD : FT;
9524 default:
9525 return I64;
9526 }
9527 }
9529 /* Return an rtx for an integer representing the VMS Argument Information
9530 register. */
9533 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9535 unsigned HOST_WIDE_INT regval = cum.num_args;
9538 for (i = 0; i < 6; i++)
9539 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9541 return GEN_INT (regval);
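/* Editor's layout sketch of the value built above: the argument count
   sits in the low bits and argument I contributes a 3-bit type code at
   bit I*3 + 8, so for two arguments with type codes T0 and T1:

     regval == 2 | (T0 << 8) | (T1 << 11)

   where T0/T1 stand for the enum values stored in cum.atypes.  */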
9544 /* Make (or fake) a .linkage entry for a function call.
9546 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9548 Return a SYMBOL_REF rtx for the linkage. */
9551 alpha_need_linkage (const char *name, int is_local)
9553 splay_tree_node node;
9554 struct alpha_links *al;
9561 struct alpha_funcs *cfaf;
9563 if (!alpha_funcs_tree)
9564 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9565 splay_tree_compare_pointers);
9567 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9570 cfaf->num = ++alpha_funcs_num;
9572 splay_tree_insert (alpha_funcs_tree,
9573 (splay_tree_key) current_function_decl,
9574 (splay_tree_value) cfaf);
9577 if (alpha_links_tree)
9579 /* Is this name already defined? */
9581 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9584 al = (struct alpha_links *) node->value;
9587 /* Defined here but external assumed. */
9588 if (al->lkind == KIND_EXTERN)
9589 al->lkind = KIND_LOCAL;
9593 /* Used here but unused assumed. */
9594 if (al->lkind == KIND_UNUSED)
9595 al->lkind = KIND_LOCAL;
9601 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9603 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9604 name = ggc_strdup (name);
9606 /* Assume external if no definition. */
9607 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9609 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9610 get_identifier (name);
9612 /* Construct a SYMBOL_REF for us to call. */
9614 size_t name_len = strlen (name);
9615 char *linksym = XALLOCAVEC (char, name_len + 6);
9616 linksym[0] = '$';
9617 memcpy (linksym + 1, name, name_len);
9618 memcpy (linksym + 1 + name_len, "..lk", 5);
9619 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9620 ggc_alloc_string (linksym, name_len + 5));
9623 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9624 (splay_tree_value) al);
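/* Editor's example: for NAME "foo" the code above creates the symbol
   "$foo..lk", while alpha_use_linkage below builds the per-function
   variant "$<num>..foo..lk" that alpha_write_one_linkage finally emits
   as a label in the link section.  */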
9630 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9632 splay_tree_node cfunnode;
9633 struct alpha_funcs *cfaf;
9634 struct alpha_links *al;
9635 const char *name = XSTR (linkage, 0);
9637 cfaf = (struct alpha_funcs *) 0;
9638 al = (struct alpha_links *) 0;
9640 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9641 cfaf = (struct alpha_funcs *) cfunnode->value;
9645 splay_tree_node lnode;
9647 /* Is this name already defined? */
9649 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9651 al = (struct alpha_links *) lnode->value;
9654 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9662 splay_tree_node node = 0;
9663 struct alpha_links *anl;
9668 name_len = strlen (name);
9670 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9671 al->num = cfaf->num;
9673 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9676 anl = (struct alpha_links *) node->value;
9677 al->lkind = anl->lkind;
9680 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9681 buflen = strlen (buf);
9682 linksym = XALLOCAVEC (char, buflen + 1);
9683 memcpy (linksym, buf, buflen + 1);
9685 al->linkage = gen_rtx_SYMBOL_REF
9686 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9688 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9689 (splay_tree_value) al);
9693 al->rkind = KIND_CODEADDR;
9695 al->rkind = KIND_LINKAGE;
9698 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9704 alpha_write_one_linkage (splay_tree_node node, void *data)
9706 const char *const name = (const char *) node->key;
9707 struct alpha_links *link = (struct alpha_links *) node->value;
9708 FILE *stream = (FILE *) data;
9710 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9711 if (link->rkind == KIND_CODEADDR)
9713 if (link->lkind == KIND_LOCAL)
9715 /* Local and used */
9716 fprintf (stream, "\t.quad %s..en\n", name);
9720 /* External and used, request code address. */
9721 fprintf (stream, "\t.code_address %s\n", name);
9726 if (link->lkind == KIND_LOCAL)
9728 /* Local and used, build linkage pair. */
9729 fprintf (stream, "\t.quad %s..en\n", name);
9730 fprintf (stream, "\t.quad %s\n", name);
9734 /* External and used, request linkage pair. */
9735 fprintf (stream, "\t.linkage %s\n", name);
9743 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9745 splay_tree_node node;
9746 struct alpha_funcs *func;
9748 fprintf (stream, "\t.link\n");
9749 fprintf (stream, "\t.align 3\n");
9752 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9753 func = (struct alpha_funcs *) node->value;
9755 fputs ("\t.name ", stream);
9756 assemble_name (stream, funname);
9757 fputs ("..na\n", stream);
9758 ASM_OUTPUT_LABEL (stream, funname);
9759 fprintf (stream, "\t.pdesc ");
9760 assemble_name (stream, funname);
9761 fprintf (stream, "..en,%s\n",
9762 alpha_procedure_type == PT_STACK ? "stack"
9763 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
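/* Editor's example of the directives emitted above for a stack-frame
   function "foo":

     .name foo..na
   foo:
     .pdesc foo..en,stack
 */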
9767 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9768 /* splay_tree_delete (func->links); */
/* Given a decl, a section name, and whether the decl initializer
   has relocs, choose attributes for the section.  */

#define SECTION_VMS_OVERLAY SECTION_FORGET
#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)

static unsigned int
vms_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_OVERLAY;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_GLOBAL;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_INITIALIZE;

  return flags;
}

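/* As a sketch of how these flags get set (hypothetical source, not part
   of GCC itself): a VMS declaration such as

     int shared_state __attribute__ ((overlaid, global));

   reaches this hook with both attributes on DECL_ATTRIBUTES, so the
   returned flags include SECTION_VMS_OVERLAY | SECTION_VMS_GLOBAL.  */
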
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
                       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_VMS_OVERLAY)
    fprintf (asm_out_file, ",OVR");
  if (flags & SECTION_VMS_GLOBAL)
    fprintf (asm_out_file, ",GBL");
  if (flags & SECTION_VMS_INITIALIZE)
    fprintf (asm_out_file, ",NOMOD");
  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}

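/* For instance, a section carrying SECTION_VMS_OVERLAY and
   SECTION_VMS_GLOBAL would be emitted as (illustrative):

        .section	MY_SECT,OVR,GBL
*/
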
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

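/* E.g. a constructor SYMBOL_REF for "init_foo" yields a 64-bit aligned
   entry in the .ctors section (illustrative output):

        .align 3
        .quad init_foo
*/
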
static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
                    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
                   tree cfundecl ATTRIBUTE_UNUSED,
                   int lflag ATTRIBUTE_UNUSED,
                   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */

#if TARGET_ABI_UNICOSMK

/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in two
   registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}

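/* Sketch (hypothetical type, not from the original source): a struct
   such as

     struct s { long a, b, c; };

   occupies three 64-bit argument words, so ALPHA_ARG_SIZE returns 3 and
   the struct must be passed on the stack; a 16-byte struct still fits
   in two registers and is unaffected.  */
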
/* Define the offset between two registers, one to be eliminated, and the
   other its replacement, at the start of a routine.  */

int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (crtl->outgoing_args_size)
            + ALPHA_ROUND (get_frame_size()));
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (fixed_size)
            + ALPHA_ROUND (get_frame_size()
                           + crtl->outgoing_args_size));
  else
    gcc_unreachable ();
}

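/* Worked example (illustrative, assuming ALPHA_ROUND's 16-byte
   rounding): with 8 bytes of outgoing arguments and a 24-byte local
   frame, eliminating the frame pointer into the stack pointer yields
   ALPHA_ROUND (8) + ALPHA_ROUND (24) = 16 + 32 = 48.  */
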
/* Output the module name for .ident and .end directives.  We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */

static void
unicosmk_output_module_name (FILE *file)
{
  const char *name = lbasename (main_input_filename);
  unsigned len = strlen (name);
  char *clean_name = alloca (len + 2);
  char *ptr = clean_name;

  /* CAM only accepts module names that start with a letter or '$'.  We
     prefix the module name with a '$' if necessary.  */

  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
  clean_symbol_name (clean_name);
  fputs (clean_name, file);
}

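/* E.g. (illustrative): compiling a main input file named 9lives.c gives
   a basename starting with a digit, so the emitted module name becomes
   something like "$9lives_c" after the '$' prefix and the character
   mapping done by clean_symbol_name (the exact mapping of '.' is up to
   that helper).  */
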
/* Output the definition of a common variable.  */

static void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
  printf ("T3E__: common %s\n", name);

  in_section = NULL;
  fputs("\t.endp\n\n\t.psect ", file);
  assemble_name(file, name);
  fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf(file, "\t.byte\t0:%d\n", size);

  /* Mark the symbol as defined in this module.  */
  name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}

#define SECTION_PUBLIC SECTION_MACH_DEP
#define SECTION_MAIN (SECTION_PUBLIC << 1)
static int current_section_align;

/* A get_unnamed_section callback for switching to the text section.  */

static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 0;

  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}

/* A get_unnamed_section callback for switching to the data section.  */

static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 1;

  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}

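/* Each switch back into text or data thus opens a fresh psect; the
   first few switches would produce (illustrative):

        .psect	gcc@text___0,code
        .psect	gcc@data___1,data
        .psect	gcc@text___1,code
*/
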
/* Implement TARGET_ASM_INIT_SECTIONS.

   The Cray assembler is really weird with respect to sections.  It has only
   named sections and you can't reopen a section once it has been closed.
   This means that we have to generate unique names whenever we want to
   reenter the text or the data section.  */

static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
                                      unicosmk_output_text_section_asm_op,
                                      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
                                      unicosmk_output_data_section_asm_op,
                                      NULL);
  readonly_data_section = data_section;
}

static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
                             int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
        current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
        flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);

  if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}

/* Generate a section name for decl and associate it with the
   declaration.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  gcc_assert (decl);

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = default_strip_name_encoding (name);
  len = strlen (name);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;

      /* It is essential that we prefix the section name here because
         otherwise the section names generated for constructors and
         destructors confuse collect2.  */

      string = alloca (len + 6);
      sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
  else if (TREE_PUBLIC (decl))
    DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string;

      string = alloca (len + 6);
      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}

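/* E.g. (illustrative): a function bar lands in section "code@bar" and a
   static variable baz in "data@baz", while a public variable keeps its
   plain name as the section name.  */
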
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
                            tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;

  /* Close the previous section.  */

  fputs ("\t.endp\n\n", asm_out_file);

  /* Find out what kind of section we are opening.  */

  if (flags & SECTION_MAIN)
    fputs ("\t.start\tmain\n", asm_out_file);

  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";

  if (current_section_align != 0)
    fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
             current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}

static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}

/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */

static void
unicosmk_output_align (FILE *file, int align)
{
  if (inside_function)
    fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}

/* Add a case vector to the current function's list of deferred case
   vectors.  Case vectors have to be put into a separate section because CAM
   does not allow data definitions in code sections.  */

void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
  struct machine_function *machine = cfun->machine;

  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
                                          machine->addr_list);
}

/* Output a case vector.  */

static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
  rtx lab = XEXP (vec, 0);
  rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;

  (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));

  for (idx = 0; idx < vlen; idx++)
    ASM_OUTPUT_ADDR_VEC_ELT
      (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
}

/* Output current function's deferred case vectors.  */

static void
unicosmk_output_deferred_case_vectors (FILE *file)
{
  struct machine_function *machine = cfun->machine;
  rtx t;

  if (machine->addr_list == NULL_RTX)
    return;

  switch_to_section (data_section);
  for (t = machine->addr_list; t; t = XEXP (t, 1))
    unicosmk_output_addr_vec (file, XEXP (t, 0));
}

/* Generate the name of the SSIB section for the current function.  */

#define SSIB_PREFIX "__SSIB_"
#define SSIB_PREFIX_LEN 7

static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than that
     anyway.  */

  static char name[256];

  rtx x;
  const char *fnname;
  int len;

  x = DECL_RTL (cfun->decl);
  gcc_assert (MEM_P (x));
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  fnname = XSTR (x, 0);

  len = strlen (fnname);
  if (len + SSIB_PREFIX_LEN > 255)
    len = 255 - SSIB_PREFIX_LEN;

  strcpy (name, SSIB_PREFIX);
  strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name[len + SSIB_PREFIX_LEN] = 0;

  return name;
}

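/* E.g. (illustrative): for a function compute_crc the SSIB section is
   named "__SSIB_compute_crc"; names that would exceed 255 characters
   are truncated to fit the static buffer.  */
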
/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame.  If the
   subroutine doesn't have a frame, simply increment $15.  */

void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */

      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                  GEN_INT (-64))));
      emit_insn (gen_blockage ());

      /* Save the return address.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
      (*imaskP) &= ~(1UL << REG_RA);

      /* Save the old frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
      (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);

      emit_insn (gen_blockage ());

      /* Store the SSIB pointer.  */

      ssib_name = ggc_strdup (unicosmk_ssib_name ());
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
      set_mem_alias_set (mem, alpha_sr_alias_set);

      FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
                           gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));

      /* Save the CIW index.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));

      emit_insn (gen_blockage ());

      /* Set the new frame pointer.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
         have a frame.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  hard_frame_pointer_rtx, const1_rtx)));
    }
}

/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  int len;
  int i;
  rtx x;
  rtx ciw;
  struct machine_function *machine = cfun->machine;

  in_section = NULL;
  fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
           unicosmk_ssib_name ());

  /* Some required stuff and the function name length.  */

  len = strlen (fnname);
  fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);

  /* Saved registers
     ??? We don't do that yet.  */
  fputs ("\t.quad\t0\n", file);

  /* Function address.  */

  fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
  fputs ("\t.quad\t0\n", file);

  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */

  for (i = 0; i < len; i++)
    fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
  if ((len % 8) == 0)
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);

  /* All call information words used in the function.  */

  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      ciw = XEXP (x, 0);
#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
               CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}

/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */

rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
  struct machine_function *machine = cfun->machine;

  node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;

  machine->last_ciw = node;
  ++machine->ciw_count;

  return GEN_INT (machine->ciw_count
                  + strlen (current_function_name ())/8 + 5);
}

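/* Worked example (illustrative): for the first CIW recorded in a
   function named "main" (strlen 4), the returned index is
   1 + 4/8 + 5 = 6, accounting for the SSIB header quadwords and the
   quadwords occupied by the function name.  */
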
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of file.  */

/* List of identifiers for which an extern declaration might have to be
   emitted.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;

/* Output extern declarations which are required for every asm file.  */

static void
unicosmk_output_default_externs (FILE *file)
{
  static const char *const externs[] =
    { "__T3E_MISMATCH" };

  int i;
  int n;

  n = ARRAY_SIZE (externs);

  for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}

/* Output extern declarations for global symbols which have been
   referenced but not defined.  */

static void
unicosmk_output_externs (FILE *file)
{
  struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
      /* We have to strip the encoding and possibly remove user_label_prefix
         from the identifier in order to handle -fleading-underscore and
         explicit asm names correctly (cf. gcc.dg/asm-names-1.c).  */
      real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
          && !memcmp (real_name, user_label_prefix, len))
        real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
        {
          TREE_ASM_WRITTEN (name_tree) = 1;
          fputs ("\t.extern\t", file);
          assemble_name (file, p->name);
          putc ('\n', file);
        }
    }
}

/* Record an extern.  */

void
unicosmk_add_extern (const char *name)
{
  struct unicosmk_extern_list *p;

  p = (struct unicosmk_extern_list *)
       xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}

/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands.  We have
   to replace such identifiers with DEX expressions.  */

/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions.  The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;

/* Check if NAME must be replaced by a DEX expression.  */

static int
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    ++name;

  if (name[0] == '$')
    ++name;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return 0;

  switch (name[1])
    {
    case '1': case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));

    case '3':
      return (name[2] == '\0'
              || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));

    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}

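/* Examples (illustrative): "r3", "r15", "f31" and "$f10" are special
   because CAM would read them as register names; "r32", "rx" and
   ordinary identifiers like "foo" are not.  */
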
/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */

static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x, 0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
        return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;

  ++unicosmk_dex_count;
  return unicosmk_dex_count;
}

/* Output the DEX definitions for this file.  */

static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;

  if (unicosmk_dex_list == NULL)
    return;

  fprintf (file, "\t.dexstart\n");

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }

  fprintf (file, "\t.dexend\n");
}

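/* The emitted block looks like (illustrative):

        .dexstart
        DEX (2) = r3
        DEX (1) = f10
        .dexend

   Indices count down because the list is in reverse insertion order.  */
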
/* Output text to appear at the beginning of an assembler file.  */

static void
unicosmk_file_start (void)
{
  int i;

  fputs ("\t.ident\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  fputs ("\n\n", asm_out_file);

  /* The Unicos/Mk assembler uses different register names.  Instead of trying
     to support them, we simply use micro definitions.  */

  /* CAM has different register names: rN for the integer register N and fN
     for the floating-point register N.  Instead of trying to use these in
     alpha.md, we define the symbols $N and $fN to refer to the appropriate
     register.  */

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$%d <- r%d\n", i, i);

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$f%d <- f%d\n", i, i);

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes, which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);

  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section.  We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that.  I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}

/* Output text to appear at the end of an assembler file.  This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */

  unicosmk_output_externs (asm_out_file);

  /* Output dex definitions used for functions whose names conflict with
     register names.  */

  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}

#else

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
                      const char * fnname ATTRIBUTE_UNUSED)
{}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
         for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
         remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}

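/* Consequently (illustrative): on VMS a 32-bit signed division that is
   not open-coded expands to a call to OTS$DIV_I, and on Unicos/Mk a
   64-bit unsigned division calls $uldiv; the SImode entries are cleared
   there so gcc never emits calls to __divsi3.  */
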
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
# undef TARGET_ASM_FUNCTION_RODATA_SECTION
# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"