/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
#include "splay-tree.h"
#include "cfglayout.h"
#include "gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
#include "df.h"
#include "libfuncs.h"

/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type alpha_sr_alias_set;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;

/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),          /* fp_add */
    COSTS_N_INSNS (6),          /* fp_mult */
    COSTS_N_INSNS (34),         /* fp_div_sf */
    COSTS_N_INSNS (63),         /* fp_div_df */
    COSTS_N_INSNS (23),         /* int_mult_si */
    COSTS_N_INSNS (23),         /* int_mult_di */
    COSTS_N_INSNS (2),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (97),         /* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (15),         /* fp_div_sf */
    COSTS_N_INSNS (22),         /* fp_div_df */
    COSTS_N_INSNS (8),          /* int_mult_si */
    COSTS_N_INSNS (12),         /* int_mult_di */
    COSTS_N_INSNS (1) + 1,      /* int_shift */
    COSTS_N_INSNS (1),          /* int_cmov */
    COSTS_N_INSNS (83),         /* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (12),         /* fp_div_sf */
    COSTS_N_INSNS (15),         /* fp_div_df */
    COSTS_N_INSNS (7),          /* int_mult_si */
    COSTS_N_INSNS (7),          /* int_mult_di */
    COSTS_N_INSNS (1),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (86),         /* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),            /* fp_add */
  COSTS_N_INSNS (1),            /* fp_mult */
  COSTS_N_INSNS (1),            /* fp_div_sf */
  COSTS_N_INSNS (1) + 1,        /* fp_div_df */
  COSTS_N_INSNS (1) + 1,        /* int_mult_si */
  COSTS_N_INSNS (1) + 2,        /* int_mult_di */
  COSTS_N_INSNS (1),            /* int_shift */
  COSTS_N_INSNS (1),            /* int_cmov */
  COSTS_N_INSNS (6),            /* int_div */
};
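
/* For illustration (this worked example is not from the original source):
   COSTS_N_INSNS (N) expands to N * 4, so the "+ N" terms above are
   sub-insn fractions.  A single shift then costs COSTS_N_INSNS (1) + 1
   == 5, which still beats any two-insn replacement sequence at
   COSTS_N_INSNS (2) == 8, but two such shifts cost 10 and lose -- which
   is exactly the "only if there's just one of them" behavior described
   in the comment above.  */
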
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif

#define REG_PV 27
#define REG_RA 26

/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
static bool vms_valid_pointer_mode (enum machine_mode);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);

/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
static const struct default_options alpha_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };

/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
        target_flags |= MASK_SOFT_FP;
      break;

    case OPT_mieee:
    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;
      break;

    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
        error ("bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif

/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",    PROCESSOR_EV4, 0 },
    { "ev45",   PROCESSOR_EV4, 0 },
    { "21064",  PROCESSOR_EV4, 0 },
    { "ev5",    PROCESSOR_EV5, 0 },
    { "21164",  PROCESSOR_EV5, 0 },
    { "ev56",   PROCESSOR_EV5, MASK_BWX },
    { "21164a", PROCESSOR_EV5, MASK_BWX },
    { "pca56",  PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",    PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",  PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",   PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int i;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
               (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffices for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
        warning (0, "-mieee not supported on Unicos/Mk");
      else
        {
          alpha_tp = ALPHA_TP_INSN;
          alpha_fptm = ALPHA_FPTM_SU;
        }
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
        warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
      else
        {
          alpha_tp = ALPHA_TP_INSN;
          alpha_fptm = ALPHA_FPTM_SUI;
        }
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value %qs for -mfp-rounding-mode switch",
               alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }

  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_cpu_string, cpu_table [i].name))
          {
            alpha_tune = alpha_cpu = cpu_table [i].processor;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
            target_flags |= cpu_table [i].flags;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_tune_string, cpu_table [i].name))
          {
            alpha_tune = cpu_table [i].processor;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mtune switch", alpha_tune_string);
    }

  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
        {
          warning (0, "rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
        }
      if (alpha_fptm == ALPHA_FPTM_SUI)
        {
          warning (0, "trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
        }
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
        warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }

  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char)alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
      {
        static int const cache_latency[][4] =
        {
          { 3, 30, -1 },        /* ev4 -- Bcache is a guess */
          { 2, 12, 38 },        /* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },        /* ev6 -- Bcache from DS20 LMbench.  */
        };

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
          {
            warning (0, "L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_tune]);
            lat = 3;
          }
        else
          lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        lat = 150;
      }
    else
      {
        warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
        lat = 3;
      }

    alpha_memory_latency = lat;
  }

  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
        align_loops = 16;
      if (align_jumps <= 0)
        align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;

  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
     can be optimized to ap = __builtin_next_arg (0).  */
  if (TARGET_ABI_UNICOSMK)
    targetm.expand_builtin_va_start = NULL;
}

/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
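
/* A worked example (illustrative only, not from the original source):
   0x00000000ffffffff and 0xffff00000000ffff are zap masks -- every
   byte is 0x00 or 0xff -- so the corresponding values can be produced
   by a single ZAPNOT with byte-select immediates 0x0f and 0xc3
   respectively.  0x0000000000ff00f0 is not a zap mask, because its
   low byte 0xf0 is neither all-zero nor all-ones.  */
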
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}

/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc[REGNO (tmp)];
          if (op == 0)
            return 0;
        }
    }
  return op;
}

/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  On unicosmk, we have the situation that HImode
   doesn't map to any C type, but of course we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}

/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}

/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
          && reload_completed
          && alpha_sa_size () == 0
          && get_frame_size () == 0
          && crtl->outgoing_args_size == 0
          && crtl->args.pretend_args_size == 0);
}

/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (JUMP_P (tmp)
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}

/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
        {
          int count = 1;

          for (j = i + 1; j < n_labels; j++)
            if (XEXP (XVECEXP (jump_table, 1, i), 0)
                == XEXP (XVECEXP (jump_table, 1, j), 0))
              count++;

          if (count > best_count)
            best_count = count, best_label = XVECEXP (jump_table, 1, i);
        }
    }

  return best_label ? best_label : const0_rtx;
}

/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}

/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}

/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
        return true;
    }

  return false;
}

#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST \
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif

/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
          ? STRICT_REG_OK_FOR_BASE_P (x)
          : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        x = SUBREG_REG (x);

      if (REG_P (x))
        {
          if (! strict
              && NONSTRICT_REG_OK_FP_BASE_P (x)
              && CONST_INT_P (ofs))
            return true;
          if ((strict
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
              && CONSTANT_ADDRESS_P (ofs))
            return true;
        }
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
           && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
        return true;

      if (GET_CODE (x) == LO_SUM)
        {
          rtx ofs = XEXP (x, 1);
          x = XEXP (x, 0);

          /* Discard non-paradoxical subregs.  */
          if (GET_CODE (x) == SUBREG
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
            x = SUBREG_REG (x);

          /* Must have a valid base register.  */
          if (! (REG_P (x)
                 && (strict
                     ? STRICT_REG_OK_FOR_BASE_P (x)
                     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
            return false;

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode)
              || dtp32_symbolic_operand (ofs, Pmode)
              || tp32_symbolic_operand (ofs, Pmode))
            return true;
        }
    }

  return false;
}
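
/* Illustrative examples (not from the original source): for DImode,
   ($1), 16($1) and 32767($1) are all accepted above, as is the ldq_u
   form (and (plus $1 disp) -8).  With explicit relocations, so is the
   lo_sum built for a local symbol, e.g. (lo_sum $29 sym), which is
   what an lda with a !gprellow relocation against the GP expresses.  */
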
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
        {
        case TLS_MODEL_NONE:
          break;

        case TLS_MODEL_GLOBAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          dest = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
          insn = gen_call_value_osf_tlsgd (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          emit_libcall_block (insn, dest, r0, x);
          return dest;

        case TLS_MODEL_LOCAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          scratch = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
          insn = gen_call_value_osf_tlsldm (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                UNSPEC_TLSLDM_CALL);
          emit_libcall_block (insn, scratch, r0, eqv);

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);

          if (alpha_tls_size == 64)
            {
              dest = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
              emit_insn (gen_adddi3 (dest, dest, scratch));
              return dest;
            }
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, scratch, insn);
              scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
            }
          return gen_rtx_LO_SUM (Pmode, scratch, eqv);

        case TLS_MODEL_INITIAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);
          scratch = gen_reg_rtx (Pmode);
          dest = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
          emit_insn (gen_adddi3 (dest, tp, scratch));
          return dest;

        case TLS_MODEL_LOCAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, tp, insn);
              tp = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
            }
          return gen_rtx_LO_SUM (Pmode, tp, eqv);

        default:
          gcc_unreachable ();
        }

      if (local_symbolic_operand (x, Pmode))
        {
          if (small_symbolic_operand (x, Pmode))
            return x;
          else
            {
              if (can_create_pseudo_p ())
                scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch,
                                      gen_rtx_HIGH (Pmode, x)));
              return gen_rtx_LO_SUM (Pmode, scratch, x);
            }
        }
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
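
/* A worked split_addend example (illustrative only, not from the
   original source): addend 0x29000 gives
   low  = ((0x9000 ^ 0x8000) - 0x8000) = -0x7000, and then
   high = 0x30000, so the address becomes
   (plus (plus reg 0x30000) -0x7000): one ldah of 3 plus a -0x7000
   lda displacement, both within the signed 16-bit range that the
   Alpha lda/ldah immediates sign-extend.  */
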
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                          enum machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}

/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}

/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}

int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}

/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}

/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
                                 enum machine_mode mode ATTRIBUTE_UNUSED,
                                 int opnum, int type,
                                 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
        return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                                      GEN_INT (high)),
                        GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
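
/* For illustration (this worked example is not from the original
   source): a stack slot at $30 + 0x12340 is out of range for a single
   lda displacement, so the code above rewrites it as
   (($30 + 0x10000) + 0x2340); reload materializes the inner sum with
   one ldah and the memory insn keeps the 0x2340 displacement -- one
   extra insn instead of three.  */
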
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
                 bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
         nearly anywhere with no cost.  If it is a valid operand for an
         ADD or AND, likewise return 0 if we know it will be used in that
         context.  Otherwise, return 2 since it might be used there later.
         All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        *total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
               || (outer_code == AND && and_operand (x, VOIDmode)))
        *total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
        *total = 2;
      else
        *total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
        /* Estimate of cost for call_pal rduniq.  */
        /* ??? How many insns do we emit here?  More than one...  */
        *total = COSTS_N_INSNS (15);
      else
        /* Otherwise we do a load from the GOT.  */
        *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
               && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
        {
          /* A scaled add, i.e. the s4addq/s8addq forms.  */
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
                              (enum rtx_code) outer_code, speed)
                    + rtx_cost (XEXP (x, 1),
                                (enum rtx_code) outer_code, speed)
                    + COSTS_N_INSNS (1));
          return true;
        }
      return false;

    case MULT:
      if (float_mode_p)
        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
      else
        *total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) <= 3)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else
        *total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
      else
        *total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
        *total = 0;
      else
        *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}

/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
              <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  else
    offset *= 8;
  *pbitnum = GEN_INT (offset);
}
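
/* A worked example (illustrative only, not from the original source):
   an HImode reference at 6($1), with no claimed alignment on the MEM,
   yields offset = 6 & 3 = 2, so *PALIGNED_MEM becomes the SImode word
   at 4($1) and *PBITNUM is 16: the halfword occupies bits 16..31 of
   that aligned longword on a little-endian Alpha.  */
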
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset);
}

/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
}

/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_INT_P (x)
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
        return NO_REGS;
      if (rclass == ALL_REGS)
        return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}

/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
        {
          if (in_p)
            {
              if (!aligned_memory_operand (x, mode))
                sri->icode = direct_optab_handler (reload_in_optab, mode);
            }
          else
            sri->icode = direct_optab_handler (reload_out_optab, mode);
          return NO_REGS;
        }
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
          && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
        return GENERAL_REGS;
    }

  return NO_REGS;
}

/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (!MEM_P (x))
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}

/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
    else
      gcc_unreachable ();
}

static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
                                 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
                        HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
        {
          extra = 0x4000;
          tmp1 -= 0x40000000;
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
        }

      if (c == low || (low == 0 && extra == 0))
        {
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

          if (no_output)
            return pc_rtx;
          if (target == NULL)
            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
          return target;
        }
      else if (n >= 2 + (extra != 0))
        {
          if (no_output)
            return pc_rtx;
          if (!can_create_pseudo_p ())
            {
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      GEN_INT (high << 16)));
              temp = target;
            }
          else
            temp = copy_to_suggested_reg (GEN_INT (high << 16),
                                          subtarget, mode);

          /* As of 2002-02-23, addsi3 is only available when not optimizing.
             This means that if we go through expand_binop, we'll try to
             generate extensions, etc, which will require new pseudos, which
             will fail during some split phases.  The SImode add patterns
             still exist, but are not named.  So build the insns by hand.  */

          if (extra != 0)
            {
              if (! subtarget)
                subtarget = gen_reg_rtx (mode);
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
              insn = gen_rtx_SET (VOIDmode, subtarget, insn);
              emit_insn (insn);
              temp = subtarget;
            }

          if (target == NULL)
            target = gen_reg_rtx (mode);
          insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
          insn = gen_rtx_SET (VOIDmode, target, insn);
          emit_insn (insn);
          return target;
        }
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
         high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
        {
          temp = alpha_emit_set_const (subtarget, mode, c - new_const,
                                       i, no_output);
          if (temp)
            {
              if (no_output)
                return temp;
              return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
                                   target, 0, OPTAB_WIDEN);
            }
        }

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);
        }

      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c >> bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const,
                                         i, no_output);
            if (!temp && c < 0)
              {
                new_const = (unsigned HOST_WIDE_INT)c >> bits;
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
         confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const,
                                         i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                     target, 1, OPTAB_WIDEN);
              }
          }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (~ c) - 2);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const,
                                         i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
    }

#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const,
                                   n - 1, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_binop (mode, and_optab, temp,
                               GEN_INT (c | ~ new_const),
                               target, 0, OPTAB_WIDEN);
        }
    }
#endif

  return 0;
}
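
/* Worked examples (illustrative only, not from the original source):
   c = 0x12345678 splits into high = 0x1234, low = 0x5678, two insns:

        ldah    t,0x1234($31)
        lda     t,0x5678(t)

   c = 0x7fff8000 would need high = -0x8000, which the EXTRA adjustment
   above rewrites as two 0x4000 ldah steps instead:

        ldah    t,0x4000($31)
        ldah    t,0x4000(t)
        lda     t,-0x8000(t)

   giving 0x40000000 + 0x40000000 - 0x8000 = 0x7fff8000 in three insns.  */
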
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
                      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, we
     can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
        return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
        {
          rtx insn, set;

          if (no_output)
            return result;

          insn = get_last_insn ();
          set = single_set (insn);
          if (! CONSTANT_P (SET_SRC (set)))
            set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
          break;
        }
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
        result = orig_target;
      else if (mode != orig_mode)
        result = gen_lowpart (orig_mode, result);
    }

  return result;
}

/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
        emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
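
/* A worked decomposition (illustrative only, not from the original
   source): for c = 0x123456789abcdef0 the code above computes
   d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679, d4 = 0x12340000,
   and emits ldah/lda to build 0x12345679, a 32-bit shift to get
   0x1234567900000000, then ldah -0x6543 and lda -0x2110 to reach the
   full constant: a fixed five-insn sequence with no search involved.  */
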
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the constant as two HOST_WIDE_INT halves: the low word in *P0 and
   the high word in *P1.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (CONST_INT_P (x))
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}

/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
        x = XEXP (XEXP (x, 0), 0);
      else
        return true;

      if (GET_CODE (x) != SYMBOL_REF)
        return true;

      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        return true;
      if (FLOAT_MODE_P (mode))
        return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
        return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
        return false;
      if (GET_MODE_SIZE (mode) != 8)
        return false;
      /* FALLTHRU */

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
        return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}

/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
        emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}

/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
        {
          if (tmp == operands[0])
            return true;
          operands[1] = tmp;
          return false;
        }
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
        return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}

/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
        {
          if (reload_in_progress)
            {
              if (mode == QImode)
                seq = gen_reload_inqi_aligned (operands[0], operands[1]);
              else
                seq = gen_reload_inhi_aligned (operands[0], operands[1]);
              emit_insn (seq);
            }
          else
            {
              rtx aligned_mem, bitnum;
              rtx scratch = gen_reg_rtx (SImode);
              rtx subtarget;
              bool copyout;

              get_aligned_mem (operands[1], &aligned_mem, &bitnum);

              subtarget = operands[0];
              if (REG_P (subtarget))
                subtarget = gen_lowpart (DImode, subtarget), copyout = false;
              else
                subtarget = gen_reg_rtx (DImode), copyout = true;

              if (mode == QImode)
                seq = gen_aligned_loadqi (subtarget, aligned_mem,
                                          bitnum, scratch);
              else
                seq = gen_aligned_loadhi (subtarget, aligned_mem,
                                          bitnum, scratch);
              emit_insn (seq);

              if (copyout)
                emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
            }
        }
      else
        {
          /* Don't pass these as parameters since that makes the generated
             code depend on parameter evaluation order which will cause
             bootstrap failures.  */

          rtx temp1, temp2, subtarget, ua;
          bool copyout;

          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);

          subtarget = operands[0];
          if (REG_P (subtarget))
            subtarget = gen_lowpart (DImode, subtarget), copyout = false;
          else
            subtarget = gen_reg_rtx (DImode), copyout = true;

          ua = get_unaligned_address (operands[1]);
          if (mode == QImode)
            seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
          else
            seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

          alpha_set_memflags (seq, operands[1]);
          emit_insn (seq);

          if (copyout)
            emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
        }
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
        {
          rtx aligned_mem, bitnum;
          rtx temp1 = gen_reg_rtx (SImode);
          rtx temp2 = gen_reg_rtx (SImode);

          get_aligned_mem (operands[0], &aligned_mem, &bitnum);

          emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
                                        temp1, temp2));
        }
      else
        {
          rtx temp1 = gen_reg_rtx (DImode);
          rtx temp2 = gen_reg_rtx (DImode);
          rtx temp3 = gen_reg_rtx (DImode);
          rtx ua = get_unaligned_address (operands[0]);

          if (mode == QImode)
            seq = gen_unaligned_storeqi (ua, operands[1],
                                         temp1, temp2, temp3);
          else
            seq = gen_unaligned_storehi (ua, operands[1],
                                         temp1, temp2, temp3);

          alpha_set_memflags (seq, operands[0]);
          emit_insn (seq);
        }
      return true;
    }

  return false;
}
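
/* For reference (an illustrative sketch, not from the original source):
   the unaligned QImode load built above expands along the lines of

        ldq_u   t1,0(a)         # load the enclosing quadword
        extbl   t1,a,t0         # extract the addressed byte

   and the unaligned store to a read-modify-write sequence of roughly
   ldq_u / insbl / mskbl / bis / stq_u, which is why three DImode
   temporaries are needed there.  */
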
2343 /* Implement the movmisalign patterns. One of the operands is a memory
2344 that is not naturally aligned. Emit instructions to load or store it. */
2347 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2349 /* Honor misaligned loads, for those we promised to do so. */
2350 if (MEM_P (operands[1]))
2354 if (register_operand (operands[0], mode))
2357 tmp = gen_reg_rtx (mode);
2359 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2360 if (tmp != operands[0])
2361 emit_move_insn (operands[0], tmp);
2363 else if (MEM_P (operands[0]))
2365 if (!reg_or_0_operand (operands[1], mode))
2366 operands[1] = force_reg (mode, operands[1]);
2367 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2373 /* Generate an unsigned DImode to FP conversion. This is the same code
2374 optabs would emit if we didn't have TFmode patterns.
2376 For SFmode, this is the only construction I've found that can pass
2377 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2378 intermediates will work, because you'll get intermediate rounding
2379 that ruins the end result. Some of this could be fixed by turning
2380 on round-to-positive-infinity, but that requires diddling the fpsr,
2381 which kills performance. I tried turning this around and converting
2382 to a negative number, so that I could turn on /m, but either I did
2383 it wrong or there's something else, because I wound up with the exact
2384 same single-bit error. There is a branch-less form of this same code:
2395 fcmoveq $f10,$f11,$f0
2397 I'm not using it because it's the same number of instructions as
2398 this branch-full form, and it has more serialized long latency
2399 instructions on the critical path.
2401 For DFmode, we can avoid rounding errors by breaking up the word
2402 into two pieces, converting them separately, and adding them back:
2404 LC0: .long 0,0x5f800000
2409 cpyse $f11,$f31,$f10
2410 cpyse $f31,$f11,$f11
2418 This doesn't seem to be a clear-cut win over the optabs form.
2419 It probably all depends on the distribution of numbers being
2420 converted -- in the optabs form, every input except those with the
2421 high bit set has a much lower minimum execution time. */
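/* Explanatory note on the negative path below: the input is halved with
   its low bit folded back in as a sticky bit, i0 = (in >> 1) | (in & 1),
   converted to FP, and then doubled.  Preserving the low bit this way
   keeps the rounding of the halved value consistent with what rounding
   the full value would have produced.  */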
2424 alpha_emit_floatuns (rtx operands[2])
2426 rtx neglab, donelab, i0, i1, f0, in, out;
2427 enum machine_mode mode;
2430 in = force_reg (DImode, operands[1]);
2431 mode = GET_MODE (out);
2432 neglab = gen_label_rtx ();
2433 donelab = gen_label_rtx ();
2434 i0 = gen_reg_rtx (DImode);
2435 i1 = gen_reg_rtx (DImode);
2436 f0 = gen_reg_rtx (mode);
2438 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2440 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2441 emit_jump_insn (gen_jump (donelab));
2444 emit_label (neglab);
2446 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2447 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2448 emit_insn (gen_iordi3 (i0, i0, i1));
2449 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2450 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2452 emit_label (donelab);
2455 /* Generate the comparison for a conditional branch. */
2458 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2460 enum rtx_code cmp_code, branch_code;
2461 enum machine_mode branch_mode = VOIDmode;
2462 enum rtx_code code = GET_CODE (operands[0]);
2463 rtx op0 = operands[1], op1 = operands[2];
2466 if (cmp_mode == TFmode)
2468 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2473 /* The general case: fold the comparison code to the types of compares
2474 that we have, choosing the branch as necessary. */
2477 case EQ: case LE: case LT: case LEU: case LTU:
2479 /* We have these compares: */
2480 cmp_code = code, branch_code = NE;
2485 /* These must be reversed. */
2486 cmp_code = reverse_condition (code), branch_code = EQ;
2489 case GE: case GT: case GEU: case GTU:
2490 /* For FP, we swap them; for INT, we reverse them. */
2491 if (cmp_mode == DFmode)
2493 cmp_code = swap_condition (code);
2495 tem = op0, op0 = op1, op1 = tem;
2499 cmp_code = reverse_condition (code);
2508 if (cmp_mode == DFmode)
2510 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2512 /* When we are not as concerned about non-finite values, and we
2513 are comparing against zero, we can branch directly. */
2514 if (op1 == CONST0_RTX (DFmode))
2515 cmp_code = UNKNOWN, branch_code = code;
2516 else if (op0 == CONST0_RTX (DFmode))
2518 /* Undo the swap we probably did just above. */
2519 tem = op0, op0 = op1, op1 = tem;
2520 branch_code = swap_condition (cmp_code);
2526 /* ??? We mark the branch mode to be CCmode to prevent the
2527 compare and branch from being combined, since the compare
2528 insn follows IEEE rules that the branch does not. */
2529 branch_mode = CCmode;
2534 /* The following optimizations are only for signed compares. */
2535 if (code != LEU && code != LTU && code != GEU && code != GTU)
2537 /* Whee. Compare and branch against 0 directly. */
2538 if (op1 == const0_rtx)
2539 cmp_code = UNKNOWN, branch_code = code;
2541 /* If the constant doesn't fit into an immediate, but can
2542 be generated by lda/ldah, we adjust the argument and
2543 compare against zero, so we can use beq/bne directly. */
2544 /* ??? Don't do this when comparing against symbols, otherwise
2545 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2546 be declared false out of hand (at least for non-weak). */
2547 else if (CONST_INT_P (op1)
2548 && (code == EQ || code == NE)
2549 && !(symbolic_operand (op0, VOIDmode)
2550 || (REG_P (op0) && REG_POINTER (op0))))
2552 rtx n_op1 = GEN_INT (-INTVAL (op1));
2554 if (! satisfies_constraint_I (op1)
2555 && (satisfies_constraint_K (n_op1)
2556 || satisfies_constraint_L (n_op1)))
2557 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2561 if (!reg_or_0_operand (op0, DImode))
2562 op0 = force_reg (DImode, op0);
2563 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2564 op1 = force_reg (DImode, op1);
2567 /* Emit an initial compare instruction, if necessary. */
2569 if (cmp_code != UNKNOWN)
2571 tem = gen_reg_rtx (cmp_mode);
2572 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2575 /* Emit the branch instruction. */
2576 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2577 gen_rtx_IF_THEN_ELSE (VOIDmode,
2578 gen_rtx_fmt_ee (branch_code,
2580 CONST0_RTX (cmp_mode)),
2581 gen_rtx_LABEL_REF (VOIDmode,
2584 emit_jump_insn (tem);
2587 /* Certain simplifications can be done to make invalid setcc operations
2588 valid. Return the final comparison, or NULL if nothing can be done. */
2591 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2593 enum rtx_code cmp_code;
2594 enum rtx_code code = GET_CODE (operands[1]);
2595 rtx op0 = operands[2], op1 = operands[3];
2598 if (cmp_mode == TFmode)
2600 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2605 if (cmp_mode == DFmode && !TARGET_FIX)
2608 /* The general case: fold the comparison code to the types of compares
2609 that we have, choosing the branch as necessary. */
2614 case EQ: case LE: case LT: case LEU: case LTU:
2616 /* We have these compares. */
2617 if (cmp_mode == DFmode)
2618 cmp_code = code, code = NE;
2622 if (cmp_mode == DImode && op1 == const0_rtx)
2627 cmp_code = reverse_condition (code);
2631 case GE: case GT: case GEU: case GTU:
2632 /* These normally need swapping, but for integer zero we have
2633 special patterns that recognize swapped operands. */
2634 if (cmp_mode == DImode && op1 == const0_rtx)
2636 code = swap_condition (code);
2637 if (cmp_mode == DFmode)
2638 cmp_code = code, code = NE;
2639 tmp = op0, op0 = op1, op1 = tmp;
2646 if (cmp_mode == DImode)
2648 if (!register_operand (op0, DImode))
2649 op0 = force_reg (DImode, op0);
2650 if (!reg_or_8bit_operand (op1, DImode))
2651 op1 = force_reg (DImode, op1);
2654 /* Emit an initial compare instruction, if necessary. */
2655 if (cmp_code != UNKNOWN)
2657 tmp = gen_reg_rtx (cmp_mode);
2658 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2659 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2661 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2665 /* Emit the setcc instruction. */
2666 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2667 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2672 /* Rewrite a comparison against zero CMP of the form
2673 (CODE (cc0) (const_int 0)) so it can be written validly in
2674 a conditional move (if_then_else CMP ...).
2675 If both of the operands that set cc0 are nonzero we must emit
2676 an insn to perform the compare (it can't be done within
2677 the conditional move). */
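/* For example, (if_then_else (eq r1 (const_int 0)) r2 r3) maps directly
   onto a cmoveq, while (lt r1 r2) must first be reduced by an explicit
   cmplt whose result is then tested against zero.  Illustrative only;
   the precise cases are worked out below.  */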
2680 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2682 enum rtx_code code = GET_CODE (cmp);
2683 enum rtx_code cmov_code = NE;
2684 rtx op0 = XEXP (cmp, 0);
2685 rtx op1 = XEXP (cmp, 1);
2686 enum machine_mode cmp_mode
2687 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2688 enum machine_mode cmov_mode = VOIDmode;
2689 int local_fast_math = flag_unsafe_math_optimizations;
2692 if (cmp_mode == TFmode)
2694 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2699 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2701 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2703 enum rtx_code cmp_code;
2708 /* If we have fp<->int register move instructions, do a cmov by
2709 performing the comparison in fp registers, and move the
2710 zero/nonzero value to integer registers, where we can then
2711 use a normal cmov, or vice-versa. */
2715 case EQ: case LE: case LT: case LEU: case LTU:
2716 /* We have these compares. */
2717 cmp_code = code, code = NE;
2721 /* This must be reversed. */
2722 cmp_code = EQ, code = EQ;
2725 case GE: case GT: case GEU: case GTU:
2726 /* These normally need swapping, but for integer zero we have
2727 special patterns that recognize swapped operands. */
2728 if (cmp_mode == DImode && op1 == const0_rtx)
2729 cmp_code = code, code = NE;
2732 cmp_code = swap_condition (code);
2734 tem = op0, op0 = op1, op1 = tem;
2742 tem = gen_reg_rtx (cmp_mode);
2743 emit_insn (gen_rtx_SET (VOIDmode, tem,
2744 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2747 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2748 op0 = gen_lowpart (cmp_mode, tem);
2749 op1 = CONST0_RTX (cmp_mode);
2750 local_fast_math = 1;
2753 /* We may be able to use a conditional move directly.
2754 This avoids emitting spurious compares. */
2755 if (signed_comparison_operator (cmp, VOIDmode)
2756 && (cmp_mode == DImode || local_fast_math)
2757 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2758 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2760 /* We can't put the comparison inside the conditional move;
2761 emit a compare instruction and put that inside the
2762 conditional move. Make sure we emit only comparisons we have;
2763 swap or reverse as necessary. */
2765 if (!can_create_pseudo_p ())
2770 case EQ: case LE: case LT: case LEU: case LTU:
2771 /* We have these compares: */
2775 /* This must be reversed. */
2776 code = reverse_condition (code);
2780 case GE: case GT: case GEU: case GTU:
2781 /* These must be swapped. */
2782 if (op1 != CONST0_RTX (cmp_mode))
2784 code = swap_condition (code);
2785 tem = op0, op0 = op1, op1 = tem;
2793 if (cmp_mode == DImode)
2795 if (!reg_or_0_operand (op0, DImode))
2796 op0 = force_reg (DImode, op0);
2797 if (!reg_or_8bit_operand (op1, DImode))
2798 op1 = force_reg (DImode, op1);
2801 /* ??? We mark the branch mode to be CCmode to prevent the compare
2802 and cmov from being combined, since the compare insn follows IEEE
2803 rules that the cmov does not. */
2804 if (cmp_mode == DFmode && !local_fast_math)
2807 tem = gen_reg_rtx (cmp_mode);
2808 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2809 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2812 /* Simplify a conditional move of two constants into a setcc with
2813 arithmetic. This is done with a splitter since combine would
2814 just undo the work if done during code generation. It also catches
2815 cases we wouldn't have before cse. */
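/* For example, once the condition value C has been computed as 0 or 1
   by a setcc, the splitter below can emit

	d = C ? 8 : 0		as	d = C << 3
	d = C ? -1 : 0		as	d = -C
	d = C ? 5 : 1		as	d = C * 4 + 1	(s4addq)

   A sketch of the shift, negate and sext_add cases handled below.  */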
2818 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2819 rtx t_rtx, rtx f_rtx)
2821 HOST_WIDE_INT t, f, diff;
2822 enum machine_mode mode;
2823 rtx target, subtarget, tmp;
2825 mode = GET_MODE (dest);
2830 if (((code == NE || code == EQ) && diff < 0)
2831 || (code == GE || code == GT))
2833 code = reverse_condition (code);
2834 diff = t, t = f, f = diff;
2838 subtarget = target = dest;
2841 target = gen_lowpart (DImode, dest);
2842 if (can_create_pseudo_p ())
2843 subtarget = gen_reg_rtx (DImode);
2847 /* Below, we must be careful to use copy_rtx on target and subtarget
2848 in intermediate insns, as they may be a subreg rtx, which may not be considered valid. */
2851 if (f == 0 && exact_log2 (diff) > 0
2852 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2853 viable over a longer latency cmove. On EV5, the E0 slot is a
2854 scarce resource, and on EV4 shift has the same latency as a cmove. */
2855 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2857 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2858 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2860 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2861 GEN_INT (exact_log2 (t)));
2862 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2864 else if (f == 0 && t == -1)
2866 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2867 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2869 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2871 else if (diff == 1 || diff == 4 || diff == 8)
2875 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2876 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2879 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2882 add_op = GEN_INT (f);
2883 if (sext_add_operand (add_op, mode))
2885 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2887 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2888 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2900 /* Look up the X_floating library function name for the given operation. */
2903 struct GTY(()) xfloating_op
2905 const enum rtx_code code;
2906 const char *const GTY((skip)) osf_func;
2907 const char *const GTY((skip)) vms_func;
2911 static GTY(()) struct xfloating_op xfloating_ops[] =
2913 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2914 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2915 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2916 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2917 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2918 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2919 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2920 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2921 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2922 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2923 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2924 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2925 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2926 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2927 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2930 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2932 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2933 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2937 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2939 struct xfloating_op *ops = xfloating_ops;
2940 long n = ARRAY_SIZE (xfloating_ops);
2943 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2945 /* How irritating. Nothing to key off for the main table. */
2946 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2949 n = ARRAY_SIZE (vax_cvt_ops);
2952 for (i = 0; i < n; ++i, ++ops)
2953 if (ops->code == code)
2955 rtx func = ops->libcall;
2958 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2959 ? ops->vms_func : ops->osf_func);
2960 ops->libcall = func;
2968 /* Most X_floating operations take the rounding mode as an argument.
2969 Compute that here. */
2972 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2973 enum alpha_fp_rounding_mode round)
2979 case ALPHA_FPRM_NORM:
2982 case ALPHA_FPRM_MINF:
2985 case ALPHA_FPRM_CHOP:
2988 case ALPHA_FPRM_DYN:
2994 /* XXX For reference, round to +inf is mode = 3. */
2997 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3003 /* Emit an X_floating library function call.
3005 Note that these functions do not follow normal calling conventions:
3006 TFmode arguments are passed in two integer registers (as opposed to
3007 indirect); TFmode return values appear in R16+R17.
3009 FUNC is the function to call.
3010 TARGET is where the output belongs.
3011 OPERANDS are the inputs.
3012 NOPERANDS is the count of inputs.
3013 EQUIV is the expression equivalent for the function.
3017 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3018 int noperands, rtx equiv)
3020 rtx usage = NULL_RTX, tmp, reg;
3025 for (i = 0; i < noperands; ++i)
3027 switch (GET_MODE (operands[i]))
3030 reg = gen_rtx_REG (TFmode, regno);
3035 reg = gen_rtx_REG (DFmode, regno + 32);
3040 gcc_assert (CONST_INT_P (operands[i]));
3043 reg = gen_rtx_REG (DImode, regno);
3051 emit_move_insn (reg, operands[i]);
3052 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3055 switch (GET_MODE (target))
3058 reg = gen_rtx_REG (TFmode, 16);
3061 reg = gen_rtx_REG (DFmode, 32);
3064 reg = gen_rtx_REG (DImode, 0);
3070 tmp = gen_rtx_MEM (QImode, func);
3071 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3072 const0_rtx, const0_rtx));
3073 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3074 RTL_CONST_CALL_P (tmp) = 1;
3079 emit_libcall_block (tmp, target, reg, equiv);
3082 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
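/* For example, a TFmode add becomes a call to _OtsAddX (OTS$ADD_X on
   VMS) with the two TFmode operands passed in integer argument
   registers, the rounding mode appended as a final integer argument,
   and the TFmode result in $16/$17, per the convention described
   above.  A sketch of the convention, not an ABI specification.  */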
3085 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3089 rtx out_operands[3];
3091 func = alpha_lookup_xfloating_lib_func (code);
3092 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3094 out_operands[0] = operands[1];
3095 out_operands[1] = operands[2];
3096 out_operands[2] = GEN_INT (mode);
3097 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3098 gen_rtx_fmt_ee (code, TFmode, operands[1],
3102 /* Emit an X_floating library function call for a comparison. */
3105 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3107 enum rtx_code cmp_code, res_code;
3108 rtx func, out, operands[2], note;
3110 /* X_floating library comparison functions return -1, 0 or 1, not a boolean.
3114 Convert the compare against the raw return value. */
3142 func = alpha_lookup_xfloating_lib_func (cmp_code);
3146 out = gen_reg_rtx (DImode);
3148 /* What's actually returned is -1,0,1, not a proper boolean value,
3149 so use an EXPR_LIST as with a generic libcall instead of a
3150 comparison type expression. */
3151 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3152 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3153 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3154 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3159 /* Emit an X_floating library function call for a conversion. */
3162 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3164 int noperands = 1, mode;
3165 rtx out_operands[2];
3167 enum rtx_code code = orig_code;
3169 if (code == UNSIGNED_FIX)
3172 func = alpha_lookup_xfloating_lib_func (code);
3174 out_operands[0] = operands[1];
3179 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3180 out_operands[1] = GEN_INT (mode);
3183 case FLOAT_TRUNCATE:
3184 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3185 out_operands[1] = GEN_INT (mode);
3192 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3193 gen_rtx_fmt_e (orig_code,
3194 GET_MODE (operands[0]),
3198 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3199 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3200 guarantee that the sequence (set OP[0] OP[2]) (set OP[1] OP[3])
3203 is valid. Naturally, output operand ordering is little-endian.
3204 This is used by *movtf_internal and *movti_internal. */
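/* For example, a TImode register-to-register move splits into two
   DImode moves of the constituent halves.  With FIXUP_OVERLAP, if the
   low destination register would clobber the still-unread high source
   half, both moves are swapped so the high half is written first; see
   the overlap check at the end of the function.  */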
3207 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3210 switch (GET_CODE (operands[1]))
3213 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3214 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3218 operands[3] = adjust_address (operands[1], DImode, 8);
3219 operands[2] = adjust_address (operands[1], DImode, 0);
3224 gcc_assert (operands[1] == CONST0_RTX (mode));
3225 operands[2] = operands[3] = const0_rtx;
3232 switch (GET_CODE (operands[0]))
3235 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3236 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3240 operands[1] = adjust_address (operands[0], DImode, 8);
3241 operands[0] = adjust_address (operands[0], DImode, 0);
3248 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3251 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3252 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3256 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3257 op2 is a register containing the sign bit, operation is the
3258 logical operation to be performed. */
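/* The expanders are presumed to pass an XOR for negtf2 and an
   AND-with-complement for abstf2, so the sign flip or sign clear is
   applied only to the DImode half that contains the sign bit.  An
   assumption about the callers, inferred from the comment above.  */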
3261 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3263 rtx high_bit = operands[2];
3267 alpha_split_tmode_pair (operands, TFmode, false);
3269 /* Detect three flavors of operand overlap. */
3271 if (rtx_equal_p (operands[0], operands[2]))
3273 else if (rtx_equal_p (operands[1], operands[2]))
3275 if (rtx_equal_p (operands[0], high_bit))
3282 emit_move_insn (operands[0], operands[2]);
3284 /* ??? If the destination overlaps both source tf and high_bit, then
3285 assume source tf is dead in its entirety and use the other half
3286 for a scratch register. Otherwise "scratch" is just the proper
3287 destination register. */
3288 scratch = operands[move < 2 ? 1 : 3];
3290 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3294 emit_move_insn (operands[0], operands[2]);
3296 emit_move_insn (operands[1], scratch);
3300 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting unaligned data; the canonical sequences are:
3304 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3305 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3306 lda r3,X(r11) lda r3,X+2(r11)
3307 extwl r1,r3,r1 extql r1,r3,r1
3308 extwh r2,r3,r2 extqh r2,r3,r2
3309 or r1,r2,r1 or r1,r2,r1
3312 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3313 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3314 lda r3,X(r11) lda r3,X(r11)
3315 extll r1,r3,r1 extll r1,r3,r1
3316 extlh r2,r3,r2 extlh r2,r3,r2
3317 or r1,r2,r1 addl r1,r2,r1
3319 quad: ldq_u r1,X(r11)
3328 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3329 HOST_WIDE_INT ofs, int sign)
3331 rtx meml, memh, addr, extl, exth, tmp, mema;
3332 enum machine_mode mode;
3334 if (TARGET_BWX && size == 2)
3336 meml = adjust_address (mem, QImode, ofs);
3337 memh = adjust_address (mem, QImode, ofs+1);
3338 if (BYTES_BIG_ENDIAN)
3339 tmp = meml, meml = memh, memh = tmp;
3340 extl = gen_reg_rtx (DImode);
3341 exth = gen_reg_rtx (DImode);
3342 emit_insn (gen_zero_extendqidi2 (extl, meml));
3343 emit_insn (gen_zero_extendqidi2 (exth, memh));
3344 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3345 NULL, 1, OPTAB_LIB_WIDEN);
3346 addr = expand_simple_binop (DImode, IOR, extl, exth,
3347 NULL, 1, OPTAB_LIB_WIDEN);
3349 if (sign && GET_MODE (tgt) != HImode)
3351 addr = gen_lowpart (HImode, addr);
3352 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3356 if (GET_MODE (tgt) != DImode)
3357 addr = gen_lowpart (GET_MODE (tgt), addr);
3358 emit_move_insn (tgt, addr);
3363 meml = gen_reg_rtx (DImode);
3364 memh = gen_reg_rtx (DImode);
3365 addr = gen_reg_rtx (DImode);
3366 extl = gen_reg_rtx (DImode);
3367 exth = gen_reg_rtx (DImode);
3369 mema = XEXP (mem, 0);
3370 if (GET_CODE (mema) == LO_SUM)
3371 mema = force_reg (Pmode, mema);
3373 /* AND addresses cannot be in any alias set, since they may implicitly
3374 alias surrounding code. Ideally we'd have some alias set that
3375 covered all types except those with alignment 8 or higher. */
3377 tmp = change_address (mem, DImode,
3378 gen_rtx_AND (DImode,
3379 plus_constant (mema, ofs),
3381 set_mem_alias_set (tmp, 0);
3382 emit_move_insn (meml, tmp);
3384 tmp = change_address (mem, DImode,
3385 gen_rtx_AND (DImode,
3386 plus_constant (mema, ofs + size - 1),
3388 set_mem_alias_set (tmp, 0);
3389 emit_move_insn (memh, tmp);
3391 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3393 emit_move_insn (addr, plus_constant (mema, -1));
3395 emit_insn (gen_extqh_be (extl, meml, addr));
3396 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3398 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3399 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3400 addr, 1, OPTAB_WIDEN);
3402 else if (sign && size == 2)
3404 emit_move_insn (addr, plus_constant (mema, ofs+2));
3406 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3407 emit_insn (gen_extqh_le (exth, memh, addr));
3409 /* We must use tgt here for the target. The Alpha/VMS port fails if we use
3410 addr for the target, because addr is marked as a pointer and combine
3411 knows that pointers are always sign-extended 32-bit values. */
3412 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3413 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3414 addr, 1, OPTAB_WIDEN);
3418 if (WORDS_BIG_ENDIAN)
3420 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3424 emit_insn (gen_extwh_be (extl, meml, addr));
3429 emit_insn (gen_extlh_be (extl, meml, addr));
3434 emit_insn (gen_extqh_be (extl, meml, addr));
3441 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3445 emit_move_insn (addr, plus_constant (mema, ofs));
3446 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3450 emit_insn (gen_extwh_le (exth, memh, addr));
3455 emit_insn (gen_extlh_le (exth, memh, addr));
3460 emit_insn (gen_extqh_le (exth, memh, addr));
3469 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3470 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3475 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3478 /* Similarly, use ins and msk instructions to perform unaligned stores. */
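/* For reference, the non-BWX expansion of an unaligned quadword store
   is roughly:

	ldq_u t1,X(b)		# load both containing quadwords
	ldq_u t2,X+7(b)
	insql r,b,i1		# shift the source into position
	insqh r,b,i2
	mskql t1,b,t1		# clear the bytes about to be written
	mskqh t2,b,t2
	or    t1,i1,t1
	or    t2,i2,t2
	stq_u t2,X+7(b)		# store high first for the aligned case
	stq_u t1,X(b)

   A little-endian sketch; the code below also handles big-endian word
   order, the 2 and 4 byte sizes, and stores of zero.  */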
3481 alpha_expand_unaligned_store (rtx dst, rtx src,
3482 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3484 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3486 if (TARGET_BWX && size == 2)
3488 if (src != const0_rtx)
3490 dstl = gen_lowpart (QImode, src);
3491 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3492 NULL, 1, OPTAB_LIB_WIDEN);
3493 dsth = gen_lowpart (QImode, dsth);
3496 dstl = dsth = const0_rtx;
3498 meml = adjust_address (dst, QImode, ofs);
3499 memh = adjust_address (dst, QImode, ofs+1);
3500 if (BYTES_BIG_ENDIAN)
3501 addr = meml, meml = memh, memh = addr;
3503 emit_move_insn (meml, dstl);
3504 emit_move_insn (memh, dsth);
3508 dstl = gen_reg_rtx (DImode);
3509 dsth = gen_reg_rtx (DImode);
3510 insl = gen_reg_rtx (DImode);
3511 insh = gen_reg_rtx (DImode);
3513 dsta = XEXP (dst, 0);
3514 if (GET_CODE (dsta) == LO_SUM)
3515 dsta = force_reg (Pmode, dsta);
3517 /* AND addresses cannot be in any alias set, since they may implicitly
3518 alias surrounding code. Ideally we'd have some alias set that
3519 covered all types except those with alignment 8 or higher. */
3521 meml = change_address (dst, DImode,
3522 gen_rtx_AND (DImode,
3523 plus_constant (dsta, ofs),
3525 set_mem_alias_set (meml, 0);
3527 memh = change_address (dst, DImode,
3528 gen_rtx_AND (DImode,
3529 plus_constant (dsta, ofs + size - 1),
3531 set_mem_alias_set (memh, 0);
3533 emit_move_insn (dsth, memh);
3534 emit_move_insn (dstl, meml);
3535 if (WORDS_BIG_ENDIAN)
3537 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3539 if (src != const0_rtx)
3544 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3547 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3550 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3553 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3554 GEN_INT (size*8), addr));
3560 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3564 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3565 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3569 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3573 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3577 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3579 if (src != CONST0_RTX (GET_MODE (src)))
3581 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3582 GEN_INT (size*8), addr));
3587 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3590 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3593 emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
3598 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3603 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3607 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3608 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3612 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3617 if (src != CONST0_RTX (GET_MODE (src)))
3619 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3620 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3623 if (WORDS_BIG_ENDIAN)
3625 emit_move_insn (meml, dstl);
3626 emit_move_insn (memh, dsth);
3630 /* Must store high before low for degenerate case of aligned. */
3631 emit_move_insn (memh, dsth);
3632 emit_move_insn (meml, dstl);
3636 /* The block move code tries to maximize speed by separating loads and
3637 stores at the expense of register pressure: we load all of the data
3638 before we store it back out. Two secondary effects are worth
3639 mentioning: this speeds copying to/from aligned and unaligned
3640 buffers, and it makes the code significantly easier to write. */
3642 #define MAX_MOVE_WORDS 8
3644 /* Load an integral number of consecutive unaligned quadwords. */
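/* Loading N unaligned quadwords takes N+1 ldq_u's, since the data may
   straddle N+1 aligned quadwords; each loaded quadword other than the
   two ends then feeds an extql/extqh pair for two adjacent output
   words.  A summary of the loop below.  */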
3647 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3648 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3650 rtx const im8 = GEN_INT (-8);
3651 rtx const i64 = GEN_INT (64);
3652 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3653 rtx sreg, areg, tmp, smema;
3656 smema = XEXP (smem, 0);
3657 if (GET_CODE (smema) == LO_SUM)
3658 smema = force_reg (Pmode, smema);
3660 /* Generate all the tmp registers we need. */
3661 for (i = 0; i < words; ++i)
3663 data_regs[i] = out_regs[i];
3664 ext_tmps[i] = gen_reg_rtx (DImode);
3666 data_regs[words] = gen_reg_rtx (DImode);
3669 smem = adjust_address (smem, GET_MODE (smem), ofs);
3671 /* Load up all of the source data. */
3672 for (i = 0; i < words; ++i)
3674 tmp = change_address (smem, DImode,
3675 gen_rtx_AND (DImode,
3676 plus_constant (smema, 8*i),
3678 set_mem_alias_set (tmp, 0);
3679 emit_move_insn (data_regs[i], tmp);
3682 tmp = change_address (smem, DImode,
3683 gen_rtx_AND (DImode,
3684 plus_constant (smema, 8*words - 1),
3686 set_mem_alias_set (tmp, 0);
3687 emit_move_insn (data_regs[words], tmp);
3689 /* Extract the half-word fragments. Unfortunately DEC decided to make
3690 extxh with offset zero a noop instead of zeroing the register, so
3691 we must take care of that edge condition ourselves with cmov. */
3693 sreg = copy_addr_to_reg (smema);
3694 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3696 if (WORDS_BIG_ENDIAN)
3697 emit_move_insn (sreg, plus_constant (sreg, 7));
3698 for (i = 0; i < words; ++i)
3700 if (WORDS_BIG_ENDIAN)
3702 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3703 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3707 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3708 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3710 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3711 gen_rtx_IF_THEN_ELSE (DImode,
3712 gen_rtx_EQ (DImode, areg,
3714 const0_rtx, ext_tmps[i])));
3717 /* Merge the half-words into whole words. */
3718 for (i = 0; i < words; ++i)
3720 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3721 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3725 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3726 may be NULL to store zeros. */
3729 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3730 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3732 rtx const im8 = GEN_INT (-8);
3733 rtx const i64 = GEN_INT (64);
3734 rtx ins_tmps[MAX_MOVE_WORDS];
3735 rtx st_tmp_1, st_tmp_2, dreg;
3736 rtx st_addr_1, st_addr_2, dmema;
3739 dmema = XEXP (dmem, 0);
3740 if (GET_CODE (dmema) == LO_SUM)
3741 dmema = force_reg (Pmode, dmema);
3743 /* Generate all the tmp registers we need. */
3744 if (data_regs != NULL)
3745 for (i = 0; i < words; ++i)
3746 ins_tmps[i] = gen_reg_rtx(DImode);
3747 st_tmp_1 = gen_reg_rtx(DImode);
3748 st_tmp_2 = gen_reg_rtx(DImode);
3751 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3753 st_addr_2 = change_address (dmem, DImode,
3754 gen_rtx_AND (DImode,
3755 plus_constant (dmema, words*8 - 1),
3757 set_mem_alias_set (st_addr_2, 0);
3759 st_addr_1 = change_address (dmem, DImode,
3760 gen_rtx_AND (DImode, dmema, im8));
3761 set_mem_alias_set (st_addr_1, 0);
3763 /* Load up the destination end bits. */
3764 emit_move_insn (st_tmp_2, st_addr_2);
3765 emit_move_insn (st_tmp_1, st_addr_1);
3767 /* Shift the input data into place. */
3768 dreg = copy_addr_to_reg (dmema);
3769 if (WORDS_BIG_ENDIAN)
3770 emit_move_insn (dreg, plus_constant (dreg, 7));
3771 if (data_regs != NULL)
3773 for (i = words-1; i >= 0; --i)
3775 if (WORDS_BIG_ENDIAN)
3777 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3778 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3782 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3783 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3786 for (i = words-1; i > 0; --i)
3788 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3789 ins_tmps[i-1], ins_tmps[i-1], 1,
3794 /* Split and merge the ends with the destination data. */
3795 if (WORDS_BIG_ENDIAN)
3797 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3798 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3802 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3803 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3806 if (data_regs != NULL)
3808 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3809 st_tmp_2, 1, OPTAB_WIDEN);
3810 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3811 st_tmp_1, 1, OPTAB_WIDEN);
3815 if (WORDS_BIG_ENDIAN)
3816 emit_move_insn (st_addr_1, st_tmp_1);
3818 emit_move_insn (st_addr_2, st_tmp_2);
3819 for (i = words-1; i > 0; --i)
3821 rtx tmp = change_address (dmem, DImode,
3822 gen_rtx_AND (DImode,
3823 plus_constant(dmema,
3824 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3826 set_mem_alias_set (tmp, 0);
3827 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3829 if (WORDS_BIG_ENDIAN)
3830 emit_move_insn (st_addr_2, st_tmp_2);
3832 emit_move_insn (st_addr_1, st_tmp_1);
3836 /* Expand string/block move operations.
3838 operands[0] is the pointer to the destination.
3839 operands[1] is the pointer to the source.
3840 operands[2] is the number of bytes to move.
3841 operands[3] is the alignment. */
3844 alpha_expand_block_move (rtx operands[])
3846 rtx bytes_rtx = operands[2];
3847 rtx align_rtx = operands[3];
3848 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3849 HOST_WIDE_INT bytes = orig_bytes;
3850 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3851 HOST_WIDE_INT dst_align = src_align;
3852 rtx orig_src = operands[1];
3853 rtx orig_dst = operands[0];
3854 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3856 unsigned int i, words, ofs, nregs = 0;
3858 if (orig_bytes <= 0)
3860 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3863 /* Look for additional alignment information from recorded register info. */
3865 tmp = XEXP (orig_src, 0);
3867 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3868 else if (GET_CODE (tmp) == PLUS
3869 && REG_P (XEXP (tmp, 0))
3870 && CONST_INT_P (XEXP (tmp, 1)))
3872 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3873 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3877 if (a >= 64 && c % 8 == 0)
3879 else if (a >= 32 && c % 4 == 0)
3881 else if (a >= 16 && c % 2 == 0)
3886 tmp = XEXP (orig_dst, 0);
3888 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3889 else if (GET_CODE (tmp) == PLUS
3890 && REG_P (XEXP (tmp, 0))
3891 && CONST_INT_P (XEXP (tmp, 1)))
3893 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3894 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3898 if (a >= 64 && c % 8 == 0)
3900 else if (a >= 32 && c % 4 == 0)
3902 else if (a >= 16 && c % 2 == 0)
3908 if (src_align >= 64 && bytes >= 8)
3912 for (i = 0; i < words; ++i)
3913 data_regs[nregs + i] = gen_reg_rtx (DImode);
3915 for (i = 0; i < words; ++i)
3916 emit_move_insn (data_regs[nregs + i],
3917 adjust_address (orig_src, DImode, ofs + i * 8));
3924 if (src_align >= 32 && bytes >= 4)
3928 for (i = 0; i < words; ++i)
3929 data_regs[nregs + i] = gen_reg_rtx (SImode);
3931 for (i = 0; i < words; ++i)
3932 emit_move_insn (data_regs[nregs + i],
3933 adjust_address (orig_src, SImode, ofs + i * 4));
3944 for (i = 0; i < words+1; ++i)
3945 data_regs[nregs + i] = gen_reg_rtx (DImode);
3947 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3955 if (! TARGET_BWX && bytes >= 4)
3957 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3958 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3965 if (src_align >= 16)
3968 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3969 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3972 } while (bytes >= 2);
3974 else if (! TARGET_BWX)
3976 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3977 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3985 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3986 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3991 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3993 /* Now save it back out again. */
3997 /* Write out the data in whatever chunks reading the source allowed. */
3998 if (dst_align >= 64)
4000 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4002 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4009 if (dst_align >= 32)
4011 /* If the source has remaining DImode regs, write them out in two pieces. */
4013 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4015 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4016 NULL_RTX, 1, OPTAB_WIDEN);
4018 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4019 gen_lowpart (SImode, data_regs[i]));
4020 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4021 gen_lowpart (SImode, tmp));
4026 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4028 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4035 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4037 /* Write out a remaining block of words using unaligned methods. */
4039 for (words = 1; i + words < nregs; words++)
4040 if (GET_MODE (data_regs[i + words]) != DImode)
4044 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4046 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4053 /* Due to the above, this won't be aligned. */
4054 /* ??? If we have more than one of these, consider constructing full
4055 words in registers and using alpha_expand_unaligned_store_words. */
4056 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4058 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4063 if (dst_align >= 16)
4064 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4066 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4071 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4073 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4078 /* The remainder must be byte copies. */
4081 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4082 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4091 alpha_expand_block_clear (rtx operands[])
4093 rtx bytes_rtx = operands[1];
4094 rtx align_rtx = operands[3];
4095 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4096 HOST_WIDE_INT bytes = orig_bytes;
4097 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4098 HOST_WIDE_INT alignofs = 0;
4099 rtx orig_dst = operands[0];
4101 int i, words, ofs = 0;
4103 if (orig_bytes <= 0)
4105 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4108 /* Look for stricter alignment. */
4109 tmp = XEXP (orig_dst, 0);
4111 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4112 else if (GET_CODE (tmp) == PLUS
4113 && REG_P (XEXP (tmp, 0))
4114 && CONST_INT_P (XEXP (tmp, 1)))
4116 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4117 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4122 align = a, alignofs = 8 - c % 8;
4124 align = a, alignofs = 4 - c % 4;
4126 align = a, alignofs = 2 - c % 2;
4130 /* Handle an unaligned prefix first. */
4134 #if HOST_BITS_PER_WIDE_INT >= 64
4135 /* Given that alignofs is bounded by align, the only time BWX could
4136 generate three stores is for a 7 byte fill. Prefer two individual
4137 stores over a load/mask/store sequence. */
4138 if ((!TARGET_BWX || alignofs == 7)
4140 && !(alignofs == 4 && bytes >= 4))
4142 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4143 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4147 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4148 set_mem_alias_set (mem, 0);
4150 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4151 if (bytes < alignofs)
4153 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4164 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4165 NULL_RTX, 1, OPTAB_WIDEN);
4167 emit_move_insn (mem, tmp);
4171 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4173 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4178 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4180 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4185 if (alignofs == 4 && bytes >= 4)
4187 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4193 /* If we've not used the extra lead alignment information by now,
4194 we won't be able to. Downgrade align to match what's left over. */
4197 alignofs = alignofs & -alignofs;
4198 align = MIN (align, alignofs * BITS_PER_UNIT);
4202 /* Handle a block of contiguous quadwords. */
4204 if (align >= 64 && bytes >= 8)
4208 for (i = 0; i < words; ++i)
4209 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4216 /* If the block is large and appropriately aligned, emit a single
4217 store followed by a sequence of stq_u insns. */
4219 if (align >= 32 && bytes > 16)
4223 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4227 orig_dsta = XEXP (orig_dst, 0);
4228 if (GET_CODE (orig_dsta) == LO_SUM)
4229 orig_dsta = force_reg (Pmode, orig_dsta);
4232 for (i = 0; i < words; ++i)
4235 = change_address (orig_dst, DImode,
4236 gen_rtx_AND (DImode,
4237 plus_constant (orig_dsta, ofs + i*8),
4239 set_mem_alias_set (mem, 0);
4240 emit_move_insn (mem, const0_rtx);
4243 /* Depending on the alignment, the first stq_u may have overlapped
4244 with the initial stl, which means that the last stq_u didn't
4245 write as much as it would appear. Leave those questionable bytes unaccounted for. */
4247 bytes -= words * 8 - 4;
4248 ofs += words * 8 - 4;
4251 /* Handle a smaller block of aligned words. */
4253 if ((align >= 64 && bytes == 4)
4254 || (align == 32 && bytes >= 4))
4258 for (i = 0; i < words; ++i)
4259 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4266 /* An unaligned block uses stq_u stores for as many as possible. */
4272 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4278 /* Next clean up any trailing pieces. */
4280 #if HOST_BITS_PER_WIDE_INT >= 64
4281 /* Count the number of bits in BYTES for which aligned stores could be emitted. */
4284 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4288 /* If we have appropriate alignment (and it wouldn't take too many
4289 instructions otherwise), mask out the bytes we need. */
4290 if (TARGET_BWX ? words > 2 : bytes > 0)
4297 mem = adjust_address (orig_dst, DImode, ofs);
4298 set_mem_alias_set (mem, 0);
4300 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4302 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4303 NULL_RTX, 1, OPTAB_WIDEN);
4305 emit_move_insn (mem, tmp);
4308 else if (align >= 32 && bytes < 4)
4313 mem = adjust_address (orig_dst, SImode, ofs);
4314 set_mem_alias_set (mem, 0);
4316 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4318 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4319 NULL_RTX, 1, OPTAB_WIDEN);
4321 emit_move_insn (mem, tmp);
4327 if (!TARGET_BWX && bytes >= 4)
4329 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4339 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4343 } while (bytes >= 2);
4345 else if (! TARGET_BWX)
4347 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4355 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4363 /* Returns a mask so that zap(x, value) == x & mask. */
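/* E.g. zap(x, 0x0f) zeroes bytes 0-3, so the mask returned for value
   0x0f is 0xffffffff00000000: each set bit in VALUE clears the
   corresponding byte of the mask, each clear bit contributes 0xff.  */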
4366 alpha_expand_zap_mask (HOST_WIDE_INT value)
4371 if (HOST_BITS_PER_WIDE_INT >= 64)
4373 HOST_WIDE_INT mask = 0;
4375 for (i = 7; i >= 0; --i)
4378 if (!((value >> i) & 1))
4382 result = gen_int_mode (mask, DImode);
4386 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4388 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4390 for (i = 7; i >= 4; --i)
4393 if (!((value >> i) & 1))
4397 for (i = 3; i >= 0; --i)
4400 if (!((value >> i) & 1))
4404 result = immed_double_const (mask_lo, mask_hi, DImode);
4411 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4412 enum machine_mode mode,
4413 rtx op0, rtx op1, rtx op2)
4415 op0 = gen_lowpart (mode, op0);
4417 if (op1 == const0_rtx)
4418 op1 = CONST0_RTX (mode);
4420 op1 = gen_lowpart (mode, op1);
4422 if (op2 == const0_rtx)
4423 op2 = CONST0_RTX (mode);
4425 op2 = gen_lowpart (mode, op2);
4427 emit_insn ((*gen) (op0, op1, op2));
4430 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4431 COND is true. Mark the jump as unlikely to be taken. */
4434 emit_unlikely_jump (rtx cond, rtx label)
4436 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4439 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4440 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4441 add_reg_note (x, REG_BR_PROB, very_unlikely);
4444 /* A subroutine of the atomic operation splitters. Emit a load-locked
4445 instruction in MODE. */
4448 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4450 rtx (*fn) (rtx, rtx) = NULL;
4452 fn = gen_load_locked_si;
4453 else if (mode == DImode)
4454 fn = gen_load_locked_di;
4455 emit_insn (fn (reg, mem));
4458 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4459 instruction in MODE. */
4462 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4464 rtx (*fn) (rtx, rtx, rtx) = NULL;
4466 fn = gen_store_conditional_si;
4467 else if (mode == DImode)
4468 fn = gen_store_conditional_di;
4469 emit_insn (fn (res, mem, val));
4472 /* A subroutine of the atomic operation splitters. Emit an insxl
4473 instruction in MODE. */
4476 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4478 rtx ret = gen_reg_rtx (DImode);
4479 rtx (*fn) (rtx, rtx, rtx);
4481 if (WORDS_BIG_ENDIAN)
4495 /* The insbl and inswl patterns require a register operand. */
4496 op1 = force_reg (mode, op1);
4497 emit_insn (fn (ret, op1, op2));
4502 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4503 to perform. MEM is the memory on which to operate. VAL is the second
4504 operand of the binary operator. BEFORE and AFTER are optional locations to
4505 return the value of MEM either before or after the operation. SCRATCH is
4506 a scratch register. */
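/* The emitted sequence is the usual load-locked/store-conditional
   retry loop, roughly:

	mb
   1:	ldq_l	before,mem
	<code>	scratch,before,val
	stq_c	scratch,mem
	beq	scratch,1b	# retry if the reservation was lost
	mb

   A sketch; the BEFORE/AFTER copies and the AND/NOT special case are
   handled in the code below.  */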
4509 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4510 rtx before, rtx after, rtx scratch)
4512 enum machine_mode mode = GET_MODE (mem);
4513 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4515 emit_insn (gen_memory_barrier ());
4517 label = gen_label_rtx ();
4519 label = gen_rtx_LABEL_REF (DImode, label);
4523 emit_load_locked (mode, before, mem);
4527 x = gen_rtx_AND (mode, before, val);
4528 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4530 x = gen_rtx_NOT (mode, val);
4533 x = gen_rtx_fmt_ee (code, mode, before, val);
4535 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4536 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4538 emit_store_conditional (mode, cond, mem, scratch);
4540 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4541 emit_unlikely_jump (x, label);
4543 emit_insn (gen_memory_barrier ());
4546 /* Expand a compare and swap operation. */
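/* Again a retry loop, roughly:

	mb
   1:	ldq_l	retval,mem
	cmpeq	retval,oldval,cond
	beq	cond,2f		# values differ: fail
	mov	newval,scratch
	stq_c	scratch,mem
	beq	scratch,1b	# reservation lost: retry
	mb
   2:

   A sketch of the code below, which also shortcuts the comparison
   when OLDVAL is zero.  */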
4549 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4552 enum machine_mode mode = GET_MODE (mem);
4553 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4555 emit_insn (gen_memory_barrier ());
4557 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4558 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4559 emit_label (XEXP (label1, 0));
4561 emit_load_locked (mode, retval, mem);
4563 x = gen_lowpart (DImode, retval);
4564 if (oldval == const0_rtx)
4565 x = gen_rtx_NE (DImode, x, const0_rtx);
4568 x = gen_rtx_EQ (DImode, x, oldval);
4569 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4570 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4572 emit_unlikely_jump (x, label2);
4574 emit_move_insn (scratch, newval);
4575 emit_store_conditional (mode, cond, mem, scratch);
4577 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4578 emit_unlikely_jump (x, label1);
4580 emit_insn (gen_memory_barrier ());
4581 emit_label (XEXP (label2, 0));
4585 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4587 enum machine_mode mode = GET_MODE (mem);
4588 rtx addr, align, wdst;
4589 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4591 addr = force_reg (DImode, XEXP (mem, 0));
4592 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4593 NULL_RTX, 1, OPTAB_DIRECT);
4595 oldval = convert_modes (DImode, mode, oldval, 1);
4596 newval = emit_insxl (mode, newval, addr);
4598 wdst = gen_reg_rtx (DImode);
4600 fn5 = gen_sync_compare_and_swapqi_1;
4602 fn5 = gen_sync_compare_and_swaphi_1;
4603 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4605 emit_move_insn (dst, gen_lowpart (mode, wdst));
4609 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4610 rtx oldval, rtx newval, rtx align,
4611 rtx scratch, rtx cond)
4613 rtx label1, label2, mem, width, mask, x;
4615 mem = gen_rtx_MEM (DImode, align);
4616 MEM_VOLATILE_P (mem) = 1;
4618 emit_insn (gen_memory_barrier ());
4619 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4620 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4621 emit_label (XEXP (label1, 0));
4623 emit_load_locked (DImode, scratch, mem);
4625 width = GEN_INT (GET_MODE_BITSIZE (mode));
4626 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4627 if (WORDS_BIG_ENDIAN)
4628 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4630 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4632 if (oldval == const0_rtx)
4633 x = gen_rtx_NE (DImode, dest, const0_rtx);
4636 x = gen_rtx_EQ (DImode, dest, oldval);
4637 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4638 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4640 emit_unlikely_jump (x, label2);
4642 if (WORDS_BIG_ENDIAN)
4643 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4645 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4646 emit_insn (gen_iordi3 (scratch, scratch, newval));
4648 emit_store_conditional (DImode, scratch, mem, scratch);
4650 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4651 emit_unlikely_jump (x, label1);
4653 emit_insn (gen_memory_barrier ());
4654 emit_label (XEXP (label2, 0));
4657 /* Expand an atomic exchange operation. */
4660 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4662 enum machine_mode mode = GET_MODE (mem);
4663 rtx label, x, cond = gen_lowpart (DImode, scratch);
4665 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4666 emit_label (XEXP (label, 0));
4668 emit_load_locked (mode, retval, mem);
4669 emit_move_insn (scratch, val);
4670 emit_store_conditional (mode, cond, mem, scratch);
4672 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4673 emit_unlikely_jump (x, label);
4675 emit_insn (gen_memory_barrier ());
4679 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4681 enum machine_mode mode = GET_MODE (mem);
4682 rtx addr, align, wdst;
4683 rtx (*fn4) (rtx, rtx, rtx, rtx);
4685 /* Force the address into a register. */
4686 addr = force_reg (DImode, XEXP (mem, 0));
4688 /* Align it to a multiple of 8. */
4689 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4690 NULL_RTX, 1, OPTAB_DIRECT);
4692 /* Insert val into the correct byte location within the word. */
4693 val = emit_insxl (mode, val, addr);
4695 wdst = gen_reg_rtx (DImode);
4697 fn4 = gen_sync_lock_test_and_setqi_1;
4699 fn4 = gen_sync_lock_test_and_sethi_1;
4700 emit_insn (fn4 (wdst, addr, val, align));
4702 emit_move_insn (dst, gen_lowpart (mode, wdst));
4706 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4707 rtx val, rtx align, rtx scratch)
4709 rtx label, mem, width, mask, x;
4711 mem = gen_rtx_MEM (DImode, align);
4712 MEM_VOLATILE_P (mem) = 1;
4714 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4715 emit_label (XEXP (label, 0));
4717 emit_load_locked (DImode, scratch, mem);
4719 width = GEN_INT (GET_MODE_BITSIZE (mode));
4720 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4721 if (WORDS_BIG_ENDIAN)
4723 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4724 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4728 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4729 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4731 emit_insn (gen_iordi3 (scratch, scratch, val));
4733 emit_store_conditional (DImode, scratch, mem, scratch);
4735 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4736 emit_unlikely_jump (x, label);
4738 emit_insn (gen_memory_barrier ());
4741 /* Adjust the cost of a scheduling dependency. Return the new cost of
4742 the dependency LINK of INSN on DEP_INSN. COST is the current cost. */
4745 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4747 enum attr_type dep_insn_type;
4749 /* If the dependence is an anti-dependence, there is no cost. For an
4750 output dependence, there is sometimes a cost, but it doesn't seem
4751 worth handling those few cases. */
4752 if (REG_NOTE_KIND (link) != 0)
4755 /* If we can't recognize the insns, we can't really do anything. */
4756 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4759 dep_insn_type = get_attr_type (dep_insn);
4761 /* Bring in the user-defined memory latency. */
4762 if (dep_insn_type == TYPE_ILD
4763 || dep_insn_type == TYPE_FLD
4764 || dep_insn_type == TYPE_LDSYM)
4765 cost += alpha_memory_latency-1;
4767 /* Everything else is handled in DFA bypasses now. */
4772 /* The number of instructions that can be issued per cycle. */
4775 alpha_issue_rate (void)
4777 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4780 /* How many alternative schedules to try. This should be as wide as the
4781 scheduling freedom in the DFA, but no wider. Making this value too
4782 large results in extra work for the scheduler.
4784 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4785 alternative schedules. For EV5, we can choose between E0/E1 and
4786 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4789 alpha_multipass_dfa_lookahead (void)
4791 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4794 /* Machine-specific function data. */
4796 struct GTY(()) machine_function
4799 /* List of call information words for calls from this function. */
4800 struct rtx_def *first_ciw;
4801 struct rtx_def *last_ciw;
4804 /* List of deferred case vectors. */
4805 struct rtx_def *addr_list;
4808 const char *some_ld_name;
4810 /* For TARGET_LD_BUGGY_LDGP. */
4811 struct rtx_def *gp_save_rtx;
4813 /* For VMS condition handlers. */
4814 bool uses_condition_handler;
4817 /* How to allocate a 'struct machine_function'. */
4819 static struct machine_function *
4820 alpha_init_machine_status (void)
4822 return ggc_alloc_cleared_machine_function ();
4825 /* Support for frame based VMS condition handlers. */
4827 /* A VMS condition handler may be established for a function with a call to
4828 __builtin_establish_vms_condition_handler, and cancelled with a call to
4829 __builtin_revert_vms_condition_handler.
4831 The VMS Condition Handling Facility knows about the existence of a handler
4832 from the procedure descriptor .handler field. As the VMS native compilers do,
4833 we store the user-specified handler's address at a fixed location in the
4834 stack frame and point the procedure descriptor at a common wrapper which
4835 fetches the real handler's address and issues an indirect call.
4837 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4839 We force the procedure kind to PT_STACK, and the fixed frame location is
4840 fp+8, just before the register save area. We use the handler_data field in
4841 the procedure descriptor to state the fp offset at which the installed
4842 handler address can be found. */
4844 #define VMS_COND_HANDLER_FP_OFFSET 8
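/* Conceptual sketch of the dispatch performed by the libgcc wrapper named
   above: fetch the user handler from the fixed frame slot and call it.
   All names and types below are illustrative; in particular, how the real
   __gcc_shell_handler recovers the establisher's frame pointer from the
   mechanism vector is elided here.  Kept under #if 0; illustration only.  */
#if 0
typedef long (*vms_handler_fn) (void *sigargs, void *mechargs);

static long
shell_handler_sketch (void *sigargs, void *mechargs, char *establisher_fp)
{
  vms_handler_fn user_handler
    = *(vms_handler_fn *) (establisher_fp + VMS_COND_HANDLER_FP_OFFSET);
  return user_handler (sigargs, mechargs);
}
#endif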
4846 /* Expand code to store the currently installed user VMS condition handler
4847 into TARGET and install HANDLER as the new condition handler. */
4850 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4852 rtx handler_slot_address
4853 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4856 = gen_rtx_MEM (DImode, handler_slot_address);
4858 emit_move_insn (target, handler_slot);
4859 emit_move_insn (handler_slot, handler);
4861 /* Notify the start/prologue/epilogue emitters that the condition handler
4862 slot is needed. In addition to reserving the slot space, this will force
4863 the procedure kind to PT_STACK, to ensure that the hard_frame_pointer_rtx
4864 use above is correct. */
4865 cfun->machine->uses_condition_handler = true;
4868 /* Expand code to store the current VMS condition handler into TARGET and
4869 null it. */
4872 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4874 /* We implement this by establishing a null condition handler, with the tiny
4875 side effect of setting uses_condition_handler. This is a little bit
4876 pessimistic if no actual builtin_establish call is ever issued, but that
4877 is harmless and not expected to happen in practice anyway. */
4879 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4882 /* Functions to save and restore alpha_return_addr_rtx. */
4884 /* Start the ball rolling with RETURN_ADDR_RTX. */
4887 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4892 return get_hard_reg_initial_val (Pmode, REG_RA);
4895 /* Return or create a memory slot containing the gp value for the current
4896 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4899 alpha_gp_save_rtx (void)
4901 rtx seq, m = cfun->machine->gp_save_rtx;
4907 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4908 m = validize_mem (m);
4909 emit_move_insn (m, pic_offset_table_rtx);
4914 /* We used to simply emit the sequence after entry_of_function.
4915 However this breaks the CFG if the first instruction in the
4916 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4917 label. Emit the sequence properly on the edge. We are only
4918 invoked from dw2_build_landing_pads and finish_eh_generation
4919 will call commit_edge_insertions thanks to a kludge. */
4920 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4922 cfun->machine->gp_save_rtx = m;
4929 alpha_ra_ever_killed (void)
4933 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4934 return (int)df_regs_ever_live_p (REG_RA);
4936 push_topmost_sequence ();
4938 pop_topmost_sequence ();
4940 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4944 /* Return the trap mode suffix applicable to the current
4945 instruction, or NULL. */
4948 get_trap_mode_suffix (void)
4950 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4954 case TRAP_SUFFIX_NONE:
4957 case TRAP_SUFFIX_SU:
4958 if (alpha_fptm >= ALPHA_FPTM_SU)
4962 case TRAP_SUFFIX_SUI:
4963 if (alpha_fptm >= ALPHA_FPTM_SUI)
4967 case TRAP_SUFFIX_V_SV:
4975 case ALPHA_FPTM_SUI:
4981 case TRAP_SUFFIX_V_SV_SVI:
4990 case ALPHA_FPTM_SUI:
4997 case TRAP_SUFFIX_U_SU_SUI:
5006 case ALPHA_FPTM_SUI:
5019 /* Return the rounding mode suffix applicable to the current
5020 instruction, or NULL. */
5023 get_round_mode_suffix (void)
5025 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5029 case ROUND_SUFFIX_NONE:
5031 case ROUND_SUFFIX_NORMAL:
5034 case ALPHA_FPRM_NORM:
5036 case ALPHA_FPRM_MINF:
5038 case ALPHA_FPRM_CHOP:
5040 case ALPHA_FPRM_DYN:
5047 case ROUND_SUFFIX_C:
5056 /* Locate some local-dynamic symbol still in use by this function
5057 so that we can print its name in some movdi_er_tlsldm pattern. */
5060 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5064 if (GET_CODE (x) == SYMBOL_REF
5065 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5067 cfun->machine->some_ld_name = XSTR (x, 0);
5075 get_some_local_dynamic_name (void)
5079 if (cfun->machine->some_ld_name)
5080 return cfun->machine->some_ld_name;
5082 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5084 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5085 return cfun->machine->some_ld_name;
5090 /* Print an operand. Recognize special options, documented below. */
5093 print_operand (FILE *file, rtx x, int code)
5100 /* Print the assembler name of the current function. */
5101 assemble_name (file, alpha_fnname);
5105 assemble_name (file, get_some_local_dynamic_name ());
5110 const char *trap = get_trap_mode_suffix ();
5111 const char *round = get_round_mode_suffix ();
5114 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5115 (trap ? trap : ""), (round ? round : ""));
5120 /* Generate the single-precision instruction suffix. */
5121 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5126 /* Generate the double-precision instruction suffix. */
5126 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5130 if (alpha_this_literal_sequence_number == 0)
5131 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5132 fprintf (file, "%d", alpha_this_literal_sequence_number);
5136 if (alpha_this_gpdisp_sequence_number == 0)
5137 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5138 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5142 if (GET_CODE (x) == HIGH)
5143 output_addr_const (file, XEXP (x, 0));
5145 output_operand_lossage ("invalid %%H value");
5152 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5154 x = XVECEXP (x, 0, 0);
5155 lituse = "lituse_tlsgd";
5157 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5159 x = XVECEXP (x, 0, 0);
5160 lituse = "lituse_tlsldm";
5162 else if (CONST_INT_P (x))
5163 lituse = "lituse_jsr";
5166 output_operand_lossage ("invalid %%J value");
5170 if (x != const0_rtx)
5171 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5179 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5180 lituse = "lituse_jsrdirect";
5182 lituse = "lituse_jsr";
5185 gcc_assert (INTVAL (x) != 0);
5186 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5190 /* If this operand is the constant zero, write it as "$31". */
5192 fprintf (file, "%s", reg_names[REGNO (x)]);
5193 else if (x == CONST0_RTX (GET_MODE (x)))
5194 fprintf (file, "$31");
5196 output_operand_lossage ("invalid %%r value");
5200 /* Similar, but for floating-point. */
5202 fprintf (file, "%s", reg_names[REGNO (x)]);
5203 else if (x == CONST0_RTX (GET_MODE (x)))
5204 fprintf (file, "$f31");
5206 output_operand_lossage ("invalid %%R value");
5210 /* Write the 1's complement of a constant. */
5211 if (!CONST_INT_P (x))
5212 output_operand_lossage ("invalid %%N value");
5214 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5218 /* Write 1 << C, for a constant C. */
5219 if (!CONST_INT_P (x))
5220 output_operand_lossage ("invalid %%P value");
5222 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5226 /* Write the high-order 16 bits of a constant, sign-extended. */
5227 if (!CONST_INT_P (x))
5228 output_operand_lossage ("invalid %%h value");
5230 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5234 /* Write the low-order 16 bits of a constant, sign-extended. */
5235 if (!CONST_INT_P (x))
5236 output_operand_lossage ("invalid %%L value");
5238 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5239 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
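/* A standalone restatement of the trick used just above: keep bits 0..15,
   then subtract 0x10000 exactly when bit 15 is set (2 * 0x8000 == 0x10000),
   sign-extending the low 16 bits without implementation-defined right
   shifts.  Kept under #if 0; illustration only.  */
#if 0
static long
sext16_sketch (long v)
{
  return (v & 0xffff) - 2 * (v & 0x8000);  /* e.g. 0x8001 -> -0x7fff */
}
#endif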
5243 /* Write the byte mask for a ZAP insn. */
5244 if (GET_CODE (x) == CONST_DOUBLE)
5246 HOST_WIDE_INT mask = 0;
5247 HOST_WIDE_INT value;
5249 value = CONST_DOUBLE_LOW (x);
5250 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5255 value = CONST_DOUBLE_HIGH (x);
5256 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5259 mask |= (1 << (i + sizeof (int)));
5261 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5264 else if (CONST_INT_P (x))
5266 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5268 for (i = 0; i < 8; i++, value >>= 8)
5272 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5275 output_operand_lossage ("invalid %%m value");
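/* The %m case above condenses a 64-bit constant (whose bytes are assumed
   to be all-zeros or all-ones) into the 8-bit byte mask a ZAP insn
   expects: bit I is set iff byte I of the value is nonzero.  Standalone
   sketch of that reduction, under #if 0; illustration only.  */
#if 0
static unsigned int
zap_mask_sketch (unsigned long value)
{
  unsigned int mask = 0;
  int i;
  for (i = 0; i < 8; i++, value >>= 8)
    if (value & 0xff)
      mask |= 1u << i;
  return mask;  /* e.g. 0x00ff00000000ff00 -> 0x42 */
}
#endif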
5279 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5280 if (!CONST_INT_P (x)
5281 || (INTVAL (x) != 8 && INTVAL (x) != 16
5282 && INTVAL (x) != 32 && INTVAL (x) != 64))
5283 output_operand_lossage ("invalid %%M value");
5285 fprintf (file, "%s",
5286 (INTVAL (x) == 8 ? "b"
5287 : INTVAL (x) == 16 ? "w"
5288 : INTVAL (x) == 32 ? "l"
5293 /* Similar, except do it from the mask. */
5294 if (CONST_INT_P (x))
5296 HOST_WIDE_INT value = INTVAL (x);
5303 if (value == 0xffff)
5308 if (value == 0xffffffff)
5319 else if (HOST_BITS_PER_WIDE_INT == 32
5320 && GET_CODE (x) == CONST_DOUBLE
5321 && CONST_DOUBLE_LOW (x) == 0xffffffff
5322 && CONST_DOUBLE_HIGH (x) == 0)
5327 output_operand_lossage ("invalid %%U value");
5331 /* Write the constant value divided by 8 for little-endian mode or
5332 (56 - value) / 8 for big-endian mode. */
5334 if (!CONST_INT_P (x)
5335 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5338 || (INTVAL (x) & 7) != 0)
5339 output_operand_lossage ("invalid %%s value");
5341 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5343 ? (56 - INTVAL (x)) / 8
5348 /* Same, except compute (64 - c) / 8. */
5350 if (!CONST_INT_P (x)
5351 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5352 || (INTVAL (x) & 7) != 0)
5353 output_operand_lossage ("invalid %%s value");
5355 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5360 /* On Unicos/Mk systems: use a DEX expression if the symbol
5361 clashes with a register name. */
5362 int dex = unicosmk_need_dex (x);
5364 fprintf (file, "DEX(%d)", dex);
5366 output_addr_const (file, x);
5370 case 'C': case 'D': case 'c': case 'd':
5371 /* Write out comparison name. */
5373 enum rtx_code c = GET_CODE (x);
5375 if (!COMPARISON_P (x))
5376 output_operand_lossage ("invalid %%C value");
5378 else if (code == 'D')
5379 c = reverse_condition (c);
5380 else if (code == 'c')
5381 c = swap_condition (c);
5382 else if (code == 'd')
5383 c = swap_condition (reverse_condition (c));
5386 fprintf (file, "ule");
5388 fprintf (file, "ult");
5389 else if (c == UNORDERED)
5390 fprintf (file, "un");
5392 fprintf (file, "%s", GET_RTX_NAME (c));
5397 /* Write the divide or modulus operator. */
5398 switch (GET_CODE (x))
5401 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5404 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5407 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5410 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5413 output_operand_lossage ("invalid %%E value");
5419 /* Write "_u" for unaligned access. */
5420 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5421 fprintf (file, "_u");
5426 fprintf (file, "%s", reg_names[REGNO (x)]);
5428 output_address (XEXP (x, 0));
5429 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5431 switch (XINT (XEXP (x, 0), 1))
5435 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5438 output_operand_lossage ("unknown relocation unspec");
5443 output_addr_const (file, x);
5447 output_operand_lossage ("invalid %%xn code");
5452 print_operand_address (FILE *file, rtx addr)
5455 HOST_WIDE_INT offset = 0;
5457 if (GET_CODE (addr) == AND)
5458 addr = XEXP (addr, 0);
5460 if (GET_CODE (addr) == PLUS
5461 && CONST_INT_P (XEXP (addr, 1)))
5463 offset = INTVAL (XEXP (addr, 1));
5464 addr = XEXP (addr, 0);
5467 if (GET_CODE (addr) == LO_SUM)
5469 const char *reloc16, *reloclo;
5470 rtx op1 = XEXP (addr, 1);
5472 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5474 op1 = XEXP (op1, 0);
5475 switch (XINT (op1, 1))
5479 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5483 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5486 output_operand_lossage ("unknown relocation unspec");
5490 output_addr_const (file, XVECEXP (op1, 0, 0));
5495 reloclo = "gprellow";
5496 output_addr_const (file, op1);
5500 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5502 addr = XEXP (addr, 0);
5503 switch (GET_CODE (addr))
5506 basereg = REGNO (addr);
5510 basereg = subreg_regno (addr);
5517 fprintf (file, "($%d)\t\t!%s", basereg,
5518 (basereg == 29 ? reloc16 : reloclo));
5522 switch (GET_CODE (addr))
5525 basereg = REGNO (addr);
5529 basereg = subreg_regno (addr);
5533 offset = INTVAL (addr);
5536 #if TARGET_ABI_OPEN_VMS
5538 fprintf (file, "%s", XSTR (addr, 0));
5542 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5543 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5544 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5545 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5546 INTVAL (XEXP (XEXP (addr, 0), 1)));
5554 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5557 /* Emit RTL insns to initialize the variable parts of a trampoline at
5558 M_TRAMP. FNDECL is the target function's decl. CHAIN_VALUE is an rtx
5559 for the static chain value for the function. */
5562 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5564 rtx fnaddr, mem, word1, word2;
5566 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5568 #ifdef POINTERS_EXTEND_UNSIGNED
5569 fnaddr = convert_memory_address (Pmode, fnaddr);
5570 chain_value = convert_memory_address (Pmode, chain_value);
5573 if (TARGET_ABI_OPEN_VMS)
5578 /* Construct the name of the trampoline entry point. */
5579 fnname = XSTR (fnaddr, 0);
5580 trname = (char *) alloca (strlen (fnname) + 5);
5581 strcpy (trname, fnname);
5582 strcat (trname, "..tr");
5583 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5584 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5586 /* The trampoline (or "bounded") procedure descriptor is constructed from
5587 the function's procedure descriptor, with certain fields zeroed in
5588 accordance with the VMS calling standard. This is stored in the first quadword. */
5589 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5590 word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
5594 /* These 4 instructions are:
5595 ldq $1,24($27)
5596 ldq $27,16($27)
5597 jmp $31,($27),0
5598 nop
5599 We don't bother setting the HINT field of the jump; the nop
5600 is merely there for padding. */
5601 word1 = GEN_INT (0xa77b0010a43b0018);
5602 word2 = GEN_INT (0x47ff041f6bfb0000);
5605 /* Store the first two words, as computed above. */
5606 mem = adjust_address (m_tramp, DImode, 0);
5607 emit_move_insn (mem, word1);
5608 mem = adjust_address (m_tramp, DImode, 8);
5609 emit_move_insn (mem, word2);
5611 /* Store function address and static chain value. */
5612 mem = adjust_address (m_tramp, Pmode, 16);
5613 emit_move_insn (mem, fnaddr);
5614 mem = adjust_address (m_tramp, Pmode, 24);
5615 emit_move_insn (mem, chain_value);
5617 if (!TARGET_ABI_OPEN_VMS)
5619 emit_insn (gen_imb ());
5620 #ifdef ENABLE_EXECUTE_STACK
5621 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5622 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
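/* The OSF trampoline laid out above occupies 32 bytes: 16 bytes of code
   followed by the two data words that code loads through $27.  A sketch
   of the layout as a C struct (the struct name is illustrative only; the
   VMS variant stores a bounded procedure descriptor in the first two
   quadwords instead).  Kept under #if 0; illustration only.  */
#if 0
struct osf_trampoline_sketch
{
  unsigned int insn[4];     /* ldq $1,24($27); ldq $27,16($27); jmp; nop */
  unsigned long func_addr;  /* offset 16: target function address */
  unsigned long chain;      /* offset 24: static chain value */
};
#endif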
5627 /* Determine where to put an argument to a function.
5628 Value is zero to push the argument on the stack,
5629 or a hard register in which to store the argument.
5631 MODE is the argument's machine mode.
5632 TYPE is the data type of the argument (as a tree).
5633 This is null for libcalls where that information may
5634 not be available.
5635 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5636 the preceding args and about the function being called.
5637 NAMED is nonzero if this argument is a named parameter
5638 (otherwise it is an extra parameter matching an ellipsis).
5640 On Alpha the first 6 words of args are normally in registers
5641 and the rest are pushed. */
5644 alpha_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5645 const_tree type, bool named ATTRIBUTE_UNUSED)
5650 /* Don't get confused and pass small structures in FP registers. */
5651 if (type && AGGREGATE_TYPE_P (type))
5655 #ifdef ENABLE_CHECKING
5656 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5657 args. */
5658 gcc_assert (!COMPLEX_MODE_P (mode));
5661 /* Set up defaults for FP operands passed in FP registers, and
5662 integral operands passed in integer registers. */
5663 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5669 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5670 the two platforms, so we can't avoid conditional compilation. */
5671 #if TARGET_ABI_OPEN_VMS
5673 if (mode == VOIDmode)
5674 return alpha_arg_info_reg_val (*cum);
5676 num_args = cum->num_args;
5678 || targetm.calls.must_pass_in_stack (mode, type))
5681 #elif TARGET_ABI_OSF
5687 /* VOID is passed as a special flag for "last argument". */
5688 if (type == void_type_node)
5690 else if (targetm.calls.must_pass_in_stack (mode, type))
5694 #error Unhandled ABI
5697 return gen_rtx_REG (mode, num_args + basereg);
5700 /* Update the data in CUM to advance over an argument
5701 of mode MODE and data type TYPE.
5702 (TYPE is null for libcalls where that information may not be available.) */
5705 alpha_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5706 const_tree type, bool named ATTRIBUTE_UNUSED)
5708 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5709 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5714 if (!onstack && cum->num_args < 6)
5715 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5716 cum->num_args += increment;
5721 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5722 enum machine_mode mode ATTRIBUTE_UNUSED,
5723 tree type ATTRIBUTE_UNUSED,
5724 bool named ATTRIBUTE_UNUSED)
5728 #if TARGET_ABI_OPEN_VMS
5729 if (cum->num_args < 6
5730 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5731 words = 6 - cum->num_args;
5732 #elif TARGET_ABI_UNICOSMK
5733 /* Never any split arguments. */
5734 #elif TARGET_ABI_OSF
5735 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5738 #error Unhandled ABI
5741 return words * UNITS_PER_WORD;
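/* Worked example of the OSF rule above: with 5 argument words already
   assigned to registers, a 2-word argument puts its first word in the
   last register and the second on the stack, so one word's worth of
   bytes is "partial".  Standalone sketch (names hypothetical), under
   #if 0; illustration only.  */
#if 0
static int
partial_words_sketch (int words_used, int arg_words)
{
  if (words_used < 6 && 6 < words_used + arg_words)
    return 6 - words_used;   /* e.g. words_used = 5, arg_words = 2 -> 1 */
  return 0;
}
#endif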
5745 /* Return true if TYPE must be returned in memory, instead of in registers. */
5748 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5750 enum machine_mode mode = VOIDmode;
5755 mode = TYPE_MODE (type);
5757 /* All aggregates are returned in memory, except on OpenVMS where
5758 records that fit 64 bits should be returned by immediate value
5759 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5760 if (TARGET_ABI_OPEN_VMS
5761 && TREE_CODE (type) != ARRAY_TYPE
5762 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
5765 if (AGGREGATE_TYPE_P (type))
5769 size = GET_MODE_SIZE (mode);
5770 switch (GET_MODE_CLASS (mode))
5772 case MODE_VECTOR_FLOAT:
5773 /* Pass all float vectors in memory, like an aggregate. */
5776 case MODE_COMPLEX_FLOAT:
5777 /* We judge complex floats on the size of their element,
5778 not the size of the whole type. */
5779 size = GET_MODE_UNIT_SIZE (mode);
5784 case MODE_COMPLEX_INT:
5785 case MODE_VECTOR_INT:
5789 /* ??? We get called on all sorts of random stuff from
5790 aggregate_value_p. We must return something, but it's not
5791 clear what's safe to return. Pretend it's a struct I
5792 know nothing about. */
5796 /* Otherwise types must fit in one register. */
5797 return size > UNITS_PER_WORD;
5800 /* Return true if TYPE should be passed by invisible reference. */
5803 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5804 enum machine_mode mode,
5805 const_tree type ATTRIBUTE_UNUSED,
5806 bool named ATTRIBUTE_UNUSED)
5808 return mode == TFmode || mode == TCmode;
5811 /* Define how to find the value returned by a function. VALTYPE is the
5812 data type of the value (as a tree). If the precise function being
5813 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5814 MODE is set instead of VALTYPE for libcalls.
5816 On Alpha the value is found in $0 for integer functions and
5817 $f0 for floating-point functions. */
5820 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5821 enum machine_mode mode)
5823 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5824 enum mode_class mclass;
5826 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5829 mode = TYPE_MODE (valtype);
5831 mclass = GET_MODE_CLASS (mode);
5835 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5836 where we have them returning both SImode and DImode. */
5837 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5838 PROMOTE_MODE (mode, dummy, valtype);
5841 case MODE_COMPLEX_INT:
5842 case MODE_VECTOR_INT:
5850 case MODE_COMPLEX_FLOAT:
5852 enum machine_mode cmode = GET_MODE_INNER (mode);
5854 return gen_rtx_PARALLEL
5857 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5859 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5860 GEN_INT (GET_MODE_SIZE (cmode)))));
5864 /* We should only reach here for BLKmode on VMS. */
5865 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5873 return gen_rtx_REG (mode, regnum);
5876 /* TCmode complex values are passed by invisible reference. We
5877 should not split these values. */
5880 alpha_split_complex_arg (const_tree type)
5882 return TYPE_MODE (type) != TCmode;
5886 alpha_build_builtin_va_list (void)
5888 tree base, ofs, space, record, type_decl;
5890 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5891 return ptr_type_node;
5893 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5894 type_decl = build_decl (BUILTINS_LOCATION,
5895 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5896 TYPE_STUB_DECL (record) = type_decl;
5897 TYPE_NAME (record) = type_decl;
5899 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5901 /* Dummy field to prevent alignment warnings. */
5902 space = build_decl (BUILTINS_LOCATION,
5903 FIELD_DECL, NULL_TREE, integer_type_node);
5904 DECL_FIELD_CONTEXT (space) = record;
5905 DECL_ARTIFICIAL (space) = 1;
5906 DECL_IGNORED_P (space) = 1;
5908 ofs = build_decl (BUILTINS_LOCATION,
5909 FIELD_DECL, get_identifier ("__offset"),
5911 DECL_FIELD_CONTEXT (ofs) = record;
5912 DECL_CHAIN (ofs) = space;
5913 /* ??? This is a hack: __offset is marked volatile to prevent
5914 DCE that confuses stdarg optimization and results in
5915 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5916 TREE_THIS_VOLATILE (ofs) = 1;
5918 base = build_decl (BUILTINS_LOCATION,
5919 FIELD_DECL, get_identifier ("__base"),
5921 DECL_FIELD_CONTEXT (base) = record;
5922 DECL_CHAIN (base) = ofs;
5924 TYPE_FIELDS (record) = base;
5925 layout_type (record);
5927 va_list_gpr_counter_field = ofs;
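/* The record built above corresponds roughly to the following C
   declaration (modulo the artificial padding field); the field names
   match the code, the typedef name is illustrative only.  Kept under
   #if 0; illustration only.  */
#if 0
typedef struct
{
  char *__base;   /* start of the saved-register/overflow area */
  int __offset;   /* byte offset of the next argument */
} va_list_tag_sketch;
#endif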
5932 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5933 and constant additions. */
5936 va_list_skip_additions (tree lhs)
5942 enum tree_code code;
5944 stmt = SSA_NAME_DEF_STMT (lhs);
5946 if (gimple_code (stmt) == GIMPLE_PHI)
5949 if (!is_gimple_assign (stmt)
5950 || gimple_assign_lhs (stmt) != lhs)
5953 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5955 code = gimple_assign_rhs_code (stmt);
5956 if (!CONVERT_EXPR_CODE_P (code)
5957 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5958 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5959 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5962 lhs = gimple_assign_rhs1 (stmt);
5966 /* Check if LHS = RHS statement is
5967 LHS = *(ap.__base + ap.__offset + cst)
5968 or
5969 LHS = *(ap.__base
5970 + ((ap.__offset + cst <= 47)
5971 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5972 If the former, indicate that GPR registers are needed,
5973 if the latter, indicate that FPR registers are needed.
5975 Also look for LHS = (*ptr).field, where ptr is one of the forms
5976 listed above.
5978 On alpha, cfun->va_list_gpr_size is used as size of the needed
5979 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5980 registers are needed and bit 1 set if FPR registers are needed.
5981 Return true if va_list references should not be scanned for the
5982 current statement. */
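/* A standalone sketch of the two offset computations described above
   (names are hypothetical; 48 == 6 registers * 8 bytes, the size of the
   FP register save area that sits below the GPR area).  Kept under
   #if 0; illustration only.  */
#if 0
static char *
va_arg_addr_sketch (char *base, long offset, int is_fp)
{
  if (is_fp && offset < 48)
    offset -= 48;      /* reach back into the FP register block */
  return base + offset;
}
#endif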
5985 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5987 tree base, offset, rhs;
5991 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5992 != GIMPLE_SINGLE_RHS)
5995 rhs = gimple_assign_rhs1 (stmt);
5996 while (handled_component_p (rhs))
5997 rhs = TREE_OPERAND (rhs, 0);
5998 if (TREE_CODE (rhs) != MEM_REF
5999 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
6002 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
6004 || !is_gimple_assign (stmt)
6005 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6008 base = gimple_assign_rhs1 (stmt);
6009 if (TREE_CODE (base) == SSA_NAME)
6011 base_stmt = va_list_skip_additions (base);
6013 && is_gimple_assign (base_stmt)
6014 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6015 base = gimple_assign_rhs1 (base_stmt);
6018 if (TREE_CODE (base) != COMPONENT_REF
6019 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6021 base = gimple_assign_rhs2 (stmt);
6022 if (TREE_CODE (base) == SSA_NAME)
6024 base_stmt = va_list_skip_additions (base);
6026 && is_gimple_assign (base_stmt)
6027 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6028 base = gimple_assign_rhs1 (base_stmt);
6031 if (TREE_CODE (base) != COMPONENT_REF
6032 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6038 base = get_base_address (base);
6039 if (TREE_CODE (base) != VAR_DECL
6040 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
6043 offset = gimple_op (stmt, 1 + offset_arg);
6044 if (TREE_CODE (offset) == SSA_NAME)
6046 gimple offset_stmt = va_list_skip_additions (offset);
6049 && gimple_code (offset_stmt) == GIMPLE_PHI)
6052 gimple arg1_stmt, arg2_stmt;
6054 enum tree_code code1, code2;
6056 if (gimple_phi_num_args (offset_stmt) != 2)
6060 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6062 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6063 if (arg1_stmt == NULL
6064 || !is_gimple_assign (arg1_stmt)
6065 || arg2_stmt == NULL
6066 || !is_gimple_assign (arg2_stmt))
6069 code1 = gimple_assign_rhs_code (arg1_stmt);
6070 code2 = gimple_assign_rhs_code (arg2_stmt);
6071 if (code1 == COMPONENT_REF
6072 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6074 else if (code2 == COMPONENT_REF
6075 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6077 gimple tem = arg1_stmt;
6079 arg1_stmt = arg2_stmt;
6085 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
6088 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
6089 if (code2 == MINUS_EXPR)
6091 if (sub < -48 || sub > -32)
6094 arg1 = gimple_assign_rhs1 (arg1_stmt);
6095 arg2 = gimple_assign_rhs1 (arg2_stmt);
6096 if (TREE_CODE (arg2) == SSA_NAME)
6098 arg2_stmt = va_list_skip_additions (arg2);
6099 if (arg2_stmt == NULL
6100 || !is_gimple_assign (arg2_stmt)
6101 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6103 arg2 = gimple_assign_rhs1 (arg2_stmt);
6108 if (TREE_CODE (arg1) != COMPONENT_REF
6109 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6110 || get_base_address (arg1) != base)
6113 /* Need floating point regs. */
6114 cfun->va_list_fpr_size |= 2;
6118 && is_gimple_assign (offset_stmt)
6119 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6120 offset = gimple_assign_rhs1 (offset_stmt);
6122 if (TREE_CODE (offset) != COMPONENT_REF
6123 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6124 || get_base_address (offset) != base)
6127 /* Need general regs. */
6128 cfun->va_list_fpr_size |= 1;
6132 si->va_list_escapes = true;
6137 /* Perform any actions needed for a function that is receiving a
6138 variable number of arguments. */
6141 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6142 tree type, int *pretend_size, int no_rtl)
6144 CUMULATIVE_ARGS cum = *pcum;
6146 /* Skip the current argument. */
6147 targetm.calls.function_arg_advance (&cum, mode, type, true);
6149 #if TARGET_ABI_UNICOSMK
6150 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6151 arguments on the stack. Unfortunately, it doesn't always store the first
6152 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6153 with stdargs as we always have at least one named argument there. */
6154 if (cum.num_reg_words < 6)
6158 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6159 emit_insn (gen_arg_home_umk ());
6163 #elif TARGET_ABI_OPEN_VMS
6164 /* For VMS, we allocate space for all 6 arg registers plus a count.
6166 However, if NO registers need to be saved, don't allocate any space.
6167 This is not only because we won't need the space, but because AP
6168 includes the current_pretend_args_size and we don't want to mess up
6169 any ap-relative addresses already made. */
6170 if (cum.num_args < 6)
6174 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6175 emit_insn (gen_arg_home ());
6177 *pretend_size = 7 * UNITS_PER_WORD;
6180 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6181 only push those that are remaining. However, if NO registers need to
6182 be saved, don't allocate any space. This is not only because we won't
6183 need the space, but because AP includes the current_pretend_args_size
6184 and we don't want to mess up any ap-relative addresses already made.
6186 If we are not to use the floating-point registers, save the integer
6187 registers where we would put the floating-point registers. This is
6188 not the most efficient way to implement varargs with just one register
6189 class, but it isn't worth doing anything more efficient in this rare
6190 case. */
6197 alias_set_type set = get_varargs_alias_set ();
6200 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6201 if (count > 6 - cum)
6204 /* Detect whether integer registers or floating-point registers
6205 are needed by the detected va_arg statements. See above for
6206 how these values are computed. Note that the "escape" value
6207 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6208 these bits set. */
6209 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6211 if (cfun->va_list_fpr_size & 1)
6213 tmp = gen_rtx_MEM (BLKmode,
6214 plus_constant (virtual_incoming_args_rtx,
6215 (cum + 6) * UNITS_PER_WORD));
6216 MEM_NOTRAP_P (tmp) = 1;
6217 set_mem_alias_set (tmp, set);
6218 move_block_from_reg (16 + cum, tmp, count);
6221 if (cfun->va_list_fpr_size & 2)
6223 tmp = gen_rtx_MEM (BLKmode,
6224 plus_constant (virtual_incoming_args_rtx,
6225 cum * UNITS_PER_WORD));
6226 MEM_NOTRAP_P (tmp) = 1;
6227 set_mem_alias_set (tmp, set);
6228 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6231 *pretend_size = 12 * UNITS_PER_WORD;
6236 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6238 HOST_WIDE_INT offset;
6239 tree t, offset_field, base_field;
6241 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6244 if (TARGET_ABI_UNICOSMK)
6245 std_expand_builtin_va_start (valist, nextarg);
6247 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6248 up by 48, storing fp arg registers in the first 48 bytes, and the
6249 integer arg registers in the next 48 bytes. This is only done,
6250 however, if any integer registers need to be stored.
6252 If no integer registers need be stored, then we must subtract 48
6253 in order to account for the integer arg registers which are counted
6254 in argsize above, but which are not actually stored on the stack.
6255 Must further be careful here about structures straddling the last
6256 integer argument register; that futzes with pretend_args_size,
6257 which changes the meaning of AP. */
6260 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6262 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6264 if (TARGET_ABI_OPEN_VMS)
6266 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6267 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6268 size_int (offset + NUM_ARGS * UNITS_PER_WORD));
6269 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6270 TREE_SIDE_EFFECTS (t) = 1;
6271 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6275 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6276 offset_field = DECL_CHAIN (base_field);
6278 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6279 valist, base_field, NULL_TREE);
6280 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6281 valist, offset_field, NULL_TREE);
6283 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6284 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6286 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6287 TREE_SIDE_EFFECTS (t) = 1;
6288 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6290 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6291 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6292 TREE_SIDE_EFFECTS (t) = 1;
6293 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6298 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6301 tree type_size, ptr_type, addend, t, addr;
6302 gimple_seq internal_post;
6304 /* If the type could not be passed in registers, skip the block
6305 reserved for the registers. */
6306 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6308 t = build_int_cst (TREE_TYPE (offset), 6*8);
6309 gimplify_assign (offset,
6310 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6315 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6317 if (TREE_CODE (type) == COMPLEX_TYPE)
6319 tree real_part, imag_part, real_temp;
6321 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6324 /* Copy the value into a new temporary, lest the formal temporary
6325 be reused out from under us. */
6326 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6328 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6331 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6333 else if (TREE_CODE (type) == REAL_TYPE)
6335 tree fpaddend, cond, fourtyeight;
6337 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6338 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6339 addend, fourtyeight);
6340 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6341 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6345 /* Build the final address and force that value into a temporary. */
6346 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6347 fold_convert (sizetype, addend));
6348 internal_post = NULL;
6349 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6350 gimple_seq_add_seq (pre_p, internal_post);
6352 /* Update the offset field. */
6353 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6354 if (type_size == NULL || TREE_OVERFLOW (type_size))
6358 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6359 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6360 t = size_binop (MULT_EXPR, t, size_int (8));
6362 t = fold_convert (TREE_TYPE (offset), t);
6363 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6366 return build_va_arg_indirect_ref (addr);
6370 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6373 tree offset_field, base_field, offset, base, t, r;
6376 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6377 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6379 base_field = TYPE_FIELDS (va_list_type_node);
6380 offset_field = DECL_CHAIN (base_field);
6381 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6382 valist, base_field, NULL_TREE);
6383 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6384 valist, offset_field, NULL_TREE);
6386 /* Pull the fields of the structure out into temporaries. Since we never
6387 modify the base field, we can use a formal temporary. Sign-extend the
6388 offset field so that it's the proper width for pointer arithmetic. */
6389 base = get_formal_tmp_var (base_field, pre_p);
6391 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6392 offset = get_initialized_tmp_var (t, pre_p, NULL);
6394 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6396 type = build_pointer_type_for_mode (type, ptr_mode, true);
6398 /* Find the value. Note that this will be a stable indirection, or
6399 a composite of stable indirections in the case of complex. */
6400 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6402 /* Stuff the offset temporary back into its field. */
6403 gimplify_assign (unshare_expr (offset_field),
6404 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6407 r = build_va_arg_indirect_ref (r);
6416 ALPHA_BUILTIN_CMPBGE,
6417 ALPHA_BUILTIN_EXTBL,
6418 ALPHA_BUILTIN_EXTWL,
6419 ALPHA_BUILTIN_EXTLL,
6420 ALPHA_BUILTIN_EXTQL,
6421 ALPHA_BUILTIN_EXTWH,
6422 ALPHA_BUILTIN_EXTLH,
6423 ALPHA_BUILTIN_EXTQH,
6424 ALPHA_BUILTIN_INSBL,
6425 ALPHA_BUILTIN_INSWL,
6426 ALPHA_BUILTIN_INSLL,
6427 ALPHA_BUILTIN_INSQL,
6428 ALPHA_BUILTIN_INSWH,
6429 ALPHA_BUILTIN_INSLH,
6430 ALPHA_BUILTIN_INSQH,
6431 ALPHA_BUILTIN_MSKBL,
6432 ALPHA_BUILTIN_MSKWL,
6433 ALPHA_BUILTIN_MSKLL,
6434 ALPHA_BUILTIN_MSKQL,
6435 ALPHA_BUILTIN_MSKWH,
6436 ALPHA_BUILTIN_MSKLH,
6437 ALPHA_BUILTIN_MSKQH,
6438 ALPHA_BUILTIN_UMULH,
6440 ALPHA_BUILTIN_ZAPNOT,
6441 ALPHA_BUILTIN_AMASK,
6442 ALPHA_BUILTIN_IMPLVER,
6444 ALPHA_BUILTIN_THREAD_POINTER,
6445 ALPHA_BUILTIN_SET_THREAD_POINTER,
6446 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6447 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6450 ALPHA_BUILTIN_MINUB8,
6451 ALPHA_BUILTIN_MINSB8,
6452 ALPHA_BUILTIN_MINUW4,
6453 ALPHA_BUILTIN_MINSW4,
6454 ALPHA_BUILTIN_MAXUB8,
6455 ALPHA_BUILTIN_MAXSB8,
6456 ALPHA_BUILTIN_MAXUW4,
6457 ALPHA_BUILTIN_MAXSW4,
6461 ALPHA_BUILTIN_UNPKBL,
6462 ALPHA_BUILTIN_UNPKBW,
6467 ALPHA_BUILTIN_CTPOP,
6472 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6473 CODE_FOR_builtin_cmpbge,
6474 CODE_FOR_builtin_extbl,
6475 CODE_FOR_builtin_extwl,
6476 CODE_FOR_builtin_extll,
6477 CODE_FOR_builtin_extql,
6478 CODE_FOR_builtin_extwh,
6479 CODE_FOR_builtin_extlh,
6480 CODE_FOR_builtin_extqh,
6481 CODE_FOR_builtin_insbl,
6482 CODE_FOR_builtin_inswl,
6483 CODE_FOR_builtin_insll,
6484 CODE_FOR_builtin_insql,
6485 CODE_FOR_builtin_inswh,
6486 CODE_FOR_builtin_inslh,
6487 CODE_FOR_builtin_insqh,
6488 CODE_FOR_builtin_mskbl,
6489 CODE_FOR_builtin_mskwl,
6490 CODE_FOR_builtin_mskll,
6491 CODE_FOR_builtin_mskql,
6492 CODE_FOR_builtin_mskwh,
6493 CODE_FOR_builtin_msklh,
6494 CODE_FOR_builtin_mskqh,
6495 CODE_FOR_umuldi3_highpart,
6496 CODE_FOR_builtin_zap,
6497 CODE_FOR_builtin_zapnot,
6498 CODE_FOR_builtin_amask,
6499 CODE_FOR_builtin_implver,
6500 CODE_FOR_builtin_rpcc,
6503 CODE_FOR_builtin_establish_vms_condition_handler,
6504 CODE_FOR_builtin_revert_vms_condition_handler,
6507 CODE_FOR_builtin_minub8,
6508 CODE_FOR_builtin_minsb8,
6509 CODE_FOR_builtin_minuw4,
6510 CODE_FOR_builtin_minsw4,
6511 CODE_FOR_builtin_maxub8,
6512 CODE_FOR_builtin_maxsb8,
6513 CODE_FOR_builtin_maxuw4,
6514 CODE_FOR_builtin_maxsw4,
6515 CODE_FOR_builtin_perr,
6516 CODE_FOR_builtin_pklb,
6517 CODE_FOR_builtin_pkwb,
6518 CODE_FOR_builtin_unpkbl,
6519 CODE_FOR_builtin_unpkbw,
6524 CODE_FOR_popcountdi2
6527 struct alpha_builtin_def
6530 enum alpha_builtin code;
6531 unsigned int target_mask;
6535 static struct alpha_builtin_def const zero_arg_builtins[] = {
6536 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6537 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6540 static struct alpha_builtin_def const one_arg_builtins[] = {
6541 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6542 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6543 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6544 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6545 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6546 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6547 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6548 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6551 static struct alpha_builtin_def const two_arg_builtins[] = {
6552 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6553 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6554 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6555 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6556 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6557 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6558 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6559 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6560 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6561 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6562 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6563 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6564 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6565 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6566 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6567 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6568 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6569 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6570 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6571 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6572 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6573 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6574 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6575 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6576 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6577 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6578 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6579 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6580 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6581 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6582 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6583 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6584 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6585 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6588 static GTY(()) tree alpha_v8qi_u;
6589 static GTY(()) tree alpha_v8qi_s;
6590 static GTY(()) tree alpha_v4hi_u;
6591 static GTY(()) tree alpha_v4hi_s;
6593 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6595 /* Return the alpha builtin for CODE. */
6598 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6600 if (code >= ALPHA_BUILTIN_max)
6601 return error_mark_node;
6602 return alpha_builtins[code];
6605 /* Helper function of alpha_init_builtins. Add the built-in specified
6606 by NAME, TYPE, CODE, and ECF. */
6609 alpha_builtin_function (const char *name, tree ftype,
6610 enum alpha_builtin code, unsigned ecf)
6612 tree decl = add_builtin_function (name, ftype, (int) code,
6613 BUILT_IN_MD, NULL, NULL_TREE);
6615 if (ecf & ECF_CONST)
6616 TREE_READONLY (decl) = 1;
6617 if (ecf & ECF_NOTHROW)
6618 TREE_NOTHROW (decl) = 1;
6620 alpha_builtins [(int) code] = decl;
6623 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6624 functions pointed to by P, with function type FTYPE. */
6627 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6632 for (i = 0; i < count; ++i, ++p)
6633 if ((target_flags & p->target_mask) == p->target_mask)
6634 alpha_builtin_function (p->name, ftype, p->code,
6635 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6639 alpha_init_builtins (void)
6641 tree dimode_integer_type_node;
6644 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6646 /* Fwrite on VMS is non-standard. */
6647 #if TARGET_ABI_OPEN_VMS
6648 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
6649 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
6652 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6653 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6656 ftype = build_function_type_list (dimode_integer_type_node,
6657 dimode_integer_type_node, NULL_TREE);
6658 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6661 ftype = build_function_type_list (dimode_integer_type_node,
6662 dimode_integer_type_node,
6663 dimode_integer_type_node, NULL_TREE);
6664 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6667 ftype = build_function_type (ptr_type_node, void_list_node);
6668 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6669 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6671 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6672 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6673 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6675 if (TARGET_ABI_OPEN_VMS)
6677 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6679 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6681 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6684 ftype = build_function_type_list (ptr_type_node, void_type_node,
6686 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6687 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6690 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6691 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6692 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6693 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6696 /* Expand an expression EXP that calls a built-in function,
6697 with result going to TARGET if that's convenient
6698 (and in mode MODE if that's convenient).
6699 SUBTARGET may be used as the target for computing one of EXP's operands.
6700 IGNORE is nonzero if the value is to be ignored. */
6703 alpha_expand_builtin (tree exp, rtx target,
6704 rtx subtarget ATTRIBUTE_UNUSED,
6705 enum machine_mode mode ATTRIBUTE_UNUSED,
6706 int ignore ATTRIBUTE_UNUSED)
6710 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6711 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6713 call_expr_arg_iterator iter;
6714 enum insn_code icode;
6715 rtx op[MAX_ARGS], pat;
6719 if (fcode >= ALPHA_BUILTIN_max)
6720 internal_error ("bad builtin fcode");
6721 icode = code_for_builtin[fcode];
6723 internal_error ("bad builtin fcode");
6725 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6728 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6730 const struct insn_operand_data *insn_op;
6732 if (arg == error_mark_node)
6734 if (arity > MAX_ARGS)
6737 insn_op = &insn_data[icode].operand[arity + nonvoid];
6739 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6741 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6742 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6748 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6750 || GET_MODE (target) != tmode
6751 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6752 target = gen_reg_rtx (tmode);
6758 pat = GEN_FCN (icode) (target);
6762 pat = GEN_FCN (icode) (target, op[0]);
6764 pat = GEN_FCN (icode) (op[0]);
6767 pat = GEN_FCN (icode) (target, op[0], op[1]);
6783 /* Several bits below assume HWI >= 64 bits. This should be enforced
6784 by config.gcc. */
6785 #if HOST_BITS_PER_WIDE_INT < 64
6786 # error "HOST_WIDE_INT too small"
6789 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6790 with an 8-bit output vector. OPINT contains the integer operands; bit N
6791 of OP_CONST is set if OPINT[N] is valid. */
6794 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6799 for (i = 0, val = 0; i < 8; ++i)
6801 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6802 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6806 return build_int_cst (long_integer_type_node, val);
6808 else if (op_const == 2 && opint[1] == 0)
6809 return build_int_cst (long_integer_type_node, 0xff);
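/* Standalone model of the CMPBGE semantics folded above: bit I of the
   result is set iff byte I of A is unsigned-greater-or-equal to byte I
   of B, which is also why cmpbge (x, 0) is always 0xff.  Kept under
   #if 0; illustration only.  */
#if 0
static unsigned int
cmpbge_sketch (unsigned long a, unsigned long b)
{
  unsigned int val = 0;
  int i;
  for (i = 0; i < 8; ++i)
    {
      unsigned int c0 = (a >> (i * 8)) & 0xff;
      unsigned int c1 = (b >> (i * 8)) & 0xff;
      if (c0 >= c1)
        val |= 1u << i;
    }
  return val;
}
#endif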
6813 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6814 specialized form of an AND operation. Other byte manipulation instructions
6815 are defined in terms of this instruction, so this is also used as a
6816 subroutine for other builtins.
6818 OP contains the tree operands; OPINT contains the extracted integer values.
6819 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6820 OPINT is to be considered. */
6823 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6828 unsigned HOST_WIDE_INT mask = 0;
6831 for (i = 0; i < 8; ++i)
6832 if ((opint[1] >> i) & 1)
6833 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6836 return build_int_cst (long_integer_type_node, opint[0] & mask);
6839 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6840 build_int_cst (long_integer_type_node, mask));
6842 else if ((op_const & 1) && opint[0] == 0)
6843 return build_int_cst (long_integer_type_node, 0);
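/* Standalone model of ZAPNOT, the primitive the fold above reduces to:
   byte I of the input survives iff bit I of the 8-bit mask is set.
   Kept under #if 0; illustration only.  */
#if 0
static unsigned long
zapnot_sketch (unsigned long x, unsigned int byte_mask)
{
  unsigned long mask = 0;
  int i;
  for (i = 0; i < 8; ++i)
    if ((byte_mask >> i) & 1)
      mask |= 0xffUL << (i * 8);
  return x & mask;
}
#endif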
6847 /* Fold the builtins for the EXT family of instructions. */
6850 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6851 long op_const, unsigned HOST_WIDE_INT bytemask,
6855 tree *zap_op = NULL;
6859 unsigned HOST_WIDE_INT loc;
6862 if (BYTES_BIG_ENDIAN)
6870 unsigned HOST_WIDE_INT temp = opint[0];
6883 opint[1] = bytemask;
6884 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6887 /* Fold the builtins for the INS family of instructions. */
6890 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6891 long op_const, unsigned HOST_WIDE_INT bytemask,
6894 if ((op_const & 1) && opint[0] == 0)
6895 return build_int_cst (long_integer_type_node, 0);
6899 unsigned HOST_WIDE_INT temp, loc, byteloc;
6900 tree *zap_op = NULL;
6903 if (BYTES_BIG_ENDIAN)
6910 byteloc = (64 - (loc * 8)) & 0x3f;
6927 opint[1] = bytemask;
6928 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6935 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6936 long op_const, unsigned HOST_WIDE_INT bytemask,
6941 unsigned HOST_WIDE_INT loc;
6944 if (BYTES_BIG_ENDIAN)
6951 opint[1] = bytemask ^ 0xff;
6954 return alpha_fold_builtin_zapnot (op, opint, op_const);
6958 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6964 unsigned HOST_WIDE_INT l;
6967 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6969 #if HOST_BITS_PER_WIDE_INT > 64
6973 return build_int_cst (long_integer_type_node, h);
6977 opint[1] = opint[0];
6980 /* Note that (X*1) >> 64 == 0. */
6981 if (opint[1] == 0 || opint[1] == 1)
6982 return build_int_cst (long_integer_type_node, 0);
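/* Standalone model of UMULH, the high 64 bits of an unsigned 64x64
   multiply, using GCC's __int128 extension; mul_double above computes
   the same result without relying on a 128-bit type.  Kept under #if 0;
   illustration only.  */
#if 0
static unsigned long
umulh_sketch (unsigned long a, unsigned long b)
{
  return (unsigned long) (((unsigned __int128) a * b) >> 64);
}
#endif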
6989 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6991 tree op0 = fold_convert (vtype, op[0]);
6992 tree op1 = fold_convert (vtype, op[1]);
6993 tree val = fold_build2 (code, vtype, op0, op1);
6994 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6998 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
7000 unsigned HOST_WIDE_INT temp = 0;
7006 for (i = 0; i < 8; ++i)
7008 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
7009 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
7016 return build_int_cst (long_integer_type_node, temp);
7020 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
7022 unsigned HOST_WIDE_INT temp;
7027 temp = opint[0] & 0xff;
7028 temp |= (opint[0] >> 24) & 0xff00;
7030 return build_int_cst (long_integer_type_node, temp);
7034 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
7036 unsigned HOST_WIDE_INT temp;
7041 temp = opint[0] & 0xff;
7042 temp |= (opint[0] >> 8) & 0xff00;
7043 temp |= (opint[0] >> 16) & 0xff0000;
7044 temp |= (opint[0] >> 24) & 0xff000000;
7046 return build_int_cst (long_integer_type_node, temp);
7050 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
7052 unsigned HOST_WIDE_INT temp;
7057 temp = opint[0] & 0xff;
7058 temp |= (opint[0] & 0xff00) << 24;
7060 return build_int_cst (long_integer_type_node, temp);
7064 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
7066 unsigned HOST_WIDE_INT temp;
7071 temp = opint[0] & 0xff;
7072 temp |= (opint[0] & 0x0000ff00) << 8;
7073 temp |= (opint[0] & 0x00ff0000) << 16;
7074 temp |= (opint[0] & 0xff000000) << 24;
7076 return build_int_cst (long_integer_type_node, temp);
7080 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7082 unsigned HOST_WIDE_INT temp;
7090 temp = exact_log2 (opint[0] & -opint[0]);
7092 return build_int_cst (long_integer_type_node, temp);
7096 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7098 unsigned HOST_WIDE_INT temp;
7106 temp = 64 - floor_log2 (opint[0]) - 1;
7108 return build_int_cst (long_integer_type_node, temp);
7112 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7114 unsigned HOST_WIDE_INT temp, op;
7122 temp++, op &= op - 1;
7124 return build_int_cst (long_integer_type_node, temp);
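/* The two classic bit tricks used in the folds above, in standalone
   form: X & -X isolates the lowest set bit (so its log2 is the
   trailing-zero count used for CTTZ), and X &= X - 1 clears one set
   bit per iteration (the CTPOP loop).  Kept under #if 0; illustration
   only.  */
#if 0
static int
ctpop_sketch (unsigned long x)
{
  int n = 0;
  while (x)
    {
      x &= x - 1;  /* clear the lowest set bit */
      n++;
    }
  return n;
}
#endif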
7127 /* Fold one of our builtin functions. */
7130 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7131 bool ignore ATTRIBUTE_UNUSED)
7133 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7137 if (n_args >= MAX_ARGS)
7140 for (i = 0; i < n_args; i++)
7143 if (arg == error_mark_node)
7147 if (TREE_CODE (arg) == INTEGER_CST)
7149 op_const |= 1L << i;
7150 opint[i] = int_cst_value (arg);
7154 switch (DECL_FUNCTION_CODE (fndecl))
7156 case ALPHA_BUILTIN_CMPBGE:
7157 return alpha_fold_builtin_cmpbge (opint, op_const);
7159 case ALPHA_BUILTIN_EXTBL:
7160 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7161 case ALPHA_BUILTIN_EXTWL:
7162 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7163 case ALPHA_BUILTIN_EXTLL:
7164 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7165 case ALPHA_BUILTIN_EXTQL:
7166 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7167 case ALPHA_BUILTIN_EXTWH:
7168 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7169 case ALPHA_BUILTIN_EXTLH:
7170 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7171 case ALPHA_BUILTIN_EXTQH:
7172 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7174 case ALPHA_BUILTIN_INSBL:
7175 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7176 case ALPHA_BUILTIN_INSWL:
7177 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7178 case ALPHA_BUILTIN_INSLL:
7179 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7180 case ALPHA_BUILTIN_INSQL:
7181 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7182 case ALPHA_BUILTIN_INSWH:
7183 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7184 case ALPHA_BUILTIN_INSLH:
7185 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7186 case ALPHA_BUILTIN_INSQH:
7187 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7189 case ALPHA_BUILTIN_MSKBL:
7190 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7191 case ALPHA_BUILTIN_MSKWL:
7192 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7193 case ALPHA_BUILTIN_MSKLL:
7194 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7195 case ALPHA_BUILTIN_MSKQL:
7196 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7197 case ALPHA_BUILTIN_MSKWH:
7198 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7199 case ALPHA_BUILTIN_MSKLH:
7200 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7201 case ALPHA_BUILTIN_MSKQH:
7202 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7204 case ALPHA_BUILTIN_UMULH:
7205 return alpha_fold_builtin_umulh (opint, op_const);
7207 case ALPHA_BUILTIN_ZAP:
7210 case ALPHA_BUILTIN_ZAPNOT:
7211 return alpha_fold_builtin_zapnot (op, opint, op_const);
7213 case ALPHA_BUILTIN_MINUB8:
7214 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7215 case ALPHA_BUILTIN_MINSB8:
7216 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7217 case ALPHA_BUILTIN_MINUW4:
7218 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7219 case ALPHA_BUILTIN_MINSW4:
7220 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7221 case ALPHA_BUILTIN_MAXUB8:
7222 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7223 case ALPHA_BUILTIN_MAXSB8:
7224 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7225 case ALPHA_BUILTIN_MAXUW4:
7226 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7227 case ALPHA_BUILTIN_MAXSW4:
7228 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7230 case ALPHA_BUILTIN_PERR:
7231 return alpha_fold_builtin_perr (opint, op_const);
7232 case ALPHA_BUILTIN_PKLB:
7233 return alpha_fold_builtin_pklb (opint, op_const);
7234 case ALPHA_BUILTIN_PKWB:
7235 return alpha_fold_builtin_pkwb (opint, op_const);
7236 case ALPHA_BUILTIN_UNPKBL:
7237 return alpha_fold_builtin_unpkbl (opint, op_const);
7238 case ALPHA_BUILTIN_UNPKBW:
7239 return alpha_fold_builtin_unpkbw (opint, op_const);
7241 case ALPHA_BUILTIN_CTTZ:
7242 return alpha_fold_builtin_cttz (opint, op_const);
7243 case ALPHA_BUILTIN_CTLZ:
7244 return alpha_fold_builtin_ctlz (opint, op_const);
7245 case ALPHA_BUILTIN_CTPOP:
7246 return alpha_fold_builtin_ctpop (opint, op_const);
7248 case ALPHA_BUILTIN_AMASK:
7249 case ALPHA_BUILTIN_IMPLVER:
7250 case ALPHA_BUILTIN_RPCC:
7251 case ALPHA_BUILTIN_THREAD_POINTER:
7252 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7253 /* None of these are foldable at compile-time.  */
7254 default:
7255 break;
7256 }
7257 return NULL;
7258 }
7259 /* This page contains routines that are used to determine what the function
7260 prologue and epilogue code will do and write them out. */
7262 /* Compute the size of the save area in the stack. */
7264 /* These variables are used for communication between the following functions.
7265 They indicate various things about the current function being compiled
7266 that are used to tell what kind of prologue, epilogue and procedure
7267 descriptor to generate. */
7269 /* The kind of procedure descriptor we need to generate: null frame,
register frame, or full stack frame.  */
7270 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7271 static enum alpha_procedure_types alpha_procedure_type;
7273 /* Register number (either FP or SP) that is used to unwind the frame. */
7274 static int vms_unwind_regno;
7276 /* Register number used to save FP. We need not have one for RA since
7277 we don't modify it for register procedures. This is only defined
7278 for register frame procedures. */
7279 static int vms_save_fp_regno;
7281 /* Register number used to reference objects off our PV. */
7282 static int vms_base_regno;
7284 /* Compute register masks for saved registers. */
7286 static void
7287 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7288 {
7289 unsigned long imask = 0;
7290 unsigned long fmask = 0;
7291 unsigned int i;
7293 /* When outputting a thunk, we don't have valid register life info,
7294 but assemble_start_function wants to output .frame and .mask
7295 directives.  */
7296 if (cfun->is_thunk)
7297 {
7298 *imaskP = 0;
7299 *fmaskP = 0;
7300 return;
7301 }
7303 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7304 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7306 /* One for every register we have to save. */
7307 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7308 if (! fixed_regs[i] && ! call_used_regs[i]
7309 && df_regs_ever_live_p (i) && i != REG_RA
7310 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7311 {
7312 if (i < 32)
7313 imask |= (1UL << i);
7314 else
7315 fmask |= (1UL << (i - 32));
7316 }
7318 /* We need to restore these for the handler. */
7319 if (crtl->calls_eh_return)
7320 for (i = 0; ; ++i)
7321 {
7323 unsigned regno = EH_RETURN_DATA_REGNO (i);
7324 if (regno == INVALID_REGNUM)
7325 break;
7326 imask |= 1UL << regno;
7327 }
7330 /* If any register spilled, then spill the return address also. */
7331 /* ??? This is required by the Digital stack unwind specification
7332 and isn't needed if we're doing Dwarf2 unwinding. */
7333 if (imask || fmask || alpha_ra_ever_killed ())
7334 imask |= (1UL << REG_RA);
7336 *imaskP = imask;
7337 *fmaskP = fmask;
7338 }
7340 int
7341 alpha_sa_size (void)
7342 {
7343 unsigned long mask[2];
7344 int sa_size = 0;
7345 unsigned int i, j;
7347 alpha_sa_mask (&mask[0], &mask[1]);
7349 if (TARGET_ABI_UNICOSMK)
7350 {
7351 if (mask[0] || mask[1])
7352 sa_size = 14;
7353 }
7354 else
7355 {
7356 for (j = 0; j < 2; ++j)
7357 for (i = 0; i < 32; ++i)
7358 if ((mask[j] >> i) & 1)
7359 sa_size++;
7360 }
7362 if (TARGET_ABI_UNICOSMK)
7363 {
7364 /* We might not need to generate a frame if we don't make any calls
7365 (including calls to __T3E_MISMATCH if this is a vararg function),
7366 don't have any local variables which require stack slots, don't
7367 use alloca and have not determined that we need a frame for other
7368 reasons.  */
7370 alpha_procedure_type
7371 = (sa_size || get_frame_size() != 0
7372 || crtl->outgoing_args_size
7373 || cfun->stdarg || cfun->calls_alloca
7374 || frame_pointer_needed)
7375 ? PT_STACK : PT_REGISTER;
7377 /* Always reserve space for saving callee-saved registers if we
7378 need a frame as required by the calling convention. */
7379 if (alpha_procedure_type == PT_STACK)
7380 sa_size = 14;
7381 }
7382 else if (TARGET_ABI_OPEN_VMS)
7383 {
7384 /* Start with a stack procedure if we make any calls (REG_RA used), or
7385 need a frame pointer, with a register procedure if we otherwise need
7386 at least a slot, and with a null procedure in other cases. */
7387 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7388 alpha_procedure_type = PT_STACK;
7389 else if (get_frame_size() != 0)
7390 alpha_procedure_type = PT_REGISTER;
7391 else
7392 alpha_procedure_type = PT_NULL;
7394 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7395 made the final decision on stack procedure vs register procedure. */
7396 if (alpha_procedure_type == PT_STACK)
7397 sa_size -= 2;
7399 /* Decide whether to refer to objects off our PV via FP or PV.
7400 If we need FP for something else or if we receive a nonlocal
7401 goto (which expects PV to contain the value), we must use PV.
7402 Otherwise, start by assuming we can use FP.  */
7404 vms_base_regno
7405 = (frame_pointer_needed
7406 || cfun->has_nonlocal_label
7407 || alpha_procedure_type == PT_STACK
7408 || crtl->outgoing_args_size)
7409 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7411 /* If we want to copy PV into FP, we need to find some register
7412 in which to save FP. */
7414 vms_save_fp_regno = -1;
7415 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7416 for (i = 0; i < 32; i++)
7417 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7418 vms_save_fp_regno = i;
7420 /* A VMS condition handler requires a stack procedure in our
7421 implementation (it is not required by the calling standard).  */
7422 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7423 || cfun->machine->uses_condition_handler)
7424 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7425 else if (alpha_procedure_type == PT_NULL)
7426 vms_base_regno = REG_PV;
7428 /* Stack unwinding should be done via FP unless we use it for PV. */
7429 vms_unwind_regno = (vms_base_regno == REG_PV
7430 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7432 /* If this is a stack procedure, allow space for saving FP, RA and
7433 a condition handler slot if needed. */
7434 if (alpha_procedure_type == PT_STACK)
7435 sa_size += 2 + cfun->machine->uses_condition_handler;
7436 }
7437 else
7438 {
7439 /* Our size must be even (multiple of 16 bytes).  */
7440 if (sa_size & 1)
7441 sa_size++;
7442 }
7444 return sa_size * 8;
7445 }
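/* Editorial note: a worked example of the computation above, with
   illustrative numbers.  On OSF, saving $9, $10 and $26 makes the
   counting loop produce sa_size == 3; the evenness fixup bumps that
   to 4, and the function returns 4 * 8 == 32 bytes, a multiple of 16
   as the comment requires.  */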
7447 /* Define the offset between two registers, one to be eliminated,
7448 and the other its replacement, at the start of a routine. */
7450 HOST_WIDE_INT
7451 alpha_initial_elimination_offset (unsigned int from,
7452 unsigned int to ATTRIBUTE_UNUSED)
7453 {
7454 HOST_WIDE_INT ret;
7456 ret = alpha_sa_size ();
7457 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7459 switch (from)
7460 {
7461 case FRAME_POINTER_REGNUM:
7462 break;
7464 case ARG_POINTER_REGNUM:
7465 ret += (ALPHA_ROUND (get_frame_size ()
7466 + crtl->args.pretend_args_size)
7467 - crtl->args.pretend_args_size);
7468 break;
7470 default:
7471 gcc_unreachable ();
7472 }
7474 return ret;
7475 }
7477 #if TARGET_ABI_OPEN_VMS
7479 /* Worker function for TARGET_CAN_ELIMINATE. */
7481 static bool
7482 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7483 {
7484 /* We need the alpha_procedure_type to decide. Evaluate it now.  */
7485 alpha_sa_size ();
7487 switch (alpha_procedure_type)
7488 {
7489 case PT_NULL:
7490 /* NULL procedures have no frame of their own and we only
7491 know how to resolve from the current stack pointer.  */
7492 return to == STACK_POINTER_REGNUM;
7494 case PT_REGISTER:
7495 case PT_STACK:
7496 /* We always eliminate except to the stack pointer if there is no
7497 usable frame pointer at hand.  */
7498 return (to != STACK_POINTER_REGNUM
7499 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7500 }
7502 gcc_unreachable ();
7503 }
7505 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7506 designates the same location as FROM. */
7508 HOST_WIDE_INT
7509 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7510 {
7511 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7512 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7513 on the proper computations and will need the register save area size
7514 in most cases.  */
7516 HOST_WIDE_INT sa_size = alpha_sa_size ();
7518 /* PT_NULL procedures have no frame of their own and we only allow
7519 elimination to the stack pointer. This is the argument pointer and we
7520 resolve the soft frame pointer to that as well.  */
7522 if (alpha_procedure_type == PT_NULL)
7523 return 0;
7525 /* For a PT_STACK procedure the frame layout looks as follows
7527                        ----> decreasing addresses
7529     <             size rounded up to 16       |   likewise   >
7530  --------------#------------------------------+++--------------+++-------#
7531  incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7532  --------------#---------------------------------------------------------#
7533                ^                         ^              ^               ^
7534           ARG_PTR                 FRAME_PTR  HARD_FRAME_PTR      STACK_PTR
7537 PT_REGISTER procedures are similar in that they may have a frame of their
7538 own. They have no regs-sa/pv/outgoing-args area.
7540 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7541 to STACK_PTR if need be.  */
7543 {
7544 HOST_WIDE_INT offset;
7545 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7547 switch (from)
7548 {
7549 case FRAME_POINTER_REGNUM:
7550 offset = ALPHA_ROUND (sa_size + pv_save_size);
7551 break;
7552 case ARG_POINTER_REGNUM:
7553 offset = (ALPHA_ROUND (sa_size + pv_save_size
7554 + get_frame_size ()
7555 + crtl->args.pretend_args_size)
7556 - crtl->args.pretend_args_size);
7557 break;
7558 default:
7559 gcc_unreachable ();
7560 }
7562 if (to == STACK_POINTER_REGNUM)
7563 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7565 return offset;
7566 }
7567 }
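/* Editorial note: a worked example against the layout pictured above,
   with illustrative numbers.  For a PT_STACK procedure with
   sa_size == 24, pv_save_size == 8, a 40-byte frame, no pretend args
   and 48 bytes of outgoing args: FRAME_PTR eliminates to
   HARD_FRAME_PTR at ALPHA_ROUND (24 + 8) == 32, ARG_PTR at
   ALPHA_ROUND (32 + 40) == 80, and eliminating to STACK_PTR adds
   another ALPHA_ROUND (48) == 48.  */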
7569 #define COMMON_OBJECT "common_object"
7571 static tree
7572 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7573 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7574 bool *no_add_attrs ATTRIBUTE_UNUSED)
7575 {
7576 tree decl = *node;
7577 gcc_assert (DECL_P (decl));
7579 DECL_COMMON (decl) = 1;
7580 return NULL_TREE;
7581 }
7583 static const struct attribute_spec vms_attribute_table[] =
7584 {
7585 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7586 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler },
7587 { NULL, 0, 0, false, false, false, NULL }
7588 };
7590 void
7591 vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
7592 unsigned HOST_WIDE_INT size,
7593 unsigned int align)
7594 {
7595 tree attr = DECL_ATTRIBUTES (decl);
7596 fprintf (file, "%s", COMMON_ASM_OP);
7597 assemble_name (file, name);
7598 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7599 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7600 fprintf (file, ",%u", align / BITS_PER_UNIT);
7602 if (attr)
7603 attr = lookup_attribute (COMMON_OBJECT, attr);
7604 if (attr)
7605 fprintf (file, ",%s",
7606 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7608 fputc ('\n', file);
7609 }
7611 #undef COMMON_OBJECT
7613 #endif
7615 static int
7616 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7617 {
7618 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7619 }
7621 int
7622 alpha_find_lo_sum_using_gp (rtx insn)
7623 {
7624 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7625 }
7627 static int
7628 alpha_does_function_need_gp (void)
7629 {
7630 rtx insn;
7632 /* The GP being variable is an OSF abi thing.  */
7633 if (! TARGET_ABI_OSF)
7634 return 0;
7636 /* We need the gp to load the address of __mcount.  */
7637 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7638 return 1;
7640 /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
7641 if (cfun->is_thunk)
7642 return 1;
7644 /* The nonlocal receiver pattern assumes that the gp is valid for
7645 the nested function. Reasonable because it's almost always set
7646 correctly already. For the cases where that's wrong, make sure
7647 the nested function loads its gp on entry. */
7648 if (crtl->has_nonlocal_goto)
7649 return 1;
7651 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7652 Even if we are a static function, we still need to do this in case
7653 our address is taken and passed to something like qsort. */
7655 push_topmost_sequence ();
7656 insn = get_insns ();
7657 pop_topmost_sequence ();
7659 for (; insn; insn = NEXT_INSN (insn))
7660 if (NONDEBUG_INSN_P (insn)
7661 && ! JUMP_TABLE_DATA_P (insn)
7662 && GET_CODE (PATTERN (insn)) != USE
7663 && GET_CODE (PATTERN (insn)) != CLOBBER
7664 && get_attr_usegp (insn))
7665 return 1;
7667 return 0;
7668 }
7671 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7672 sequences.  */
7674 static rtx
7675 set_frame_related_p (void)
7676 {
7677 rtx seq = get_insns ();
7678 rtx insn;
7680 end_sequence ();
7682 if (!seq)
7683 return NULL_RTX;
7685 if (INSN_P (seq))
7686 {
7687 insn = seq;
7688 while (insn != NULL_RTX)
7689 {
7690 RTX_FRAME_RELATED_P (insn) = 1;
7691 insn = NEXT_INSN (insn);
7692 }
7693 seq = emit_insn (seq);
7694 }
7695 else
7696 {
7697 seq = emit_insn (seq);
7698 RTX_FRAME_RELATED_P (seq) = 1;
7699 }
7700 return seq;
7701 }
7703 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
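/* Editorial note: FRP opens its own insn sequence around EXP so that
   set_frame_related_p can mark everything EXP emitted.  A typical
   use, as in the prologue below:
   FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                               GEN_INT (-frame_size))));  */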
7705 /* Generates a store with the proper unwind info attached. VALUE is
7706 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7707 contains SP+FRAME_BIAS, and that is the unwind info that should be
7708 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7709 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7711 static void
7712 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7713 HOST_WIDE_INT base_ofs, rtx frame_reg)
7714 {
7715 rtx addr, mem, insn;
7717 addr = plus_constant (base_reg, base_ofs);
7718 mem = gen_rtx_MEM (DImode, addr);
7719 set_mem_alias_set (mem, alpha_sr_alias_set);
7721 insn = emit_move_insn (mem, value);
7722 RTX_FRAME_RELATED_P (insn) = 1;
7724 if (frame_bias || value != frame_reg)
7725 {
7726 if (frame_bias)
7727 {
7728 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7729 mem = gen_rtx_MEM (DImode, addr);
7730 }
7732 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7733 gen_rtx_SET (VOIDmode, mem, frame_reg));
7734 }
7735 }
7737 static void
7738 emit_frame_store (unsigned int regno, rtx base_reg,
7739 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7741 rtx reg = gen_rtx_REG (DImode, regno);
7742 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7743 }
7745 /* Compute the frame size. SIZE is the size of the "naked" frame
7746 and SA_SIZE is the size of the register save area. */
7748 static HOST_WIDE_INT
7749 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7750 {
7751 if (TARGET_ABI_OPEN_VMS)
7752 return ALPHA_ROUND (sa_size
7753 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7754 + size
7755 + crtl->args.pretend_args_size);
7756 else if (TARGET_ABI_UNICOSMK)
7757 /* We have to allocate space for the DSIB if we generate a frame.  */
7758 return ALPHA_ROUND (sa_size
7759 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7760 + ALPHA_ROUND (get_frame_size ()
7761 + crtl->outgoing_args_size);
7762 else
7763 return ALPHA_ROUND (crtl->outgoing_args_size)
7764 + sa_size
7765 + ALPHA_ROUND (size
7766 + crtl->args.pretend_args_size);
7767 }
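/* Editorial note: a minimal standalone sketch (kept out of the build)
   of the OSF branch above, assuming only that ALPHA_ROUND rounds up
   to a multiple of 16.  ROUND16 and all numbers are illustrative
   stand-ins, not GCC interfaces.  */
#if 0
#include <stdio.h>

#define ROUND16(x) (((x) + 15) & ~15L)	/* stand-in for ALPHA_ROUND */

int
main (void)
{
  long outgoing_args = 48, sa_size = 32, size = 40, pretend = 0;

  /* Mirrors: ALPHA_ROUND (outgoing) + sa_size + ALPHA_ROUND (size + pretend).  */
  long frame = ROUND16 (outgoing_args) + sa_size + ROUND16 (size + pretend);

  printf ("frame size = %ld\n", frame);	/* prints 128 */
  return 0;
}
#endif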
7769 /* Write function prologue. */
7771 /* On vms we have two kinds of functions:
7773 - stack frame (PROC_STACK)
7774 these are 'normal' functions with local vars and which are
7775 calling other functions
7776 - register frame (PROC_REGISTER)
7777 keeps all data in registers, needs no stack
7779 We must pass this to the assembler so it can generate the
7780 proper pdsc (procedure descriptor)
7781 This is done with the '.pdesc' command.
7783 On not-vms, we don't really differentiate between the two, as we can
7784 simply allocate stack without saving registers.  */
7786 void
7787 alpha_expand_prologue (void)
7788 {
7789 /* Registers to save. */
7790 unsigned long imask = 0;
7791 unsigned long fmask = 0;
7792 /* Stack space needed for pushing registers clobbered by us. */
7793 HOST_WIDE_INT sa_size;
7794 /* Complete stack size needed. */
7795 HOST_WIDE_INT frame_size;
7796 /* Probed stack size; it additionally includes the size of
7797 the "reserve region" if any. */
7798 HOST_WIDE_INT probed_size;
7799 /* Offset from base reg to register save area. */
7800 HOST_WIDE_INT reg_offset;
7801 rtx sa_reg;
7802 int i;
7804 sa_size = alpha_sa_size ();
7805 frame_size = compute_frame_size (get_frame_size (), sa_size);
7807 if (flag_stack_usage)
7808 current_function_static_stack_size = frame_size;
7810 if (TARGET_ABI_OPEN_VMS)
7811 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7812 else
7813 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7815 alpha_sa_mask (&imask, &fmask);
7817 /* Emit an insn to reload GP, if needed.  */
7818 if (TARGET_ABI_OSF)
7819 {
7820 alpha_function_needs_gp = alpha_does_function_need_gp ();
7821 if (alpha_function_needs_gp)
7822 emit_insn (gen_prologue_ldgp ());
7823 }
7825 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7826 the call to mcount ourselves, rather than having the linker do it
7827 magically in response to -pg. Since _mcount has special linkage,
7828 don't represent the call as a call. */
7829 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7830 emit_insn (gen_prologue_mcount ());
7832 if (TARGET_ABI_UNICOSMK)
7833 unicosmk_gen_dsib (&imask);
7835 /* Adjust the stack by the frame size. If the frame size is > 4096
7836 bytes, we need to be sure we probe somewhere in the first and last
7837 4096 bytes (we can probably get away without the latter test) and
7838 every 8192 bytes in between. If the frame size is > 32768, we
7839 do this in a loop. Otherwise, we generate the explicit probe
7840 instructions.
7842 Note that we are only allowed to adjust sp once in the prologue.  */
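/* Editorial note: a minimal standalone sketch (kept out of the build)
   of the probe schedule just described, small-frame path only; the
   frame size is an illustrative stand-in.  */
#if 0
#include <stdio.h>

int
main (void)
{
  long probed_size = 20000;	/* <= 32768 but > 4096 */
  long probed;

  /* Mirrors the loop below: one probe at 4096, then every 8192 bytes;
     for 20000 this touches sp-4096 and sp-12288.  */
  for (probed = 4096; probed < probed_size; probed += 8192)
    printf ("probe at sp-%ld\n", probed);

  /* The tail probe at the full size, when nothing else would have
     touched the last page.  */
  if (probed_size > probed - 4096)
    printf ("probe at sp-%ld\n", probed_size);
  return 0;
}
#endif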
7844 probed_size = frame_size;
7845 if (flag_stack_check)
7846 probed_size += STACK_CHECK_PROTECT;
7848 if (probed_size <= 32768)
7849 {
7850 if (probed_size > 4096)
7851 {
7852 int probed;
7854 for (probed = 4096; probed < probed_size; probed += 8192)
7855 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7856 ? -probed + 64
7857 : -probed)));
7859 /* We only have to do this probe if we aren't saving registers or
7860 if we are probing beyond the frame because of -fstack-check.  */
7861 if ((sa_size == 0 && probed_size > probed - 4096)
7862 || flag_stack_check)
7863 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7864 }
7866 if (frame_size != 0)
7867 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7868 GEN_INT (TARGET_ABI_UNICOSMK
7869 ? -frame_size + 64
7870 : -frame_size))));
7871 }
7872 else
7873 {
7874 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7875 number of 8192 byte blocks to probe. We then probe each block
7876 in the loop and then set SP to the proper location. If the
7877 amount remaining is > 4096, we have to do one more probe if we
7878 are not saving any registers or if we are probing beyond the
7879 frame because of -fstack-check. */
7881 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7882 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7883 rtx ptr = gen_rtx_REG (DImode, 22);
7884 rtx count = gen_rtx_REG (DImode, 23);
7885 rtx seq;
7887 emit_move_insn (count, GEN_INT (blocks));
7888 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7889 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7891 /* Because of the difficulty in emitting a new basic block this
7892 late in the compilation, generate the loop as a single insn. */
7893 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7895 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7896 {
7897 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7898 MEM_VOLATILE_P (last) = 1;
7899 emit_move_insn (last, const0_rtx);
7900 }
7902 if (TARGET_ABI_WINDOWS_NT || flag_stack_check)
7903 {
7904 /* For NT stack unwind (done by 'reverse execution'), it's
7905 not OK to take the result of a loop, even though the value
7906 is already in ptr, so we reload it via a single operation
7907 and subtract it to sp.
7909 Same if -fstack-check is specified, because the probed stack
7910 size is not equal to the frame size.
7912 Yes, that's correct -- we have to reload the whole constant
7913 into a temporary via ldah+lda then subtract from sp. */
7915 HOST_WIDE_INT lo, hi;
7916 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7917 hi = frame_size - lo;
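/* Editorial note: a worked example of the split above (illustrative
   value).  For frame_size == 0x18000: lo == ((0x8000 ^ 0x8000)
   - 0x8000) == -0x8000 and hi == 0x20000; an ldah adding 2 in the
   high 16 bits followed by an lda adding -32768 reconstructs
   0x20000 - 0x8000 == 0x18000 exactly.  */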
7919 emit_move_insn (ptr, GEN_INT (hi));
7920 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7921 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7922 ptr));
7923 }
7924 else
7926 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7927 GEN_INT (-leftover)));
7930 /* This alternative is special, because the DWARF code cannot
7931 possibly intuit through the loop above. So we invent this
7932 note it looks at instead. */
7933 RTX_FRAME_RELATED_P (seq) = 1;
7934 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7935 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7936 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7937 GEN_INT (TARGET_ABI_UNICOSMK
7938 ? -frame_size + 64
7939 : -frame_size))));
7940 }
7942 if (!TARGET_ABI_UNICOSMK)
7943 {
7944 HOST_WIDE_INT sa_bias = 0;
7946 /* Cope with very large offsets to the register save area. */
7947 sa_reg = stack_pointer_rtx;
7948 if (reg_offset + sa_size > 0x8000)
7949 {
7950 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7951 rtx sa_bias_rtx;
7953 if (low + sa_size <= 0x8000)
7954 sa_bias = reg_offset - low, reg_offset = low;
7955 else
7956 sa_bias = reg_offset, reg_offset = 0;
7958 sa_reg = gen_rtx_REG (DImode, 24);
7959 sa_bias_rtx = GEN_INT (sa_bias);
7961 if (add_operand (sa_bias_rtx, DImode))
7962 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7963 else
7964 {
7965 emit_move_insn (sa_reg, sa_bias_rtx);
7966 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7967 }
7968 }
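/* Editorial note: a worked example of the bias logic above, with
   illustrative values.  For reg_offset == 0x9000 and sa_size ==
   0x100: low == -0x7000, and since -0x7000 + 0x100 <= 0x8000 we take
   sa_bias == 0x10000 and reg_offset == -0x7000, leaving every save
   slot addressable off $24 with a signed 16-bit displacement.  */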
7970 /* Save regs in stack order. Beginning with VMS PV. */
7971 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7972 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7974 /* Save register RA next. */
7975 if (imask & (1UL << REG_RA))
7976 {
7977 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7978 imask &= ~(1UL << REG_RA);
7979 reg_offset += 8;
7980 }
7982 /* Now save any other registers required to be saved. */
7983 for (i = 0; i < 31; i++)
7984 if (imask & (1UL << i))
7985 {
7986 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7987 reg_offset += 8;
7988 }
7990 for (i = 0; i < 31; i++)
7991 if (fmask & (1UL << i))
7992 {
7993 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7994 reg_offset += 8;
7995 }
7996 }
7997 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7998 {
7999 /* The standard frame on the T3E includes space for saving registers.
8000 We just have to use it. We don't have to save the return address and
8001 the old frame pointer here - they are saved in the DSIB.  */
8003 reg_offset = -56;
8004 for (i = 9; i < 15; i++)
8005 if (imask & (1UL << i))
8006 {
8007 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
8008 reg_offset -= 8;
8009 }
8010 for (i = 2; i < 10; i++)
8011 if (fmask & (1UL << i))
8012 {
8013 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
8014 reg_offset -= 8;
8015 }
8016 }
8018 if (TARGET_ABI_OPEN_VMS)
8019 {
8020 /* Register frame procedures save the fp.  */
8021 if (alpha_procedure_type == PT_REGISTER)
8022 {
8023 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
8024 hard_frame_pointer_rtx);
8025 add_reg_note (insn, REG_CFA_REGISTER, NULL);
8026 RTX_FRAME_RELATED_P (insn) = 1;
8027 }
8029 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
8030 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
8031 gen_rtx_REG (DImode, REG_PV)));
8033 if (alpha_procedure_type != PT_NULL
8034 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8035 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8037 /* If we have to allocate space for outgoing args, do it now. */
8038 if (crtl->outgoing_args_size != 0)
8039 {
8040 rtx seq
8041 = emit_move_insn (stack_pointer_rtx,
8042 plus_constant
8043 (hard_frame_pointer_rtx,
8044 - (ALPHA_ROUND
8045 (crtl->outgoing_args_size))));
8047 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
8048 if ! frame_pointer_needed. Setting the bit will change the CFA
8049 computation rule to use sp again, which would be wrong if we had
8050 frame_pointer_needed, as this means sp might move unpredictably
8051 later on.
8053 Also, note that
8054 frame_pointer_needed
8055 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8056 and
8057 crtl->outgoing_args_size != 0
8058 => alpha_procedure_type != PT_NULL,
8060 so when we are not setting the bit here, we are guaranteed to
8061 have emitted an FRP frame pointer update just before. */
8062 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
8063 }
8064 }
8065 else if (!TARGET_ABI_UNICOSMK)
8066 {
8067 /* If we need a frame pointer, set it from the stack pointer.  */
8068 if (frame_pointer_needed)
8069 {
8070 if (TARGET_CAN_FAULT_IN_PROLOGUE)
8071 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8072 else
8073 /* This must always be the last instruction in the
8074 prologue, thus we emit a special move + clobber. */
8075 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
8076 stack_pointer_rtx, sa_reg)));
8077 }
8078 }
8080 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
8081 the prologue, for exception handling reasons, we cannot do this for
8082 any insn that might fault. We could prevent this for mems with a
8083 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
8084 have to prevent all such scheduling with a blockage.
8086 Linux, on the other hand, never bothered to implement OSF/1's
8087 exception handling, and so doesn't care about such things. Anyone
8088 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8090 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
8091 emit_insn (gen_blockage ());
8092 }
8094 /* Count the number of .file directives, so that .loc is up to date. */
8095 int num_source_filenames = 0;
8097 /* Output the textual info surrounding the prologue. */
8099 void
8100 alpha_start_function (FILE *file, const char *fnname,
8101 tree decl ATTRIBUTE_UNUSED)
8102 {
8103 unsigned long imask = 0;
8104 unsigned long fmask = 0;
8105 /* Stack space needed for pushing registers clobbered by us. */
8106 HOST_WIDE_INT sa_size;
8107 /* Complete stack size needed. */
8108 unsigned HOST_WIDE_INT frame_size;
8109 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
8110 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
8111 ? 524288
8112 : 1UL << 31;
8113 /* Offset from base reg to register save area.  */
8114 HOST_WIDE_INT reg_offset;
8115 char *entry_label = (char *) alloca (strlen (fnname) + 6);
8116 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8117 int i;
8119 /* Don't emit an extern directive for functions defined in the same file. */
8120 if (TARGET_ABI_UNICOSMK)
8121 {
8122 tree name_tree;
8123 name_tree = get_identifier (fnname);
8124 TREE_ASM_WRITTEN (name_tree) = 1;
8125 }
8127 #if TARGET_ABI_OPEN_VMS
8128 if (vms_debug_main
8129 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
8130 {
8131 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
8132 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
8133 switch_to_section (text_section);
8134 vms_debug_main = NULL;
8135 }
8136 #endif
8138 alpha_fnname = fnname;
8139 sa_size = alpha_sa_size ();
8140 frame_size = compute_frame_size (get_frame_size (), sa_size);
8142 if (TARGET_ABI_OPEN_VMS)
8143 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8145 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8147 alpha_sa_mask (&imask, &fmask);
8149 /* Ecoff can handle multiple .file directives, so put out file and lineno.
8150 We have to do that before the .ent directive as we cannot switch
8151 files within procedures with native ecoff because line numbers are
8152 linked to procedure descriptors.
8153 Outputting the lineno helps debugging of one line functions as they
8154 would otherwise get no line number at all. Please note that we would
8155 like to put out last_linenum from final.c, but it is not accessible. */
8157 if (write_symbols == SDB_DEBUG)
8158 {
8159 #ifdef ASM_OUTPUT_SOURCE_FILENAME
8160 ASM_OUTPUT_SOURCE_FILENAME (file,
8161 DECL_SOURCE_FILE (current_function_decl));
8162 #endif
8163 #ifdef SDB_OUTPUT_SOURCE_LINE
8164 if (debug_info_level != DINFO_LEVEL_TERSE)
8165 SDB_OUTPUT_SOURCE_LINE (file,
8166 DECL_SOURCE_LINE (current_function_decl));
8167 #endif
8168 }
8170 /* Issue function start and label. */
8171 if (TARGET_ABI_OPEN_VMS
8172 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
8173 {
8174 fputs ("\t.ent ", file);
8175 assemble_name (file, fnname);
8176 putc ('\n', file);
8178 /* If the function needs GP, we'll write the "..ng" label there.
8179 Otherwise, do it here.  */
8180 if (TARGET_ABI_OSF
8181 && ! alpha_function_needs_gp
8182 && ! cfun->is_thunk)
8183 {
8184 putc ('$', file);
8185 assemble_name (file, fnname);
8186 fputs ("..ng:\n", file);
8187 }
8188 }
8189 /* Nested functions on VMS that are potentially called via trampoline
8190 get a special transfer entry point that loads the called functions
8191 procedure descriptor and static chain. */
8192 if (TARGET_ABI_OPEN_VMS
8193 && !TREE_PUBLIC (decl)
8194 && DECL_CONTEXT (decl)
8195 && !TYPE_P (DECL_CONTEXT (decl)))
8196 {
8197 strcpy (tramp_label, fnname);
8198 strcat (tramp_label, "..tr");
8199 ASM_OUTPUT_LABEL (file, tramp_label);
8200 fprintf (file, "\tldq $1,24($27)\n");
8201 fprintf (file, "\tldq $27,16($27)\n");
8202 }
8204 strcpy (entry_label, fnname);
8205 if (TARGET_ABI_OPEN_VMS)
8206 strcat (entry_label, "..en");
8208 /* For public functions, the label must be globalized by appending an
8209 additional colon. */
8210 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
8211 strcat (entry_label, ":");
8213 ASM_OUTPUT_LABEL (file, entry_label);
8214 inside_function = TRUE;
8216 if (TARGET_ABI_OPEN_VMS)
8217 fprintf (file, "\t.base $%d\n", vms_base_regno);
8219 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
8220 && !flag_inhibit_size_directive)
8221 {
8222 /* Set flags in procedure descriptor to request IEEE-conformant
8223 math-library routines. The value we set it to is PDSC_EXC_IEEE
8224 (/usr/include/pdsc.h). */
8225 fputs ("\t.eflag 48\n", file);
8226 }
8228 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8229 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8230 alpha_arg_offset = -frame_size + 48;
8232 /* Describe our frame. If the frame size is larger than an integer,
8233 print it as zero to avoid an assembler error. We won't be
8234 properly describing such a frame, but that's the best we can do. */
8235 if (TARGET_ABI_UNICOSMK)
8236 ;
8237 else if (TARGET_ABI_OPEN_VMS)
8238 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8239 HOST_WIDE_INT_PRINT_DEC "\n",
8240 vms_unwind_regno,
8241 frame_size >= (1UL << 31) ? 0 : frame_size,
8242 reg_offset);
8243 else if (!flag_inhibit_size_directive)
8244 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8245 (frame_pointer_needed
8246 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8247 frame_size >= max_frame_size ? 0 : frame_size,
8248 crtl->args.pretend_args_size);
8250 /* Describe which registers were spilled. */
8251 if (TARGET_ABI_UNICOSMK)
8252 ;
8253 else if (TARGET_ABI_OPEN_VMS)
8254 {
8256 /* ??? Does VMS care if mask contains ra? The old code didn't
8257 set it, so I don't here.  */
8258 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8260 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8261 if (alpha_procedure_type == PT_REGISTER)
8262 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8263 }
8264 else if (!flag_inhibit_size_directive)
8265 {
8266 if (imask)
8267 {
8268 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8269 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8271 for (i = 0; i < 32; ++i)
8272 if (imask & (1UL << i))
8273 reg_offset += 8;
8274 }
8276 if (fmask)
8277 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8278 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8279 }
8281 #if TARGET_ABI_OPEN_VMS
8282 /* If a user condition handler has been installed at some point, emit
8283 the procedure descriptor bits to point the Condition Handling Facility
8284 at the indirection wrapper, and state the fp offset at which the user
8285 handler may be found. */
8286 if (cfun->machine->uses_condition_handler)
8287 {
8288 fprintf (file, "\t.handler __gcc_shell_handler\n");
8289 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8290 }
8292 /* Ifdef'ed because link sections are only available then.  */
8293 switch_to_section (readonly_data_section);
8294 fprintf (file, "\t.align 3\n");
8295 assemble_name (file, fnname); fputs ("..na:\n", file);
8296 fputs ("\t.ascii \"", file);
8297 assemble_name (file, fnname);
8298 fputs ("\\0\"\n", file);
8299 alpha_need_linkage (fnname, 1);
8300 switch_to_section (text_section);
8301 #endif
8302 }
8304 /* Emit the .prologue note at the scheduled end of the prologue. */
8306 static void
8307 alpha_output_function_end_prologue (FILE *file)
8308 {
8309 if (TARGET_ABI_UNICOSMK)
8310 ;
8311 else if (TARGET_ABI_OPEN_VMS)
8312 fputs ("\t.prologue\n", file);
8313 else if (TARGET_ABI_WINDOWS_NT)
8314 fputs ("\t.prologue 0\n", file);
8315 else if (!flag_inhibit_size_directive)
8316 fprintf (file, "\t.prologue %d\n",
8317 alpha_function_needs_gp || cfun->is_thunk);
8318 }
8320 /* Write function epilogue.  */
8322 void
8323 alpha_expand_epilogue (void)
8324 {
8325 /* Registers to save. */
8326 unsigned long imask = 0;
8327 unsigned long fmask = 0;
8328 /* Stack space needed for pushing registers clobbered by us. */
8329 HOST_WIDE_INT sa_size;
8330 /* Complete stack size needed. */
8331 HOST_WIDE_INT frame_size;
8332 /* Offset from base reg to register save area. */
8333 HOST_WIDE_INT reg_offset;
8334 int fp_is_frame_pointer, fp_offset;
8335 rtx sa_reg, sa_reg_exp = NULL;
8336 rtx sp_adj1, sp_adj2, mem, reg, insn;
8337 rtx eh_ofs;
8338 rtx cfa_restores = NULL_RTX;
8339 int i;
8341 sa_size = alpha_sa_size ();
8342 frame_size = compute_frame_size (get_frame_size (), sa_size);
8344 if (TARGET_ABI_OPEN_VMS)
8345 {
8346 if (alpha_procedure_type == PT_STACK)
8347 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8348 else
8349 reg_offset = 0;
8350 }
8351 else
8352 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8354 alpha_sa_mask (&imask, &fmask);
8356 fp_is_frame_pointer
8357 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8358 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8359 fp_offset = 0;
8360 sa_reg = stack_pointer_rtx;
8362 if (crtl->calls_eh_return)
8363 eh_ofs = EH_RETURN_STACKADJ_RTX;
8364 else
8365 eh_ofs = NULL_RTX;
8367 if (!TARGET_ABI_UNICOSMK && sa_size)
8368 {
8369 /* If we have a frame pointer, restore SP from it. */
8370 if ((TARGET_ABI_OPEN_VMS
8371 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8372 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8373 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8375 /* Cope with very large offsets to the register save area. */
8376 if (reg_offset + sa_size > 0x8000)
8377 {
8378 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8379 HOST_WIDE_INT bias;
8381 if (low + sa_size <= 0x8000)
8382 bias = reg_offset - low, reg_offset = low;
8383 else
8384 bias = reg_offset, reg_offset = 0;
8386 sa_reg = gen_rtx_REG (DImode, 22);
8387 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8389 emit_move_insn (sa_reg, sa_reg_exp);
8390 }
8392 /* Restore registers in order, excepting a true frame pointer. */
8394 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8395 if (! eh_ofs)
8396 set_mem_alias_set (mem, alpha_sr_alias_set);
8397 reg = gen_rtx_REG (DImode, REG_RA);
8398 emit_move_insn (reg, mem);
8399 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8401 reg_offset += 8;
8402 imask &= ~(1UL << REG_RA);
8404 for (i = 0; i < 31; ++i)
8405 if (imask & (1UL << i))
8406 {
8407 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8408 fp_offset = reg_offset;
8409 else
8410 {
8411 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8412 set_mem_alias_set (mem, alpha_sr_alias_set);
8413 reg = gen_rtx_REG (DImode, i);
8414 emit_move_insn (reg, mem);
8415 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8416 cfa_restores);
8417 }
8418 reg_offset += 8;
8419 }
8421 for (i = 0; i < 31; ++i)
8422 if (fmask & (1UL << i))
8423 {
8424 mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
8425 set_mem_alias_set (mem, alpha_sr_alias_set);
8426 reg = gen_rtx_REG (DFmode, i+32);
8427 emit_move_insn (reg, mem);
8428 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8429 reg_offset += 8;
8430 }
8431 }
8432 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8433 {
8434 /* Restore callee-saved general-purpose registers.  */
8436 reg_offset = -56;
8438 for (i = 9; i < 15; i++)
8439 if (imask & (1UL << i))
8440 {
8441 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx,
8442 reg_offset));
8443 set_mem_alias_set (mem, alpha_sr_alias_set);
8444 reg = gen_rtx_REG (DImode, i);
8445 emit_move_insn (reg, mem);
8446 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8447 reg_offset -= 8;
8448 }
8450 for (i = 2; i < 10; i++)
8451 if (fmask & (1UL << i))
8452 {
8453 mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
8454 reg_offset));
8455 set_mem_alias_set (mem, alpha_sr_alias_set);
8456 reg = gen_rtx_REG (DFmode, i+32);
8457 emit_move_insn (reg, mem);
8458 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8459 reg_offset -= 8;
8460 }
8462 /* Restore the return address from the DSIB.  */
8463 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
8464 set_mem_alias_set (mem, alpha_sr_alias_set);
8465 reg = gen_rtx_REG (DImode, REG_RA);
8466 emit_move_insn (reg, mem);
8467 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8468 }
8470 if (frame_size || eh_ofs)
8471 {
8472 sp_adj1 = stack_pointer_rtx;
8474 if (eh_ofs)
8475 {
8476 sp_adj1 = gen_rtx_REG (DImode, 23);
8477 emit_move_insn (sp_adj1,
8478 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8479 }
8481 /* If the stack size is large, begin computation into a temporary
8482 register so as not to interfere with a potential fp restore,
8483 which must be consecutive with an SP restore. */
8484 if (frame_size < 32768
8485 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8486 sp_adj2 = GEN_INT (frame_size);
8487 else if (TARGET_ABI_UNICOSMK)
8488 {
8489 sp_adj1 = gen_rtx_REG (DImode, 23);
8490 emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
8491 sp_adj2 = const0_rtx;
8492 }
8493 else if (frame_size < 0x40007fffL)
8494 {
8495 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8497 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8498 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8499 sp_adj1 = sa_reg;
8500 else
8501 {
8502 sp_adj1 = gen_rtx_REG (DImode, 23);
8503 emit_move_insn (sp_adj1, sp_adj2);
8504 }
8505 sp_adj2 = GEN_INT (low);
8506 }
8507 else
8508 {
8509 rtx tmp = gen_rtx_REG (DImode, 23);
8510 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8511 if (!sp_adj2)
8512 {
8513 /* We can't drop new things to memory this late, afaik,
8514 so build it up by pieces.  */
8515 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8516 -(frame_size < 0));
8517 gcc_assert (sp_adj2);
8518 }
8519 }
8521 /* From now on, things must be in order. So emit blockages. */
8523 /* Restore the frame pointer. */
8524 if (TARGET_ABI_UNICOSMK)
8525 {
8526 emit_insn (gen_blockage ());
8527 mem = gen_rtx_MEM (DImode,
8528 plus_constant (hard_frame_pointer_rtx, -16));
8529 set_mem_alias_set (mem, alpha_sr_alias_set);
8530 emit_move_insn (hard_frame_pointer_rtx, mem);
8531 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8532 hard_frame_pointer_rtx, cfa_restores);
8533 }
8534 else if (fp_is_frame_pointer)
8535 {
8536 emit_insn (gen_blockage ());
8537 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8538 set_mem_alias_set (mem, alpha_sr_alias_set);
8539 emit_move_insn (hard_frame_pointer_rtx, mem);
8540 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8541 hard_frame_pointer_rtx, cfa_restores);
8542 }
8543 else if (TARGET_ABI_OPEN_VMS)
8544 {
8545 emit_insn (gen_blockage ());
8546 emit_move_insn (hard_frame_pointer_rtx,
8547 gen_rtx_REG (DImode, vms_save_fp_regno));
8548 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8549 hard_frame_pointer_rtx, cfa_restores);
8550 }
8552 /* Restore the stack pointer.  */
8553 emit_insn (gen_blockage ());
8554 if (sp_adj2 == const0_rtx)
8555 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8556 else
8557 insn = emit_move_insn (stack_pointer_rtx,
8558 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8559 REG_NOTES (insn) = cfa_restores;
8560 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8561 RTX_FRAME_RELATED_P (insn) = 1;
8562 }
8563 else
8564 {
8565 gcc_assert (cfa_restores == NULL);
8567 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8568 {
8569 emit_insn (gen_blockage ());
8570 insn = emit_move_insn (hard_frame_pointer_rtx,
8571 gen_rtx_REG (DImode, vms_save_fp_regno));
8572 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8573 RTX_FRAME_RELATED_P (insn) = 1;
8574 }
8575 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8576 {
8577 /* Decrement the frame pointer if the function does not have a
8578 frame.  */
8579 emit_insn (gen_blockage ());
8580 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8581 hard_frame_pointer_rtx, constm1_rtx));
8582 }
8583 }
8584 }
8586 /* Output the rest of the textual info surrounding the epilogue. */
8588 void
8589 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8590 {
8591 rtx insn;
8593 /* We output a nop after noreturn calls at the very end of the function to
8594 ensure that the return address always remains in the caller's code range,
8595 as not doing so might confuse unwinding engines. */
8596 insn = get_last_insn ();
8597 if (!INSN_P (insn))
8598 insn = prev_active_insn (insn);
8599 if (insn && CALL_P (insn))
8600 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8602 #if TARGET_ABI_OPEN_VMS
8603 alpha_write_linkage (file, fnname, decl);
8604 #endif
8606 /* End the function. */
8607 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8608 {
8609 fputs ("\t.end ", file);
8610 assemble_name (file, fnname);
8611 putc ('\n', file);
8612 }
8613 inside_function = FALSE;
8615 /* Output jump tables and the static subroutine information block. */
8616 if (TARGET_ABI_UNICOSMK)
8617 {
8618 unicosmk_output_ssib (file, fnname);
8619 unicosmk_output_deferred_case_vectors (file);
8620 }
8621 }
8623 #if TARGET_ABI_OPEN_VMS
8624 void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
8625 {
8626 #ifdef DO_CRTL_NAMES
8627 DO_CRTL_NAMES;
8628 #endif
8629 }
8630 #endif
8632 #if TARGET_ABI_OSF
8633 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8635 In order to avoid the hordes of differences between generated code
8636 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8637 lots of code loading up large constants, generate rtl and emit it
8638 instead of going straight to text.
8640 Not sure why this idea hasn't been explored before...  */
8642 static void
8643 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8644 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8645 tree function)
8646 {
8647 HOST_WIDE_INT hi, lo;
8648 rtx this_rtx, insn, funexp;
8650 /* We always require a valid GP. */
8651 emit_insn (gen_prologue_ldgp ());
8652 emit_note (NOTE_INSN_PROLOGUE_END);
8654 /* Find the "this" pointer. If the function returns a structure,
8655 the structure return pointer is in $16. */
8656 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8657 this_rtx = gen_rtx_REG (Pmode, 17);
8658 else
8659 this_rtx = gen_rtx_REG (Pmode, 16);
8661 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8662 entire constant for the add. */
8663 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8664 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8665 if (hi + lo == delta)
8666 {
8667 if (hi)
8668 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8669 if (lo)
8670 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8671 }
8672 else
8673 {
8674 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8675 delta, -(delta < 0));
8676 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8677 }
8679 /* Add a delta stored in the vtable at VCALL_OFFSET.  */
8680 if (vcall_offset)
8681 {
8682 rtx tmp, tmp2;
8684 tmp = gen_rtx_REG (Pmode, 0);
8685 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8687 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8688 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8689 if (hi + lo == vcall_offset)
8690 {
8691 if (hi)
8692 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8693 }
8694 else
8695 {
8696 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8697 vcall_offset, -(vcall_offset < 0));
8698 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8699 lo = 0;
8700 }
8701 if (lo)
8702 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8703 else
8704 tmp2 = tmp;
8705 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8707 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8708 }
8710 /* Generate a tail call to the target function. */
8711 if (! TREE_USED (function))
8712 {
8713 assemble_external (function);
8714 TREE_USED (function) = 1;
8715 }
8716 funexp = XEXP (DECL_RTL (function), 0);
8717 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8718 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8719 SIBLING_CALL_P (insn) = 1;
8721 /* Run just enough of rest_of_compilation to get the insns emitted.
8722 There's not really enough bulk here to make other passes such as
8723 instruction scheduling worth while. Note that use_thunk calls
8724 assemble_start_function and assemble_end_function. */
8725 insn = get_insns ();
8726 insn_locators_alloc ();
8727 shorten_branches (insn);
8728 final_start_function (insn, file, 1);
8729 final (insn, file, 1);
8730 final_end_function ();
8731 }
8732 #endif /* TARGET_ABI_OSF */
8734 /* Debugging support.  */
8736 #include "gstab.h"
8738 /* Count the number of sdb related labels that are generated (to find
8739 block start and end boundaries).  */
8741 int sdb_label_count = 0;
8743 /* Name of the file containing the current function. */
8745 static const char *current_function_file = "";
8747 /* Offsets to alpha virtual arg/local debugging pointers. */
8749 long alpha_arg_offset;
8750 long alpha_auto_offset;
8752 /* Emit a new filename to a stream. */
8754 void
8755 alpha_output_filename (FILE *stream, const char *name)
8756 {
8757 static int first_time = TRUE;
8759 if (first_time)
8760 {
8761 first_time = FALSE;
8762 ++num_source_filenames;
8763 current_function_file = name;
8764 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8765 output_quoted_string (stream, name);
8766 fprintf (stream, "\n");
8767 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8768 fprintf (stream, "\t#@stabs\n");
8769 }
8771 else if (write_symbols == DBX_DEBUG)
8772 /* dbxout.c will emit an appropriate .stabs directive.  */
8773 ;
8775 else if (name != current_function_file
8776 && strcmp (name, current_function_file) != 0)
8777 {
8778 if (inside_function && ! TARGET_GAS)
8779 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8780 else
8781 {
8782 ++num_source_filenames;
8783 current_function_file = name;
8784 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8785 }
8787 output_quoted_string (stream, name);
8788 fprintf (stream, "\n");
8789 }
8790 }
8792 /* Structure to show the current status of registers and memory. */
8794 struct shadow_summary
8795 {
8796 struct {
8797 unsigned int i : 31; /* Mask of int regs */
8798 unsigned int fp : 31; /* Mask of fp regs */
8799 unsigned int mem : 1; /* mem == imem | fpmem */
8800 } used, defd;
8801 };
8803 /* Summary the effects of expression X on the machine. Update SUM, a pointer
8804 to the summary structure. SET is nonzero if the insn is setting the
8805 object, otherwise zero. */
8807 static void
8808 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8809 {
8810 const char *format_ptr;
8811 int i, j;
8813 if (x == 0)
8814 return;
8816 switch (GET_CODE (x))
8817 {
8818 /* ??? Note that this case would be incorrect if the Alpha had a
8819 ZERO_EXTRACT in SET_DEST.  */
8820 case SET:
8821 summarize_insn (SET_SRC (x), sum, 0);
8822 summarize_insn (SET_DEST (x), sum, 1);
8823 break;
8825 case CLOBBER:
8826 summarize_insn (XEXP (x, 0), sum, 1);
8827 break;
8829 case USE:
8830 summarize_insn (XEXP (x, 0), sum, 0);
8831 break;
8833 case ASM_OPERANDS:
8834 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8835 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8836 break;
8838 case PARALLEL:
8839 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8840 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8841 break;
8843 case SUBREG:
8844 summarize_insn (SUBREG_REG (x), sum, 0);
8845 break;
8847 case REG:
8848 {
8849 int regno = REGNO (x);
8850 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8852 if (regno == 31 || regno == 63)
8853 break;
8855 if (set)
8856 {
8857 if (regno < 32)
8858 sum->defd.i |= mask;
8859 else
8860 sum->defd.fp |= mask;
8861 }
8862 else
8863 {
8864 if (regno < 32)
8865 sum->used.i |= mask;
8866 else
8867 sum->used.fp |= mask;
8868 }
8869 }
8870 break;
8872 case MEM:
8873 if (set)
8874 sum->defd.mem = 1;
8875 else
8876 sum->used.mem = 1;
8878 /* Find the regs used in memory address computation:  */
8879 summarize_insn (XEXP (x, 0), sum, 0);
8880 break;
8882 case CONST_INT: case CONST_DOUBLE:
8883 case SYMBOL_REF: case LABEL_REF: case CONST:
8884 case SCRATCH: case ASM_INPUT:
8885 break;
8887 /* Handle common unary and binary ops for efficiency.  */
8888 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8889 case MOD: case UDIV: case UMOD: case AND: case IOR:
8890 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8891 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8892 case NE: case EQ: case GE: case GT: case LE:
8893 case LT: case GEU: case GTU: case LEU: case LTU:
8894 summarize_insn (XEXP (x, 0), sum, 0);
8895 summarize_insn (XEXP (x, 1), sum, 0);
8896 break;
8898 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8899 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8900 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8901 case SQRT: case FFS:
8902 summarize_insn (XEXP (x, 0), sum, 0);
8903 break;
8905 default:
8906 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8907 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8908 switch (format_ptr[i])
8909 {
8910 case 'e':
8911 summarize_insn (XEXP (x, i), sum, 0);
8912 break;
8914 case 'E':
8915 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8916 summarize_insn (XVECEXP (x, i, j), sum, 0);
8917 break;
8919 case 'i':
8920 break;
8922 default:
8923 gcc_unreachable ();
8924 }
8925 }
8926 }
8928 /* Ensure a sufficient number of `trapb' insns are in the code when
8929 the user requests code with a trap precision of functions or
8930 instructions.
8932 In naive mode, when the user requests a trap-precision of
8933 "instruction", a trapb is needed after every instruction that may
8934 generate a trap. This ensures that the code is resumption safe but
8935 it is also slow.
8937 When optimizations are turned on, we delay issuing a trapb as long
8938 as possible. In this context, a trap shadow is the sequence of
8939 instructions that starts with a (potentially) trap generating
8940 instruction and extends to the next trapb or call_pal instruction
8941 (but GCC never generates call_pal by itself). We can delay (and
8942 therefore sometimes omit) a trapb subject to the following
8943 conditions:
8945 (a) On entry to the trap shadow, if any Alpha register or memory
8946 location contains a value that is used as an operand value by some
8947 instruction in the trap shadow (live on entry), then no instruction
8948 in the trap shadow may modify the register or memory location.
8950 (b) Within the trap shadow, the computation of the base register
8951 for a memory load or store instruction may not involve using the
8952 result of an instruction that might generate an UNPREDICTABLE
8953 result.
8955 (c) Within the trap shadow, no register may be used more than once
8956 as a destination register. (This is to make life easier for the
8957 trap-handler.)
8959 (d) The trap shadow may not include any branch instructions. */
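/* Editorial note: an illustrative shadow, not taken from any real
   output.  In a sequence such as

	addt/su $f1,$f2,$f3	# may trap; the shadow opens here
	mult/su $f4,$f5,$f6	# fine: fresh destination
	addt/su $f3,$f7,$f3	# reuses $f3 as a destination: rule (c)

   the third instruction may not join the shadow, so a trapb must be
   emitted before it; that is exactly what the pass below enforces.  */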
8961 static void
8962 alpha_handle_trap_shadows (void)
8963 {
8964 struct shadow_summary shadow;
8965 int trap_pending, exception_nesting;
8966 rtx i, n;
8968 trap_pending = 0;
8969 exception_nesting = 0;
8970 shadow.used.i = 0;
8971 shadow.used.fp = 0;
8972 shadow.used.mem = 0;
8973 shadow.defd = shadow.used;
8975 for (i = get_insns (); i ; i = NEXT_INSN (i))
8976 {
8977 if (NOTE_P (i))
8978 {
8979 switch (NOTE_KIND (i))
8980 {
8981 case NOTE_INSN_EH_REGION_BEG:
8982 exception_nesting++;
8983 if (trap_pending)
8984 goto close_shadow;
8985 break;
8987 case NOTE_INSN_EH_REGION_END:
8988 exception_nesting--;
8989 if (trap_pending)
8990 goto close_shadow;
8991 break;
8993 case NOTE_INSN_EPILOGUE_BEG:
8994 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8995 goto close_shadow;
8996 break;
8997 }
8998 }
8999 else if (trap_pending)
9000 {
9001 if (alpha_tp == ALPHA_TP_FUNC)
9002 {
9003 if (JUMP_P (i)
9004 && GET_CODE (PATTERN (i)) == RETURN)
9005 goto close_shadow;
9006 }
9007 else if (alpha_tp == ALPHA_TP_INSN)
9008 {
9009 if (optimize > 0)
9010 {
9011 struct shadow_summary sum;
9013 sum.used.i = 0;
9014 sum.used.fp = 0;
9015 sum.used.mem = 0;
9016 sum.defd = sum.used;
9018 switch (GET_CODE (i))
9019 {
9020 case INSN:
9021 /* Annoyingly, get_attr_trap will die on these. */
9022 if (GET_CODE (PATTERN (i)) == USE
9023 || GET_CODE (PATTERN (i)) == CLOBBER)
9024 break;
9026 summarize_insn (PATTERN (i), &sum, 0);
9028 if ((sum.defd.i & shadow.defd.i)
9029 || (sum.defd.fp & shadow.defd.fp))
9030 {
9031 /* (c) would be violated */
9032 goto close_shadow;
9033 }
9035 /* Combine shadow with summary of current insn: */
9036 shadow.used.i |= sum.used.i;
9037 shadow.used.fp |= sum.used.fp;
9038 shadow.used.mem |= sum.used.mem;
9039 shadow.defd.i |= sum.defd.i;
9040 shadow.defd.fp |= sum.defd.fp;
9041 shadow.defd.mem |= sum.defd.mem;
9043 if ((sum.defd.i & shadow.used.i)
9044 || (sum.defd.fp & shadow.used.fp)
9045 || (sum.defd.mem & shadow.used.mem))
9046 {
9047 /* (a) would be violated (also takes care of (b))  */
9048 gcc_assert (get_attr_trap (i) != TRAP_YES
9049 || (!(sum.defd.i & sum.used.i)
9050 && !(sum.defd.fp & sum.used.fp)));
9052 goto close_shadow;
9053 }
9054 break;
9056 case JUMP_INSN:
9057 case CALL_INSN:
9058 case CODE_LABEL:
9059 goto close_shadow;
9061 default:
9062 gcc_unreachable ();
9063 }
9064 }
9065 else
9066 {
9067 close_shadow:
9068 n = emit_insn_before (gen_trapb (), i);
9069 PUT_MODE (n, TImode);
9070 PUT_MODE (i, TImode);
9071 trap_pending = 0;
9072 shadow.used.i = 0;
9073 shadow.used.fp = 0;
9074 shadow.used.mem = 0;
9075 shadow.defd = shadow.used;
9076 }
9077 }
9078 }
9080 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
9081 && NONJUMP_INSN_P (i)
9082 && GET_CODE (PATTERN (i)) != USE
9083 && GET_CODE (PATTERN (i)) != CLOBBER
9084 && get_attr_trap (i) == TRAP_YES)
9085 {
9086 if (optimize && !trap_pending)
9087 summarize_insn (PATTERN (i), &shadow, 0);
9088 trap_pending = 1;
9089 }
9090 }
9091 }
9093 /* Alpha can only issue instruction groups simultaneously if they are
9094 suitably aligned. This is very processor-specific. */
9095 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
9096 that are marked "fake". These instructions do not exist on that target,
9097 but it is possible to see these insns with deranged combinations of
9098 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
9099 choose a result at random. */
9101 enum alphaev4_pipe {
9102 EV4_STOP = 0,
9103 EV4_IB0 = 1,
9104 EV4_IB1 = 2,
9105 EV4_IBX = 4
9106 };
9108 enum alphaev5_pipe {
9109 EV5_STOP = 0,
9110 EV5_NONE = 1,
9111 EV5_E01 = 2,
9112 EV5_E0 = 4,
9113 EV5_E1 = 8,
9114 EV5_FAM = 16,
9115 EV5_FA = 32,
9116 EV5_FM = 64
9117 };
9119 static enum alphaev4_pipe
9120 alphaev4_insn_pipe (rtx insn)
9121 {
9122 if (recog_memoized (insn) < 0)
9123 return EV4_STOP;
9124 if (get_attr_length (insn) != 4)
9125 return EV4_STOP;
9127 switch (get_attr_type (insn))
9128 {
9143 case TYPE_MVI: /* fake */
9158 case TYPE_FSQRT: /* fake */
9159 case TYPE_FTOI: /* fake */
9160 case TYPE_ITOF: /* fake */
9168 static enum alphaev5_pipe
9169 alphaev5_insn_pipe (rtx insn)
9170 {
9171 if (recog_memoized (insn) < 0)
9172 return EV5_STOP;
9173 if (get_attr_length (insn) != 4)
9174 return EV5_STOP;
9176 switch (get_attr_type (insn))
9177 {
9196 case TYPE_FTOI: /* fake */
9197 case TYPE_ITOF: /* fake */
9212 case TYPE_FSQRT: /* fake */
9223 /* IN_USE is a mask of the slots currently filled within the insn group.
9224 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9225 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9227 LEN is, of course, the length of the group in bytes. */
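/* Editorial note: a hedged reading of this interface.  If the group
   so far holds a single IB0-only insn, *pin_use == EV4_IB0 and *plen
   == 4; an insn that can dual-issue in IB1 extends the same group to
   EV4_IB0 | EV4_IB1 with len 8, while a second IB0-only insn forces a
   new group.  The encodings are those of the enums above.  */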
9229 static rtx
9230 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
9231 {
9237 || GET_CODE (PATTERN (insn)) == CLOBBER
9238 || GET_CODE (PATTERN (insn)) == USE)
9243 enum alphaev4_pipe pipe;
9245 pipe = alphaev4_insn_pipe (insn);
9249 /* Force complex instructions to start new groups. */
9253 /* If this is a completely unrecognized insn, it's an asm.
9254 We don't know how long it is, so record length as -1 to
9255 signal a needed realignment. */
9256 if (recog_memoized (insn) < 0)
9259 len = get_attr_length (insn);
9263 if (in_use & EV4_IB0)
9265 if (in_use & EV4_IB1)
9270 in_use |= EV4_IB0 | EV4_IBX;
9274 if (in_use & EV4_IB0)
9276 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9284 if (in_use & EV4_IB1)
9294 /* Haifa doesn't do well scheduling branches. */
9299 insn = next_nonnote_insn (insn);
9301 if (!insn || ! INSN_P (insn))
9304 /* Let Haifa tell us where it thinks insn group boundaries are. */
9305 if (GET_MODE (insn) == TImode)
9308 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9313 insn = next_nonnote_insn (insn);
9321 /* IN_USE is a mask of the slots currently filled within the insn group.
9322 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9323 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9325 LEN is, of course, the length of the group in bytes. */
9327 static rtx
9328 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9329 {
9335 || GET_CODE (PATTERN (insn)) == CLOBBER
9336 || GET_CODE (PATTERN (insn)) == USE)
9341 enum alphaev5_pipe pipe;
9343 pipe = alphaev5_insn_pipe (insn);
9347 /* Force complex instructions to start new groups. */
9351 /* If this is a completely unrecognized insn, it's an asm.
9352 We don't know how long it is, so record length as -1 to
9353 signal a needed realignment. */
9354 if (recog_memoized (insn) < 0)
9357 len = get_attr_length (insn);
9360 /* ??? Most of the places below, we would like to assert never
9361 happen, as it would indicate an error either in Haifa, or
9362 in the scheduling description. Unfortunately, Haifa never
9363 schedules the last instruction of the BB, so we don't have
9364 an accurate TI bit to go off. */
9366 if (in_use & EV5_E0)
9368 if (in_use & EV5_E1)
9373 in_use |= EV5_E0 | EV5_E01;
9377 if (in_use & EV5_E0)
9379 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9387 if (in_use & EV5_E1)
9393 if (in_use & EV5_FA)
9395 if (in_use & EV5_FM)
9400 in_use |= EV5_FA | EV5_FAM;
9404 if (in_use & EV5_FA)
9410 if (in_use & EV5_FM)
9423 /* Haifa doesn't do well scheduling branches. */
9424 /* ??? If this is predicted not-taken, slotting continues, except
9425 that no more IBR, FBR, or JSR insns may be slotted. */
9430 insn = next_nonnote_insn (insn);
9432 if (!insn || ! INSN_P (insn))
9435 /* Let Haifa tell us where it thinks insn group boundaries are. */
9436 if (GET_MODE (insn) == TImode)
9439 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9444 insn = next_nonnote_insn (insn);
9452 static int
9453 alphaev4_next_nop (int *pin_use)
9454 {
9455 int in_use = *pin_use;
9456 int nop;
9458 if (!(in_use & EV4_IB0))
9459 {
9460 in_use |= EV4_IB0;
9461 nop = CODE_FOR_nop;
9462 }
9463 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9464 {
9465 in_use &= ~EV4_IBX;
9466 nop = CODE_FOR_nop;
9467 }
9468 else if (TARGET_FP && !(in_use & EV4_IB1))
9469 {
9470 in_use |= EV4_IB1;
9471 nop = CODE_FOR_fnop;
9472 }
9473 else
9474 nop = CODE_FOR_unop;
9476 *pin_use = in_use;
9477 return nop;
9478 }
9480 static int
9481 alphaev5_next_nop (int *pin_use)
9482 {
9483 int in_use = *pin_use;
9484 int nop;
9486 if (!(in_use & EV5_E1))
9487 {
9488 in_use |= EV5_E1;
9489 nop = CODE_FOR_nop;
9490 }
9491 else if (TARGET_FP && !(in_use & EV5_FA))
9492 {
9493 in_use |= EV5_FA;
9494 nop = CODE_FOR_fnop;
9495 }
9496 else if (TARGET_FP && !(in_use & EV5_FM))
9497 {
9498 in_use |= EV5_FM;
9499 nop = CODE_FOR_fnop;
9500 }
9501 else
9502 nop = CODE_FOR_unop;
9504 *pin_use = in_use;
9505 return nop;
9506 }
9508 /* The instruction group alignment main loop. */
9510 static void
9511 alpha_align_insns (unsigned int max_align,
9512 rtx (*next_group) (rtx, int *, int *),
9513 rtx (*next_nop) (int *))
9514 {
9515 /* ALIGN is the known alignment for the insn group.  */
9516 unsigned int align;
9517 /* OFS is the offset of the current insn in the insn group.  */
9518 int ofs;
9519 int prev_in_use, in_use, len, ldgp;
9520 rtx i, next;
9522 /* Let shorten branches care for assigning alignments to code labels. */
9523 shorten_branches (get_insns ());
9525 if (align_functions < 4)
9526 align = 4;
9527 else if ((unsigned int) align_functions < max_align)
9528 align = align_functions;
9529 else
9530 align = max_align;
9532 ofs = prev_in_use = 0;
9533 i = get_insns ();
9534 if (NOTE_P (i))
9535 i = next_nonnote_insn (i);
9537 ldgp = alpha_function_needs_gp ? 8 : 0;
9539 while (i)
9540 {
9541 next = (*next_group) (i, &in_use, &len);
9543 /* When we see a label, resync alignment etc.  */
9544 if (LABEL_P (i))
9545 {
9546 unsigned int new_align = 1 << label_to_alignment (i);
9548 if (new_align >= align)
9549 {
9550 align = new_align < max_align ? new_align : max_align;
9551 ofs = 0;
9552 }
9554 else if (ofs & (new_align-1))
9555 ofs = (ofs | (new_align-1)) + 1;
9556 gcc_assert (!len);
9557 }
9559 /* Handle complex instructions special. */
9560 else if (in_use == 0)
9561 {
9562 /* Asms will have length < 0. This is a signal that we have
9563 lost alignment knowledge. Assume, however, that the asm
9564 will not mis-align instructions.  */
9565 if (len < 0)
9566 {
9567 ofs = 0;
9568 align = 4;
9569 len = 0;
9570 }
9571 }
9573 /* If the known alignment is smaller than the recognized insn group,
9574 realign the output. */
9575 else if ((int) align < len)
9576 {
9577 unsigned int new_log_align = len > 8 ? 4 : 3;
9578 rtx prev, where;
9580 where = prev = prev_nonnote_insn (i);
9581 if (!where || !LABEL_P (where))
9582 where = i;
9584 /* Can't realign between a call and its gp reload.  */
9585 if (! (TARGET_EXPLICIT_RELOCS
9586 && prev && CALL_P (prev)))
9587 {
9588 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9589 align = 1 << new_log_align;
9590 ofs = 0;
9591 }
9592 }
9594 /* We may not insert padding inside the initial ldgp sequence. */
9598 /* If the group won't fit in the same INT16 as the previous,
9599 we need to add padding to keep the group together. Rather
9600 than simply leaving the insn filling to the assembler, we
9601 can make use of the knowledge of what sorts of instructions
9602 were issued in the previous group to make sure that all of
9603 the added nops are really free. */
9604 else if (ofs + len > (int) align)
9606 int nop_count = (align - ofs) / 4;
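/* Worked example (illustrative): with align == 16, ofs == 12 and len == 8,
   the group would straddle the boundary, so (16 - 12) / 4 == 1 nop is
   emitted to push it into the next aligned group. */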
9609 /* Insert nops before labels, branches, and calls to truly merge
9610 the execution of the nops with the previous instruction group. */
9611 where = prev_nonnote_insn (i);
9614 if (LABEL_P (where))
9616 rtx where2 = prev_nonnote_insn (where);
9617 if (where2 && JUMP_P (where2))
9620 else if (NONJUMP_INSN_P (where))
9627 emit_insn_before ((*next_nop)(&prev_in_use), where);
9628 while (--nop_count);
9632 ofs = (ofs + len) & (align - 1);
9633 prev_in_use = in_use;
9638 /* Insert an unop between a noreturn function call and GP load. */
9641 alpha_pad_noreturn (void)
9645 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9648 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9651 next = next_active_insn (insn);
9655 rtx pat = PATTERN (next);
9657 if (GET_CODE (pat) == SET
9658 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9659 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9660 emit_insn_after (gen_unop (), insn);
9665 /* Machine dependent reorg pass. */
9670 /* Workaround for a linker error that triggers when an
9671 exception handler immediately follows a noreturn function.
9673 The instruction stream from an object file:
9675 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9676 58: 00 00 ba 27 ldah gp,0(ra)
9677 5c: 00 00 bd 23 lda gp,0(gp)
9678 60: 00 00 7d a7 ldq t12,0(gp)
9679 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9681 was converted in the final link pass to:
9683 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9684 fdb28: 00 00 fe 2f unop
9685 fdb2c: 00 00 fe 2f unop
9686 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9687 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9689 GP load instructions were wrongly cleared by the linker relaxation
9690 pass. This workaround prevents removal of GP loads by inserting
9691 an unop instruction between a noreturn function call and
9692 exception handler prologue. */
9694 if (current_function_has_exception_handlers ())
9695 alpha_pad_noreturn ();
9697 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9698 alpha_handle_trap_shadows ();
9700 /* Due to the number of extra trapb insns, don't bother fixing up
9701 alignment when trap precision is instruction. Moreover, we can
9702 only do our job when sched2 is run. */
9703 if (optimize && !optimize_size
9704 && alpha_tp != ALPHA_TP_INSN
9705 && flag_schedule_insns_after_reload)
9707 if (alpha_tune == PROCESSOR_EV4)
9708 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9709 else if (alpha_tune == PROCESSOR_EV5)
9710 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9714 #if !TARGET_ABI_UNICOSMK
9721 alpha_file_start (void)
9723 #ifdef OBJECT_FORMAT_ELF
9724 /* If emitting dwarf2 debug information, we cannot generate a .file
9725 directive to start the file, as it will conflict with dwarf2out
9726 file numbers. So it's only useful when emitting mdebug output. */
9727 targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9730 default_file_start ();
9732 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9735 fputs ("\t.set noreorder\n", asm_out_file);
9736 fputs ("\t.set volatile\n", asm_out_file);
9737 if (!TARGET_ABI_OPEN_VMS)
9738 fputs ("\t.set noat\n", asm_out_file);
9739 if (TARGET_EXPLICIT_RELOCS)
9740 fputs ("\t.set nomacro\n", asm_out_file);
9741 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9745 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9747 else if (TARGET_MAX)
9749 else if (TARGET_BWX)
9751 else if (alpha_cpu == PROCESSOR_EV5)
9756 fprintf (asm_out_file, "\t.arch %s\n", arch);
9761 #ifdef OBJECT_FORMAT_ELF
9762 /* Since we don't have a .dynbss section, we should not allow global
9763 relocations in the .rodata section. */
9766 alpha_elf_reloc_rw_mask (void)
9768 return flag_pic ? 3 : 2;
9771 /* Return a section for X. The only special thing we do here is to
9772 honor small data. */
9775 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9776 unsigned HOST_WIDE_INT align)
9778 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9779 /* ??? Consider using mergeable sdata sections. */
9780 return sdata_section;
9782 return default_elf_select_rtx_section (mode, x, align);
9786 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9788 unsigned int flags = 0;
9790 if (strcmp (name, ".sdata") == 0
9791 || strncmp (name, ".sdata.", 7) == 0
9792 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9793 || strcmp (name, ".sbss") == 0
9794 || strncmp (name, ".sbss.", 6) == 0
9795 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9796 flags = SECTION_SMALL;
9798 flags |= default_section_type_flags (decl, name, reloc);
9801 #endif /* OBJECT_FORMAT_ELF */
9803 /* Structure to collect function names for final output in link section. */
9804 /* Note that items marked with GTY can't be ifdef'ed out. */
9806 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9807 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9809 struct GTY(()) alpha_links
9814 enum links_kind lkind;
9815 enum reloc_kind rkind;
9818 struct GTY(()) alpha_funcs
9821 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9825 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9826 splay_tree alpha_links_tree;
9827 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9828 splay_tree alpha_funcs_tree;
9830 static GTY(()) int alpha_funcs_num;
9832 #if TARGET_ABI_OPEN_VMS
9834 /* Return the VMS argument type corresponding to MODE. */
9837 alpha_arg_type (enum machine_mode mode)
9842 return TARGET_FLOAT_VAX ? FF : FS;
9844 return TARGET_FLOAT_VAX ? FD : FT;
9850 /* Return an rtx for an integer representing the VMS Argument Information register. */
9854 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9856 unsigned HOST_WIDE_INT regval = cum.num_args;
9859 for (i = 0; i < 6; i++)
9860 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9862 return GEN_INT (regval);
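/* Worked example (illustrative, with hypothetical 3-bit type codes T0 and
   T1): a call with two arguments yields
   regval == 2 | T0 << 8 | T1 << 11, i.e. the argument count in the low
   bits and the type code of argument I at bit 8 + 3 * I. */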
9865 /* Register the need for a (fake) .linkage entry for calls to function NAME.
9866 IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
9867 Return a SYMBOL_REF suited to the call instruction. */
9870 alpha_need_linkage (const char *name, int is_local)
9872 splay_tree_node node;
9873 struct alpha_links *al;
9882 struct alpha_funcs *cfaf;
9884 if (!alpha_funcs_tree)
9885 alpha_funcs_tree = splay_tree_new_ggc
9886 (splay_tree_compare_pointers,
9887 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9888 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9891 cfaf = ggc_alloc_alpha_funcs ();
9894 cfaf->num = ++alpha_funcs_num;
9896 splay_tree_insert (alpha_funcs_tree,
9897 (splay_tree_key) current_function_decl,
9898 (splay_tree_value) cfaf);
9901 if (alpha_links_tree)
9903 /* Is this name already defined? */
9905 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9908 al = (struct alpha_links *) node->value;
9911 /* Defined here but external assumed. */
9912 if (al->lkind == KIND_EXTERN)
9913 al->lkind = KIND_LOCAL;
9917 /* Used here but unused assumed. */
9918 if (al->lkind == KIND_UNUSED)
9919 al->lkind = KIND_LOCAL;
9925 alpha_links_tree = splay_tree_new_ggc
9926 ((splay_tree_compare_fn) strcmp,
9927 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9928 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9930 al = ggc_alloc_alpha_links ();
9931 name = ggc_strdup (name);
9933 /* Assume external if no definition. */
9934 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9936 /* Ensure we have an IDENTIFIER, so that assemble_name can mark it used
9937 and we can follow the transparent-alias chain to the ultimate target. */
9938 id = get_identifier (name);
9940 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9942 id = TREE_CHAIN (id);
9943 target = IDENTIFIER_POINTER (id);
9946 al->target = target ? target : name;
9947 al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
9949 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9950 (splay_tree_value) al);
9955 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9956 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9957 this is the reference to the linkage pointer value, 0 if this is the
9958 reference to the function entry value. RFLAG is 1 if this is a reduced
9959 reference (code address only), 0 if this is a full reference. */
9962 alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
9964 splay_tree_node cfunnode;
9965 struct alpha_funcs *cfaf;
9966 struct alpha_links *al;
9967 const char *name = XSTR (func, 0);
9969 cfaf = (struct alpha_funcs *) 0;
9970 al = (struct alpha_links *) 0;
9972 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9973 cfaf = (struct alpha_funcs *) cfunnode->value;
9977 splay_tree_node lnode;
9979 /* Is this name already defined? */
9981 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9983 al = (struct alpha_links *) lnode->value;
9986 cfaf->links = splay_tree_new_ggc
9987 ((splay_tree_compare_fn) strcmp,
9988 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9989 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9996 splay_tree_node node = 0;
9997 struct alpha_links *anl;
10002 name_len = strlen (name);
10003 linksym = (char *) alloca (name_len + 50);
10005 al = ggc_alloc_alpha_links ();
10006 al->num = cfaf->num;
10009 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
10012 anl = (struct alpha_links *) node->value;
10013 al->lkind = anl->lkind;
10014 name = anl->target;
10017 sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
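/* E.g. a reference to "foo" from the function numbered 3 yields the
   linkage symbol "$3..foo..lk". */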
10018 buflen = strlen (linksym);
10020 al->linkage = gen_rtx_SYMBOL_REF
10021 (Pmode, ggc_alloc_string (linksym, buflen + 1));
10023 splay_tree_insert (cfaf->links, (splay_tree_key) name,
10024 (splay_tree_value) al);
10028 al->rkind = KIND_CODEADDR;
10030 al->rkind = KIND_LINKAGE;
10033 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
10035 return al->linkage;
10039 alpha_write_one_linkage (splay_tree_node node, void *data)
10041 const char *const name = (const char *) node->key;
10042 struct alpha_links *link = (struct alpha_links *) node->value;
10043 FILE *stream = (FILE *) data;
10045 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
10046 if (link->rkind == KIND_CODEADDR)
10048 if (link->lkind == KIND_LOCAL)
10050 /* Local and used */
10051 fprintf (stream, "\t.quad %s..en\n", name);
10055 /* External and used, request code address. */
10056 fprintf (stream, "\t.code_address %s\n", name);
10061 if (link->lkind == KIND_LOCAL)
10063 /* Local and used, build linkage pair. */
10064 fprintf (stream, "\t.quad %s..en\n", name);
10065 fprintf (stream, "\t.quad %s\n", name);
10069 /* External and used, request linkage pair. */
10070 fprintf (stream, "\t.linkage %s\n", name);
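/* Schematically, a locally defined function "foo" with linkage number 3
   thus produces

	$3..foo..lk:
		.quad foo..en
		.quad foo

   while an external "bar" gets a ".linkage bar" request instead. */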
10078 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
10080 splay_tree_node node;
10081 struct alpha_funcs *func;
10083 fprintf (stream, "\t.link\n");
10084 fprintf (stream, "\t.align 3\n");
10087 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
10088 func = (struct alpha_funcs *) node->value;
10090 fputs ("\t.name ", stream);
10091 assemble_name (stream, funname);
10092 fputs ("..na\n", stream);
10093 ASM_OUTPUT_LABEL (stream, funname);
10094 fprintf (stream, "\t.pdesc ");
10095 assemble_name (stream, funname);
10096 fprintf (stream, "..en,%s\n",
10097 alpha_procedure_type == PT_STACK ? "stack"
10098 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
10102 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
10103 /* splay_tree_delete (func->links); */
10107 /* Switch to an arbitrary section NAME with attributes as specified
10108 by FLAGS. ALIGN specifies any known alignment requirements for
10109 the section; 0 if the default should be used. */
10112 vms_asm_named_section (const char *name, unsigned int flags,
10113 tree decl ATTRIBUTE_UNUSED)
10115 fputc ('\n', asm_out_file);
10116 fprintf (asm_out_file, ".section\t%s", name);
10118 if (flags & SECTION_DEBUG)
10119 fprintf (asm_out_file, ",NOWRT");
10121 fputc ('\n', asm_out_file);
10124 /* Record an element in the table of global constructors. SYMBOL is
10125 a SYMBOL_REF of the function to be called; PRIORITY is a number
10126 between 0 and MAX_INIT_PRIORITY.
10128 Differs from default_ctors_section_asm_out_constructor in that the
10129 width of the .ctors entry is always 64 bits, rather than the 32 bits
10130 used by a normal pointer. */
10133 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10135 switch_to_section (ctors_section);
10136 assemble_align (BITS_PER_WORD);
10137 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10141 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10143 switch_to_section (dtors_section);
10144 assemble_align (BITS_PER_WORD);
10145 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
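/* Schematically, both hooks emit one 8-byte table entry per symbol, e.g.
   for a constructor "foo" (directive spellings may vary by assembler):

	.align 3
	.quad foo  */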
10150 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
10151 int is_local ATTRIBUTE_UNUSED)
10157 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
10158 tree cfundecl ATTRIBUTE_UNUSED,
10159 int lflag ATTRIBUTE_UNUSED,
10160 int rflag ATTRIBUTE_UNUSED)
10165 #endif /* TARGET_ABI_OPEN_VMS */
10167 #if TARGET_ABI_UNICOSMK
10169 /* This evaluates to true if we do not know how to pass TYPE solely in
10170 registers. This is the case for all arguments that do not fit in two registers. */
10174 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
10179 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10181 if (TREE_ADDRESSABLE (type))
10184 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
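/* So, by the test above, an aggregate wider than two 64-bit argument slots
   (ALPHA_ARG_SIZE > 2), a variable-sized type, or an addressable type must
   go on the stack. */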
10187 /* Define the offset between two registers, one to be eliminated, and the
10188 other its replacement, at the start of a routine. */
10191 unicosmk_initial_elimination_offset (int from, int to)
10195 fixed_size = alpha_sa_size ();
10196 if (fixed_size != 0)
10199 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10200 return -fixed_size;
10201 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10203 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10204 return (ALPHA_ROUND (crtl->outgoing_args_size)
10205 + ALPHA_ROUND (get_frame_size()));
10206 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10207 return (ALPHA_ROUND (fixed_size)
10208 + ALPHA_ROUND (get_frame_size()
10209 + crtl->outgoing_args_size));
10211 gcc_unreachable ();
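/* Illustrative numbers: with a 48-byte register save area, a 32-byte frame
   and 16 bytes of outgoing arguments, eliminating the arg pointer to the
   stack pointer gives ALPHA_ROUND (48) + ALPHA_ROUND (32 + 16) bytes. */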
10214 /* Output the module name for .ident and .end directives. We have to strip
10215 directories and make sure that the module name starts with a letter or '$'. */
10219 unicosmk_output_module_name (FILE *file)
10221 const char *name = lbasename (main_input_filename);
10222 unsigned len = strlen (name);
10223 char *clean_name = alloca (len + 2);
10224 char *ptr = clean_name;
10226 /* CAM only accepts module names that start with a letter or '$'. We
10227 prefix the module name with a '$' if necessary. */
10229 if (!ISALPHA (*name))
10231 memcpy (ptr, name, len + 1);
10232 clean_symbol_name (clean_name);
10233 fputs (clean_name, file);
10236 /* Output the definition of a common variable. */
10239 unicosmk_output_common (FILE *file, const char *name, int size, int align)
10242 printf ("T3E__: common %s\n", name);
10245 fputs ("\t.endp\n\n\t.psect ", file);
10246 assemble_name (file, name);
10247 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
10248 fprintf (file, "\t.byte\t0:%d\n", size);
10250 /* Mark the symbol as defined in this module. */
10251 name_tree = get_identifier (name);
10252 TREE_ASM_WRITTEN (name_tree) = 1;
10255 #define SECTION_PUBLIC SECTION_MACH_DEP
10256 #define SECTION_MAIN (SECTION_PUBLIC << 1)
10257 static int current_section_align;
10259 /* A get_unnamed_section callback for switching to the text section. */
10262 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10264 static int count = 0;
10265 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
10268 /* A get_unnamed_section callback for switching to the data section. */
10271 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10273 static int count = 1;
10274 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
10277 /* Implement TARGET_ASM_INIT_SECTIONS.
10279 The Cray assembler is really weird with respect to sections. It has only
10280 named sections and you can't reopen a section once it has been closed.
10281 This means that we have to generate unique names whenever we want to
10282 reenter the text or the data section. */
10285 unicosmk_init_sections (void)
10287 text_section = get_unnamed_section (SECTION_CODE,
10288 unicosmk_output_text_section_asm_op,
10290 data_section = get_unnamed_section (SECTION_WRITE,
10291 unicosmk_output_data_section_asm_op,
10293 readonly_data_section = data_section;
10296 static unsigned int
10297 unicosmk_section_type_flags (tree decl, const char *name,
10298 int reloc ATTRIBUTE_UNUSED)
10300 unsigned int flags = default_section_type_flags (decl, name, reloc);
10305 if (TREE_CODE (decl) == FUNCTION_DECL)
10307 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10308 if (align_functions_log > current_section_align)
10309 current_section_align = align_functions_log;
10311 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10312 flags |= SECTION_MAIN;
10315 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10317 if (TREE_PUBLIC (decl))
10318 flags |= SECTION_PUBLIC;
10323 /* Generate a section name for decl and associate it with the section. */
10327 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
10334 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10335 name = default_strip_name_encoding (name);
10336 len = strlen (name);
10338 if (TREE_CODE (decl) == FUNCTION_DECL)
10342 /* It is essential that we prefix the section name here because
10343 otherwise the section names generated for constructors and
10344 destructors confuse collect2. */
10346 string = alloca (len + 6);
10347 sprintf (string, "code@%s", name);
10348 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10350 else if (TREE_PUBLIC (decl))
10351 DECL_SECTION_NAME (decl) = build_string (len, name);
10356 string = alloca (len + 6);
10357 sprintf (string, "data@%s", name);
10358 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10362 /* Switch to an arbitrary section NAME with attributes as specified
10363 by FLAGS. ALIGN specifies any known alignment requirements for
10364 the section; 0 if the default should be used. */
10367 unicosmk_asm_named_section (const char *name, unsigned int flags,
10368 tree decl ATTRIBUTE_UNUSED)
10372 /* Close the previous section. */
10374 fputs ("\t.endp\n\n", asm_out_file);
10376 /* Find out what kind of section we are opening. */
10378 if (flags & SECTION_MAIN)
10379 fputs ("\t.start\tmain\n", asm_out_file);
10381 if (flags & SECTION_CODE)
10383 else if (flags & SECTION_PUBLIC)
10388 if (current_section_align != 0)
10389 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10390 current_section_align, kind);
10392 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10396 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10399 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10400 unicosmk_unique_section (decl, 0);
10403 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10404 in code sections because .align fills unused space with zeroes. */
10407 unicosmk_output_align (FILE *file, int align)
10409 if (inside_function)
10410 fprintf (file, "\tgcc@code@align\t%d\n", align);
10412 fprintf (file, "\t.align\t%d\n", align);
10415 /* Add a case vector to the current function's list of deferred case
10416 vectors. Case vectors have to be put into a separate section because CAM
10417 does not allow data definitions in code sections. */
10420 unicosmk_defer_case_vector (rtx lab, rtx vec)
10422 struct machine_function *machine = cfun->machine;
10424 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10425 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10426 machine->addr_list);
10429 /* Output a case vector. */
10432 unicosmk_output_addr_vec (FILE *file, rtx vec)
10434 rtx lab = XEXP (vec, 0);
10435 rtx body = XEXP (vec, 1);
10436 int vlen = XVECLEN (body, 0);
10439 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10441 for (idx = 0; idx < vlen; idx++)
10443 ASM_OUTPUT_ADDR_VEC_ELT
10444 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10448 /* Output current function's deferred case vectors. */
10451 unicosmk_output_deferred_case_vectors (FILE *file)
10453 struct machine_function *machine = cfun->machine;
10456 if (machine->addr_list == NULL_RTX)
10459 switch_to_section (data_section);
10460 for (t = machine->addr_list; t; t = XEXP (t, 1))
10461 unicosmk_output_addr_vec (file, XEXP (t, 0));
10464 /* Generate the name of the SSIB section for the current function. */
10466 #define SSIB_PREFIX "__SSIB_"
10467 #define SSIB_PREFIX_LEN 7
10469 static const char *
10470 unicosmk_ssib_name (void)
10472 /* This is ok since CAM won't be able to deal with names longer than that anyway. */
10475 static char name[256];
10478 const char *fnname;
10481 x = DECL_RTL (cfun->decl);
10482 gcc_assert (MEM_P (x));
10484 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10485 fnname = XSTR (x, 0);
10487 len = strlen (fnname);
10488 if (len + SSIB_PREFIX_LEN > 255)
10489 len = 255 - SSIB_PREFIX_LEN;
10491 strcpy (name, SSIB_PREFIX);
10492 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10493 name[len + SSIB_PREFIX_LEN] = 0;
10498 /* Set up the dynamic subprogram information block (DSIB) and update the
10499 frame pointer register ($15) for subroutines which have a frame. If the
10500 subroutine doesn't have a frame, simply increment $15. */
10503 unicosmk_gen_dsib (unsigned long *imaskP)
10505 if (alpha_procedure_type == PT_STACK)
10507 const char *ssib_name;
10510 /* Allocate 64 bytes for the DSIB. */
10512 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10514 emit_insn (gen_blockage ());
10516 /* Save the return address. */
10518 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10519 set_mem_alias_set (mem, alpha_sr_alias_set);
10520 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10521 (*imaskP) &= ~(1UL << REG_RA);
10523 /* Save the old frame pointer. */
10525 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10526 set_mem_alias_set (mem, alpha_sr_alias_set);
10527 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10528 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10530 emit_insn (gen_blockage ());
10532 /* Store the SSIB pointer. */
10534 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10535 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10536 set_mem_alias_set (mem, alpha_sr_alias_set);
10538 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10539 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10540 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10542 /* Save the CIW index. */
10544 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10545 set_mem_alias_set (mem, alpha_sr_alias_set);
10546 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10548 emit_insn (gen_blockage ());
10550 /* Set the new frame pointer. */
10551 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10552 stack_pointer_rtx, GEN_INT (64))));
10556 /* Increment the frame pointer register to indicate that we do not have a frame. */
10558 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10559 hard_frame_pointer_rtx, const1_rtx));
10563 /* Output the static subroutine information block for the current function. */
10567 unicosmk_output_ssib (FILE *file, const char *fnname)
10573 struct machine_function *machine = cfun->machine;
10576 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10577 unicosmk_ssib_name ());
10579 /* A required header word that also encodes the function name length. */
10581 len = strlen (fnname);
10582 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10585 ??? We don't do that yet. */
10587 fputs ("\t.quad\t0\n", file);
10589 /* Function address. */
10591 fputs ("\t.quad\t", file);
10592 assemble_name (file, fnname);
10595 fputs ("\t.quad\t0\n", file);
10596 fputs ("\t.quad\t0\n", file);
10599 ??? We do it the same way Cray CC does it but this could be
10602 for (i = 0; i < len; i++)
10603 fprintf (file, "\t.byte\t%d\n", (int) fnname[i]);
10604 if ((len % 8) == 0)
10605 fputs ("\t.quad\t0\n", file);
10607 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10609 /* All call information words used in the function. */
10611 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10614 #if HOST_BITS_PER_WIDE_INT == 32
10615 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10616 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10618 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10623 /* Add a call information word (CIW) to the list of the current function's
10624 CIWs and return its index.
10626 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10629 unicosmk_add_call_info_word (rtx x)
10632 struct machine_function *machine = cfun->machine;
10634 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10635 if (machine->first_ciw == NULL_RTX)
10636 machine->first_ciw = node;
10638 XEXP (machine->last_ciw, 1) = node;
10640 machine->last_ciw = node;
10641 ++machine->ciw_count;
10643 return GEN_INT (machine->ciw_count
10644 + strlen (current_function_name ())/8 + 5);
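/* The returned value is meant as a quad-word index into the SSIB laid out
   by unicosmk_output_ssib above: the constant 5 covers the fixed header
   quads, and strlen / 8 the function name padded out to quad-words, after
   which the CIWs follow. */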
10647 /* The Cray assembler doesn't accept extern declarations for symbols which
10648 are defined in the same file. We have to keep track of all global
10649 symbols which are referenced and/or defined in a source file and output
10650 extern declarations for those which are referenced but not defined at
10651 the end of file. */
10653 /* List of identifiers for which an extern declaration might have to be emitted. */
10655 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10657 struct unicosmk_extern_list
10659 struct unicosmk_extern_list *next;
10663 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10665 /* Output extern declarations which are required for every asm file. */
10668 unicosmk_output_default_externs (FILE *file)
10670 static const char *const externs[] =
10671 { "__T3E_MISMATCH" };
10676 n = ARRAY_SIZE (externs);
10678 for (i = 0; i < n; i++)
10679 fprintf (file, "\t.extern\t%s\n", externs[i]);
10682 /* Output extern declarations for global symbols which have been
10683 referenced but not defined. */
10686 unicosmk_output_externs (FILE *file)
10688 struct unicosmk_extern_list *p;
10689 const char *real_name;
10693 len = strlen (user_label_prefix);
10694 for (p = unicosmk_extern_head; p != 0; p = p->next)
10696 /* We have to strip the encoding and possibly remove user_label_prefix
10697 from the identifier in order to handle -fleading-underscore and
10698 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10699 real_name = default_strip_name_encoding (p->name);
10700 if (len && p->name[0] == '*'
10701 && !memcmp (real_name, user_label_prefix, len))
10704 name_tree = get_identifier (real_name);
10705 if (! TREE_ASM_WRITTEN (name_tree))
10707 TREE_ASM_WRITTEN (name_tree) = 1;
10708 fputs ("\t.extern\t", file);
10709 assemble_name (file, p->name);
10715 /* Record an extern. */
10718 unicosmk_add_extern (const char *name)
10720 struct unicosmk_extern_list *p;
10722 p = (struct unicosmk_extern_list *)
10723 xmalloc (sizeof (struct unicosmk_extern_list));
10724 p->next = unicosmk_extern_head;
10726 unicosmk_extern_head = p;
10729 /* The Cray assembler generates incorrect code if identifiers which
10730 conflict with register names are used as instruction operands. We have
10731 to replace such identifiers with DEX expressions. */
10733 /* Structure to collect identifiers which have been replaced by DEX expressions. */
10735 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10737 struct unicosmk_dex {
10738 struct unicosmk_dex *next;
10742 /* List of identifiers which have been replaced by DEX expressions. The DEX
10743 number is determined by the position in the list. */
10745 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10747 /* The number of elements in the DEX list. */
10749 static int unicosmk_dex_count = 0;
10751 /* Check if NAME must be replaced by a DEX expression. */
10754 unicosmk_special_name (const char *name)
10756 if (name[0] == '*')
10759 if (name[0] == '$')
10762 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10767 case '1': case '2':
10768 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10771 return (name[2] == '\0'
10772 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10775 return (ISDIGIT (name[1]) && name[2] == '\0');
10779 /* Return the DEX number if X must be replaced by a DEX expression and 0 otherwise. */
10783 unicosmk_need_dex (rtx x)
10785 struct unicosmk_dex *dex;
10789 if (GET_CODE (x) != SYMBOL_REF)
10793 if (! unicosmk_special_name (name))
10796 i = unicosmk_dex_count;
10797 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10799 if (! strcmp (name, dex->name))
10804 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10806 dex->next = unicosmk_dex_list;
10807 unicosmk_dex_list = dex;
10809 ++unicosmk_dex_count;
10810 return unicosmk_dex_count;
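/* For example, the first identifier such as "r5" or "F31" that clashes
   with a CAM register name is assigned DEX number 1, the next one 2, and
   so on; unicosmk_output_dex below emits the matching definitions at the
   end of the file. */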
10813 /* Output the DEX definitions for this file. */
10816 unicosmk_output_dex (FILE *file)
10818 struct unicosmk_dex *dex;
10821 if (unicosmk_dex_list == NULL)
10824 fprintf (file, "\t.dexstart\n");
10826 i = unicosmk_dex_count;
10827 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10829 fprintf (file, "\tDEX (%d) = ", i);
10830 assemble_name (file, dex->name);
10835 fprintf (file, "\t.dexend\n");
10838 /* Output text to appear at the beginning of an assembler file. */
10841 unicosmk_file_start (void)
10845 fputs ("\t.ident\t", asm_out_file);
10846 unicosmk_output_module_name (asm_out_file);
10847 fputs ("\n\n", asm_out_file);
10849 /* The Unicos/Mk assembler uses different register names. Instead of trying
10850 to support them, we simply use micro definitions. */
10852 /* CAM has different register names: rN for the integer register N and fN
10853 for the floating-point register N. Instead of trying to use these in
10854 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10857 for (i = 0; i < 32; ++i)
10858 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10860 for (i = 0; i < 32; ++i)
10861 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10863 putc ('\n', asm_out_file);
10865 /* The .align directive fills unused space with zeroes, which does not work
10866 in code sections. We define the macro 'gcc@code@align' which uses nops
10867 instead. Note that it assumes that code sections always have the
10868 biggest possible alignment since . refers to the current offset from
10869 the beginning of the section. */
10871 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10872 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10873 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10874 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10875 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10876 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10877 fputs ("\t.endr\n", asm_out_file);
10878 fputs ("\t.endif\n", asm_out_file);
10879 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
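/* With this definition, an invocation such as "gcc@code@align 4" measures
   the distance from '.' to the next 16-byte boundary and fills it, four
   bytes at a time, with "bis r31,r31,r31" nops. */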
10881 /* Output extern declarations which should always be visible. */
10882 unicosmk_output_default_externs (asm_out_file);
10884 /* Open a dummy section. We always need to be inside a section for the
10885 section-switching code to work correctly.
10886 ??? This should be a module id or something like that. I still have to
10887 figure out what the rules for those are. */
10888 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10891 /* Output text to appear at the end of an assembler file. This includes all
10892 pending extern declarations and DEX expressions. */
10895 unicosmk_file_end (void)
10897 fputs ("\t.endp\n\n", asm_out_file);
10899 /* Output all pending externs. */
10901 unicosmk_output_externs (asm_out_file);
10903 /* Output dex definitions used for functions whose names conflict with register names. */
10906 unicosmk_output_dex (asm_out_file);
10908 fputs ("\t.end\t", asm_out_file);
10909 unicosmk_output_module_name (asm_out_file);
10910 putc ('\n', asm_out_file);
10916 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10920 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10924 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10925 const char * fnname ATTRIBUTE_UNUSED)
10929 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10935 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10940 #endif /* TARGET_ABI_UNICOSMK */
10943 alpha_init_libfuncs (void)
10945 if (TARGET_ABI_UNICOSMK)
10947 /* Prevent gcc from generating calls to __divsi3. */
10948 set_optab_libfunc (sdiv_optab, SImode, 0);
10949 set_optab_libfunc (udiv_optab, SImode, 0);
10951 /* Use the functions provided by the system library
10952 for DImode integer division. */
10953 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10954 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10956 else if (TARGET_ABI_OPEN_VMS)
10958 /* Use the VMS runtime library functions for division and remainder. */
10960 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10961 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10962 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10963 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10964 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10965 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10966 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10967 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10968 abort_libfunc = init_one_libfunc ("decc$abort");
10969 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10970 #ifdef MEM_LIBFUNCS_INIT
10976 /* On the Alpha, we use this to disable the floating-point registers
10977 when they don't exist. */
10980 alpha_conditional_register_usage (void)
10983 if (! TARGET_FPREGS)
10984 for (i = 32; i < 63; i++)
10985 fixed_regs[i] = call_used_regs[i] = 1;
10988 /* Initialize the GCC target structure. */
10989 #if TARGET_ABI_OPEN_VMS
10990 # undef TARGET_ATTRIBUTE_TABLE
10991 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10992 # undef TARGET_CAN_ELIMINATE
10993 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
10996 #undef TARGET_IN_SMALL_DATA_P
10997 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10999 #if TARGET_ABI_UNICOSMK
11000 # undef TARGET_INSERT_ATTRIBUTES
11001 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
11002 # undef TARGET_SECTION_TYPE_FLAGS
11003 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
11004 # undef TARGET_ASM_UNIQUE_SECTION
11005 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
11006 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
11007 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
11008 # undef TARGET_ASM_GLOBALIZE_LABEL
11009 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
11010 # undef TARGET_MUST_PASS_IN_STACK
11011 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
11014 #undef TARGET_ASM_ALIGNED_HI_OP
11015 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
11016 #undef TARGET_ASM_ALIGNED_DI_OP
11017 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
11019 /* Default unaligned ops are provided for ELF systems. To get unaligned
11020 data for non-ELF systems, we have to turn off auto alignment. */
11021 #if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
11022 #undef TARGET_ASM_UNALIGNED_HI_OP
11023 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
11024 #undef TARGET_ASM_UNALIGNED_SI_OP
11025 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
11026 #undef TARGET_ASM_UNALIGNED_DI_OP
11027 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
11030 #ifdef OBJECT_FORMAT_ELF
11031 #undef TARGET_ASM_RELOC_RW_MASK
11032 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
11033 #undef TARGET_ASM_SELECT_RTX_SECTION
11034 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
11035 #undef TARGET_SECTION_TYPE_FLAGS
11036 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
11039 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
11040 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
11042 #undef TARGET_INIT_LIBFUNCS
11043 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
11045 #undef TARGET_LEGITIMIZE_ADDRESS
11046 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
11048 #if TARGET_ABI_UNICOSMK
11049 #undef TARGET_ASM_FILE_START
11050 #define TARGET_ASM_FILE_START unicosmk_file_start
11051 #undef TARGET_ASM_FILE_END
11052 #define TARGET_ASM_FILE_END unicosmk_file_end
11054 #undef TARGET_ASM_FILE_START
11055 #define TARGET_ASM_FILE_START alpha_file_start
11056 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
11057 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
11060 #undef TARGET_SCHED_ADJUST_COST
11061 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
11062 #undef TARGET_SCHED_ISSUE_RATE
11063 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
11064 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11065 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
11066 alpha_multipass_dfa_lookahead
11068 #undef TARGET_HAVE_TLS
11069 #define TARGET_HAVE_TLS HAVE_AS_TLS
11071 #undef TARGET_BUILTIN_DECL
11072 #define TARGET_BUILTIN_DECL alpha_builtin_decl
11073 #undef TARGET_INIT_BUILTINS
11074 #define TARGET_INIT_BUILTINS alpha_init_builtins
11075 #undef TARGET_EXPAND_BUILTIN
11076 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
11077 #undef TARGET_FOLD_BUILTIN
11078 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
11080 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11081 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
11082 #undef TARGET_CANNOT_COPY_INSN_P
11083 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
11084 #undef TARGET_CANNOT_FORCE_CONST_MEM
11085 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
11088 #undef TARGET_ASM_OUTPUT_MI_THUNK
11089 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
11090 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11091 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11092 #undef TARGET_STDARG_OPTIMIZE_HOOK
11093 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
11096 #undef TARGET_RTX_COSTS
11097 #define TARGET_RTX_COSTS alpha_rtx_costs
11098 #undef TARGET_ADDRESS_COST
11099 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
11101 #undef TARGET_MACHINE_DEPENDENT_REORG
11102 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
11104 #undef TARGET_PROMOTE_FUNCTION_MODE
11105 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
11106 #undef TARGET_PROMOTE_PROTOTYPES
11107 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
11108 #undef TARGET_RETURN_IN_MEMORY
11109 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
11110 #undef TARGET_PASS_BY_REFERENCE
11111 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
11112 #undef TARGET_SETUP_INCOMING_VARARGS
11113 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
11114 #undef TARGET_STRICT_ARGUMENT_NAMING
11115 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
11116 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
11117 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
11118 #undef TARGET_SPLIT_COMPLEX_ARG
11119 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
11120 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11121 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
11122 #undef TARGET_ARG_PARTIAL_BYTES
11123 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
11124 #undef TARGET_FUNCTION_ARG
11125 #define TARGET_FUNCTION_ARG alpha_function_arg
11126 #undef TARGET_FUNCTION_ARG_ADVANCE
11127 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
11128 #undef TARGET_TRAMPOLINE_INIT
11129 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
11131 #undef TARGET_SECONDARY_RELOAD
11132 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
11134 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11135 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
11136 #undef TARGET_VECTOR_MODE_SUPPORTED_P
11137 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
11139 #undef TARGET_BUILD_BUILTIN_VA_LIST
11140 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
11142 #undef TARGET_EXPAND_BUILTIN_VA_START
11143 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
11145 /* The Alpha architecture does not require sequential consistency. See
11146 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
11147 for an example of how it can be violated in practice. */
11148 #undef TARGET_RELAXED_ORDERING
11149 #define TARGET_RELAXED_ORDERING true
11151 #undef TARGET_DEFAULT_TARGET_FLAGS
11152 #define TARGET_DEFAULT_TARGET_FLAGS \
11153 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
11154 #undef TARGET_HANDLE_OPTION
11155 #define TARGET_HANDLE_OPTION alpha_handle_option
11157 #undef TARGET_OPTION_OVERRIDE
11158 #define TARGET_OPTION_OVERRIDE alpha_option_override
11160 #undef TARGET_OPTION_OPTIMIZATION_TABLE
11161 #define TARGET_OPTION_OPTIMIZATION_TABLE alpha_option_optimization_table
11163 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11164 #undef TARGET_MANGLE_TYPE
11165 #define TARGET_MANGLE_TYPE alpha_mangle_type
11168 #undef TARGET_LEGITIMATE_ADDRESS_P
11169 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
11171 #undef TARGET_CONDITIONAL_REGISTER_USAGE
11172 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
11174 struct gcc_target targetm = TARGET_INITIALIZER;
11177 #include "gt-alpha.h"