1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
35 #include "insn-attr.h"
46 #include "integrate.h"
49 #include "target-def.h"
51 #include "langhooks.h"
52 #include <splay-tree.h>
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
60 /* Specify which cpu to schedule for. */
61 enum processor_type alpha_tune;
63 /* Which cpu we're generating code for. */
64 enum processor_type alpha_cpu;
66 static const char * const alpha_cpu_name[] =
71 /* Specify how accurate floating-point traps need to be. */
73 enum alpha_trap_precision alpha_tp;
75 /* Specify the floating-point rounding mode. */
77 enum alpha_fp_rounding_mode alpha_fprm;
79 /* Specify which things cause traps. */
81 enum alpha_fp_trap_mode alpha_fptm;
83 /* Save information from a "cmpxx" operation until the branch or scc is
86 struct alpha_compare alpha_compare;
88 /* Nonzero if inside of a function, because the Alpha asm can't
89 handle .files inside of functions. */
91 static int inside_function = FALSE;
93 /* The number of cycles of latency we should assume on memory reads. */
95 int alpha_memory_latency = 3;
97 /* Whether the function needs the GP. */
99 static int alpha_function_needs_gp;
101 /* The alias set for prologue/epilogue register save/restore. */
103 static GTY(()) alias_set_type alpha_sr_alias_set;
105 /* The assembler name of the current function. */
107 static const char *alpha_fnname;
109 /* The next explicit relocation sequence number. */
110 extern GTY(()) int alpha_next_sequence_number;
111 int alpha_next_sequence_number = 1;
113 /* The literal and gpdisp sequence numbers for this insn, as printed
114 by %# and %* respectively. */
115 extern GTY(()) int alpha_this_literal_sequence_number;
116 extern GTY(()) int alpha_this_gpdisp_sequence_number;
117 int alpha_this_literal_sequence_number;
118 int alpha_this_gpdisp_sequence_number;
120 /* Costs of various operations on the different architectures. */
122 struct alpha_rtx_cost_data
124 unsigned char fp_add;
125 unsigned char fp_mult;
126 unsigned char fp_div_sf;
127 unsigned char fp_div_df;
128 unsigned char int_mult_si;
129 unsigned char int_mult_di;
130 unsigned char int_shift;
131 unsigned char int_cmov;
132 unsigned short int_div;
135 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
138 COSTS_N_INSNS (6), /* fp_add */
139 COSTS_N_INSNS (6), /* fp_mult */
140 COSTS_N_INSNS (34), /* fp_div_sf */
141 COSTS_N_INSNS (63), /* fp_div_df */
142 COSTS_N_INSNS (23), /* int_mult_si */
143 COSTS_N_INSNS (23), /* int_mult_di */
144 COSTS_N_INSNS (2), /* int_shift */
145 COSTS_N_INSNS (2), /* int_cmov */
146 COSTS_N_INSNS (97), /* int_div */
149 COSTS_N_INSNS (4), /* fp_add */
150 COSTS_N_INSNS (4), /* fp_mult */
151 COSTS_N_INSNS (15), /* fp_div_sf */
152 COSTS_N_INSNS (22), /* fp_div_df */
153 COSTS_N_INSNS (8), /* int_mult_si */
154 COSTS_N_INSNS (12), /* int_mult_di */
155 COSTS_N_INSNS (1) + 1, /* int_shift */
156 COSTS_N_INSNS (1), /* int_cmov */
157 COSTS_N_INSNS (83), /* int_div */
160 COSTS_N_INSNS (4), /* fp_add */
161 COSTS_N_INSNS (4), /* fp_mult */
162 COSTS_N_INSNS (12), /* fp_div_sf */
163 COSTS_N_INSNS (15), /* fp_div_df */
164 COSTS_N_INSNS (7), /* int_mult_si */
165 COSTS_N_INSNS (7), /* int_mult_di */
166 COSTS_N_INSNS (1), /* int_shift */
167 COSTS_N_INSNS (2), /* int_cmov */
168 COSTS_N_INSNS (86), /* int_div */
172 /* Similar but tuned for code size instead of execution latency. The
173 extra +N is fractional cost tuning based on latency. It's used to
174 encourage use of cheaper insns like shift, but only if there's just
177 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
179 COSTS_N_INSNS (1), /* fp_add */
180 COSTS_N_INSNS (1), /* fp_mult */
181 COSTS_N_INSNS (1), /* fp_div_sf */
182 COSTS_N_INSNS (1) + 1, /* fp_div_df */
183 COSTS_N_INSNS (1) + 1, /* int_mult_si */
184 COSTS_N_INSNS (1) + 2, /* int_mult_di */
185 COSTS_N_INSNS (1), /* int_shift */
186 COSTS_N_INSNS (1), /* int_cmov */
187 COSTS_N_INSNS (6), /* int_div */
190 /* Get the number of args of a function in one of two ways. */
191 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
192 #define NUM_ARGS current_function_args_info.num_args
193 #else
194 #define NUM_ARGS current_function_args_info
200 /* Declarations of static functions. */
201 static struct machine_function *alpha_init_machine_status (void);
202 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
204 #if TARGET_ABI_OPEN_VMS
205 static void alpha_write_linkage (FILE *, const char *, tree);
208 static void unicosmk_output_deferred_case_vectors (FILE *);
209 static void unicosmk_gen_dsib (unsigned long *);
210 static void unicosmk_output_ssib (FILE *, const char *);
211 static int unicosmk_need_dex (rtx);
213 /* Implement TARGET_HANDLE_OPTION. */
216 alpha_handle_option (size_t code, const char *arg, int value)
222 target_flags |= MASK_SOFT_FP;
226 case OPT_mieee_with_inexact:
227 target_flags |= MASK_IEEE_CONFORMANT;
231 if (value != 16 && value != 32 && value != 64)
232 error ("bad value %qs for -mtls-size switch", arg);
239 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
240 /* Implement TARGET_MANGLE_TYPE. */
243 alpha_mangle_type (const_tree type)
245 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
246 && TARGET_LONG_DOUBLE_128)
249 /* For all other types, use normal C++ mangling. */
254 /* Parse target option strings. */
257 override_options (void)
259 static const struct cpu_table {
260 const char *const name;
261 const enum processor_type processor;
264 { "ev4", PROCESSOR_EV4, 0 },
265 { "ev45", PROCESSOR_EV4, 0 },
266 { "21064", PROCESSOR_EV4, 0 },
267 { "ev5", PROCESSOR_EV5, 0 },
268 { "21164", PROCESSOR_EV5, 0 },
269 { "ev56", PROCESSOR_EV5, MASK_BWX },
270 { "21164a", PROCESSOR_EV5, MASK_BWX },
271 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164PC", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164pc", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
283 /* Unicos/Mk doesn't have shared libraries. */
284 if (TARGET_ABI_UNICOSMK && flag_pic)
286 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
287 (flag_pic > 1) ? "PIC" : "pic");
291 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
292 floating-point instructions. Make that the default for this target. */
293 if (TARGET_ABI_UNICOSMK)
294 alpha_fprm = ALPHA_FPRM_DYN;
296 alpha_fprm = ALPHA_FPRM_NORM;
298 alpha_tp = ALPHA_TP_PROG;
299 alpha_fptm = ALPHA_FPTM_N;
301 /* We cannot use su and sui qualifiers for conversion instructions on
302 Unicos/Mk. I'm not sure if this is due to assembler or hardware
303 limitations. Right now, we issue a warning if -mieee is specified
304 and then ignore it; eventually, we should either get it right or
305 disable the option altogether. */
309 if (TARGET_ABI_UNICOSMK)
310 warning (0, "-mieee not supported on Unicos/Mk");
313 alpha_tp = ALPHA_TP_INSN;
314 alpha_fptm = ALPHA_FPTM_SU;
318 if (TARGET_IEEE_WITH_INEXACT)
320 if (TARGET_ABI_UNICOSMK)
321 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
324 alpha_tp = ALPHA_TP_INSN;
325 alpha_fptm = ALPHA_FPTM_SUI;
331 if (! strcmp (alpha_tp_string, "p"))
332 alpha_tp = ALPHA_TP_PROG;
333 else if (! strcmp (alpha_tp_string, "f"))
334 alpha_tp = ALPHA_TP_FUNC;
335 else if (! strcmp (alpha_tp_string, "i"))
336 alpha_tp = ALPHA_TP_INSN;
338 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
341 if (alpha_fprm_string)
343 if (! strcmp (alpha_fprm_string, "n"))
344 alpha_fprm = ALPHA_FPRM_NORM;
345 else if (! strcmp (alpha_fprm_string, "m"))
346 alpha_fprm = ALPHA_FPRM_MINF;
347 else if (! strcmp (alpha_fprm_string, "c"))
348 alpha_fprm = ALPHA_FPRM_CHOP;
349 else if (! strcmp (alpha_fprm_string, "d"))
350 alpha_fprm = ALPHA_FPRM_DYN;
352 error ("bad value %qs for -mfp-rounding-mode switch",
356 if (alpha_fptm_string)
358 if (strcmp (alpha_fptm_string, "n") == 0)
359 alpha_fptm = ALPHA_FPTM_N;
360 else if (strcmp (alpha_fptm_string, "u") == 0)
361 alpha_fptm = ALPHA_FPTM_U;
362 else if (strcmp (alpha_fptm_string, "su") == 0)
363 alpha_fptm = ALPHA_FPTM_SU;
364 else if (strcmp (alpha_fptm_string, "sui") == 0)
365 alpha_fptm = ALPHA_FPTM_SUI;
367 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
370 if (alpha_cpu_string)
372 for (i = 0; cpu_table [i].name; i++)
373 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
375 alpha_tune = alpha_cpu = cpu_table [i].processor;
376 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
377 target_flags |= cpu_table [i].flags;
380 if (! cpu_table [i].name)
381 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
384 if (alpha_tune_string)
386 for (i = 0; cpu_table [i].name; i++)
387 if (! strcmp (alpha_tune_string, cpu_table [i].name))
389 alpha_tune = cpu_table [i].processor;
392 if (! cpu_table [i].name)
393 error ("bad value %qs for -mcpu switch", alpha_tune_string);
396 /* Do some sanity checks on the above options. */
398 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
400 warning (0, "trap mode not supported on Unicos/Mk");
401 alpha_fptm = ALPHA_FPTM_N;
404 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
405 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
407 warning (0, "fp software completion requires -mtrap-precision=i");
408 alpha_tp = ALPHA_TP_INSN;
411 if (alpha_cpu == PROCESSOR_EV6)
413 /* Except for EV6 pass 1 (not released), we always have precise
414 arithmetic traps. Which means we can do software completion
415 without minding trap shadows. */
416 alpha_tp = ALPHA_TP_PROG;
419 if (TARGET_FLOAT_VAX)
421 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
423 warning (0, "rounding mode not supported for VAX floats");
424 alpha_fprm = ALPHA_FPRM_NORM;
426 if (alpha_fptm == ALPHA_FPTM_SUI)
428 warning (0, "trap mode not supported for VAX floats");
429 alpha_fptm = ALPHA_FPTM_SU;
431 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
432 warning (0, "128-bit long double not supported for VAX floats");
433 target_flags &= ~MASK_LONG_DOUBLE_128;
440 if (!alpha_mlat_string)
441 alpha_mlat_string = "L1";
443 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
444 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
446 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
447 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
448 && alpha_mlat_string[2] == '\0')
450 static int const cache_latency[][4] =
452 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
453 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
454 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
457 lat = alpha_mlat_string[1] - '0';
458 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
460 warning (0, "L%d cache latency unknown for %s",
461 lat, alpha_cpu_name[alpha_tune]);
465 lat = cache_latency[alpha_tune][lat-1];
467 else if (! strcmp (alpha_mlat_string, "main"))
469 /* Most current memories have about 370ns latency. This is
470 a reasonable guess for a fast cpu. */
475 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
479 alpha_memory_latency = lat;
482 /* Default the definition of "small data" to 8 bytes. */
486 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
488 target_flags |= MASK_SMALL_DATA;
489 else if (flag_pic == 2)
490 target_flags &= ~MASK_SMALL_DATA;
492 /* Align labels and loops for optimal branching. */
493 /* ??? Kludge these by not doing anything if we don't optimize and also if
494 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
495 if (optimize > 0 && write_symbols != SDB_DEBUG)
497 if (align_loops <= 0)
499 if (align_jumps <= 0)
502 if (align_functions <= 0)
503 align_functions = 16;
505 /* Acquire a unique set number for our register saves and restores. */
506 alpha_sr_alias_set = new_alias_set ();
508 /* Register variables and functions with the garbage collector. */
510 /* Set up function hooks. */
511 init_machine_status = alpha_init_machine_status;
513 /* Tell the compiler when we're using VAX floating point. */
514 if (TARGET_FLOAT_VAX)
516 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
517 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
518 REAL_MODE_FORMAT (TFmode) = NULL;
521 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
522 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
523 target_flags |= MASK_LONG_DOUBLE_128;
526 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
527 can be optimized to ap = __builtin_next_arg (0). */
528 if (TARGET_ABI_UNICOSMK)
529 targetm.expand_builtin_va_start = NULL;
532 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
535 zap_mask (HOST_WIDE_INT value)
539 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
541 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
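/* Editorial sketch (not from the original source): a standalone restatement
   of the zap-mask test above, using plain 64-bit types so it can be compiled
   in isolation. A zap mask is a value whose bytes are each either 0x00 or
   0xff, i.e. something the ZAP/ZAPNOT byte-mask insns can produce. */
#if 0
static int
example_zap_mask_p (unsigned long long value)
{
  int i;

  for (i = 0; i < 8; i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;
  return 1;			/* e.g. 0x00ff00ff00ff00ff qualifies */
}
#endif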
547 /* Return true if OP is valid for a particular TLS relocation.
548 We are already guaranteed that OP is a CONST. */
551 tls_symbolic_operand_1 (rtx op, int size, int unspec)
555 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
557 op = XVECEXP (op, 0, 0);
559 if (GET_CODE (op) != SYMBOL_REF)
562 switch (SYMBOL_REF_TLS_MODEL (op))
564 case TLS_MODEL_LOCAL_DYNAMIC:
565 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
566 case TLS_MODEL_INITIAL_EXEC:
567 return unspec == UNSPEC_TPREL && size == 64;
568 case TLS_MODEL_LOCAL_EXEC:
569 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
575 /* Used by aligned_memory_operand and unaligned_memory_operand to
576 resolve what reload is going to do with OP if it's a register. */
579 resolve_reload_operand (rtx op)
581 if (reload_in_progress)
584 if (GET_CODE (tmp) == SUBREG)
585 tmp = SUBREG_REG (tmp);
586 if (GET_CODE (tmp) == REG
587 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
589 op = reg_equiv_memory_loc[REGNO (tmp)];
597 /* The set of scalar modes supported differs from the default check-what-c-supports
598 version in that sometimes TFmode is available even when long double
599 indicates only DFmode. On Unicos/Mk, we have the situation that HImode
600 doesn't map to any C type, but of course we still support that. */
603 alpha_scalar_mode_supported_p (enum machine_mode mode)
611 case TImode: /* via optabs.c */
619 return TARGET_HAS_XFLOATING_LIBS;
626 /* Alpha implements a couple of integer vector mode operations when
627 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
628 which allows the vectorizer to operate on e.g. move instructions,
629 or when expand_vector_operations can do something useful. */
632 alpha_vector_mode_supported_p (enum machine_mode mode)
634 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
637 /* Return 1 if this function can directly return via $26. */
642 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
644 && alpha_sa_size () == 0
645 && get_frame_size () == 0
646 && current_function_outgoing_args_size == 0
647 && current_function_pretend_args_size == 0);
650 /* Return the ADDR_VEC associated with a tablejump insn. */
653 alpha_tablejump_addr_vec (rtx insn)
657 tmp = JUMP_LABEL (insn);
660 tmp = NEXT_INSN (tmp);
663 if (GET_CODE (tmp) == JUMP_INSN
664 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
665 return PATTERN (tmp);
669 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
672 alpha_tablejump_best_label (rtx insn)
674 rtx jump_table = alpha_tablejump_addr_vec (insn);
675 rtx best_label = NULL_RTX;
677 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
678 there for edge frequency counts from profile data. */
682 int n_labels = XVECLEN (jump_table, 1);
686 for (i = 0; i < n_labels; i++)
690 for (j = i + 1; j < n_labels; j++)
691 if (XEXP (XVECEXP (jump_table, 1, i), 0)
692 == XEXP (XVECEXP (jump_table, 1, j), 0))
695 if (count > best_count)
696 best_count = count, best_label = XVECEXP (jump_table, 1, i);
700 return best_label ? best_label : const0_rtx;
703 /* Return the TLS model to use for SYMBOL. */
705 static enum tls_model
706 tls_symbolic_operand_type (rtx symbol)
708 enum tls_model model;
710 if (GET_CODE (symbol) != SYMBOL_REF)
712 model = SYMBOL_REF_TLS_MODEL (symbol);
714 /* Local-exec with a 64-bit size is the same code as initial-exec. */
715 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
716 model = TLS_MODEL_INITIAL_EXEC;
721 /* Return true if the function DECL will share the same GP as any
722 function in the current unit of translation. */
725 decl_has_samegp (const_tree decl)
727 /* Functions that are not local can be overridden, and thus may
728 not share the same gp. */
729 if (!(*targetm.binds_local_p) (decl))
732 /* If -msmall-data is in effect, assume that there is only one GP
733 for the module, and so any local symbol has this property. We
734 need explicit relocations to be able to enforce this for symbols
735 not defined in this unit of translation, however. */
736 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
739 /* Functions that are not external are defined in this UoT. */
740 /* ??? Irritatingly, static functions not yet emitted are still
741 marked "external". Apply this to non-static functions only. */
742 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
745 /* Return true if EXP should be placed in the small data section. */
748 alpha_in_small_data_p (const_tree exp)
750 /* We want to merge strings, so we never consider them small data. */
751 if (TREE_CODE (exp) == STRING_CST)
754 /* Functions are never in the small data area. Duh. */
755 if (TREE_CODE (exp) == FUNCTION_DECL)
758 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
760 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
761 if (strcmp (section, ".sdata") == 0
762 || strcmp (section, ".sbss") == 0)
767 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
769 /* If this is an incomplete type with size 0, then we can't put it
770 in sdata because it might be too big when completed. */
771 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
778 #if TARGET_ABI_OPEN_VMS
780 alpha_linkage_symbol_p (const char *symname)
782 int symlen = strlen (symname);
785 return strcmp (&symname [symlen - 4], "..lk") == 0;
790 #define LINKAGE_SYMBOL_REF_P(X) \
791 ((GET_CODE (X) == SYMBOL_REF \
792 && alpha_linkage_symbol_p (XSTR (X, 0))) \
793 || (GET_CODE (X) == CONST \
794 && GET_CODE (XEXP (X, 0)) == PLUS \
795 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
796 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
799 /* legitimate_address_p recognizes an RTL expression that is a valid
800 memory address for an instruction. The MODE argument is the
801 machine mode for the MEM expression that wants to use this address.
803 For Alpha, we have either a constant address or the sum of a
804 register and a constant address, or just a register. For DImode,
805 any of those forms can be surrounded with an AND that clears the
806 low-order three bits; this is an "unaligned" access. */
809 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
811 /* If this is an ldq_u type address, discard the outer AND. */
813 && GET_CODE (x) == AND
814 && GET_CODE (XEXP (x, 1)) == CONST_INT
815 && INTVAL (XEXP (x, 1)) == -8)
818 /* Discard non-paradoxical subregs. */
819 if (GET_CODE (x) == SUBREG
820 && (GET_MODE_SIZE (GET_MODE (x))
821 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
824 /* Unadorned general registers are valid. */
827 ? STRICT_REG_OK_FOR_BASE_P (x)
828 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
831 /* Constant addresses (i.e. +/- 32k) are valid. */
832 if (CONSTANT_ADDRESS_P (x))
835 #if TARGET_ABI_OPEN_VMS
836 if (LINKAGE_SYMBOL_REF_P (x))
840 /* Register plus a small constant offset is valid. */
841 if (GET_CODE (x) == PLUS)
843 rtx ofs = XEXP (x, 1);
846 /* Discard non-paradoxical subregs. */
847 if (GET_CODE (x) == SUBREG
848 && (GET_MODE_SIZE (GET_MODE (x))
849 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
855 && NONSTRICT_REG_OK_FP_BASE_P (x)
856 && GET_CODE (ofs) == CONST_INT)
859 ? STRICT_REG_OK_FOR_BASE_P (x)
860 : NONSTRICT_REG_OK_FOR_BASE_P (x))
861 && CONSTANT_ADDRESS_P (ofs))
866 /* If we're managing explicit relocations, LO_SUM is valid, as
867 are small data symbols. */
868 else if (TARGET_EXPLICIT_RELOCS)
870 if (small_symbolic_operand (x, Pmode))
873 if (GET_CODE (x) == LO_SUM)
875 rtx ofs = XEXP (x, 1);
878 /* Discard non-paradoxical subregs. */
879 if (GET_CODE (x) == SUBREG
880 && (GET_MODE_SIZE (GET_MODE (x))
881 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
884 /* Must have a valid base register. */
887 ? STRICT_REG_OK_FOR_BASE_P (x)
888 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
891 /* The symbol must be local. */
892 if (local_symbolic_operand (ofs, Pmode)
893 || dtp32_symbolic_operand (ofs, Pmode)
894 || tp32_symbolic_operand (ofs, Pmode))
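/* Editorial sketch (not from the original source): the "AND with -8" form
   accepted at the top of alpha_legitimate_address_p models what ldq_u/stq_u
   do in hardware -- they access the aligned quadword containing the
   addressed byte. Plain C equivalent of that masking: */
#if 0
static unsigned long long
example_ldq_u_base (unsigned long long byte_addr)
{
  /* -8 is ...11111000 in two's complement, so this clears the low
     three bits: 0x1007 -> 0x1000.  */
  return byte_addr & (unsigned long long) -8;
}
#endif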
902 /* Build the SYMBOL_REF for __tls_get_addr. */
904 static GTY(()) rtx tls_get_addr_libfunc;
907 get_tls_get_addr (void)
909 if (!tls_get_addr_libfunc)
910 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
911 return tls_get_addr_libfunc;
914 /* Try machine-dependent ways of modifying an illegitimate address
915 to be legitimate. If we find one, return the new, valid address. */
918 alpha_legitimize_address (rtx x, rtx scratch,
919 enum machine_mode mode ATTRIBUTE_UNUSED)
921 HOST_WIDE_INT addend;
923 /* If the address is (plus reg const_int) and the CONST_INT is not a
924 valid offset, compute the high part of the constant and add it to
925 the register. Then our address is (plus temp low-part-const). */
926 if (GET_CODE (x) == PLUS
927 && GET_CODE (XEXP (x, 0)) == REG
928 && GET_CODE (XEXP (x, 1)) == CONST_INT
929 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
931 addend = INTVAL (XEXP (x, 1));
936 /* If the address is (const (plus FOO const_int)), find the low-order
937 part of the CONST_INT. Then load FOO plus any high-order part of the
938 CONST_INT into a register. Our address is (plus reg low-part-const).
939 This is done to reduce the number of GOT entries. */
940 if (can_create_pseudo_p ()
941 && GET_CODE (x) == CONST
942 && GET_CODE (XEXP (x, 0)) == PLUS
943 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
945 addend = INTVAL (XEXP (XEXP (x, 0), 1));
946 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
950 /* If we have a (plus reg const), emit the load as in (2), then add
951 the two registers, and finally generate (plus reg low-part-const) as
953 if (can_create_pseudo_p ()
954 && GET_CODE (x) == PLUS
955 && GET_CODE (XEXP (x, 0)) == REG
956 && GET_CODE (XEXP (x, 1)) == CONST
957 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
958 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
960 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
961 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
962 XEXP (XEXP (XEXP (x, 1), 0), 0),
963 NULL_RTX, 1, OPTAB_LIB_WIDEN);
967 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
968 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
970 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
972 switch (tls_symbolic_operand_type (x))
977 case TLS_MODEL_GLOBAL_DYNAMIC:
980 r0 = gen_rtx_REG (Pmode, 0);
981 r16 = gen_rtx_REG (Pmode, 16);
982 tga = get_tls_get_addr ();
983 dest = gen_reg_rtx (Pmode);
984 seq = GEN_INT (alpha_next_sequence_number++);
986 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
987 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
988 insn = emit_call_insn (insn);
989 CONST_OR_PURE_CALL_P (insn) = 1;
990 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
995 emit_libcall_block (insn, dest, r0, x);
998 case TLS_MODEL_LOCAL_DYNAMIC:
1001 r0 = gen_rtx_REG (Pmode, 0);
1002 r16 = gen_rtx_REG (Pmode, 16);
1003 tga = get_tls_get_addr ();
1004 scratch = gen_reg_rtx (Pmode);
1005 seq = GEN_INT (alpha_next_sequence_number++);
1007 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1008 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1009 insn = emit_call_insn (insn);
1010 CONST_OR_PURE_CALL_P (insn) = 1;
1011 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1013 insn = get_insns ();
1016 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1017 UNSPEC_TLSLDM_CALL);
1018 emit_libcall_block (insn, scratch, r0, eqv);
1020 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1021 eqv = gen_rtx_CONST (Pmode, eqv);
1023 if (alpha_tls_size == 64)
1025 dest = gen_reg_rtx (Pmode);
1026 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1027 emit_insn (gen_adddi3 (dest, dest, scratch));
1030 if (alpha_tls_size == 32)
1032 insn = gen_rtx_HIGH (Pmode, eqv);
1033 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1034 scratch = gen_reg_rtx (Pmode);
1035 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1037 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1039 case TLS_MODEL_INITIAL_EXEC:
1040 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1041 eqv = gen_rtx_CONST (Pmode, eqv);
1042 tp = gen_reg_rtx (Pmode);
1043 scratch = gen_reg_rtx (Pmode);
1044 dest = gen_reg_rtx (Pmode);
1046 emit_insn (gen_load_tp (tp));
1047 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1048 emit_insn (gen_adddi3 (dest, tp, scratch));
1051 case TLS_MODEL_LOCAL_EXEC:
1052 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1053 eqv = gen_rtx_CONST (Pmode, eqv);
1054 tp = gen_reg_rtx (Pmode);
1056 emit_insn (gen_load_tp (tp));
1057 if (alpha_tls_size == 32)
1059 insn = gen_rtx_HIGH (Pmode, eqv);
1060 insn = gen_rtx_PLUS (Pmode, tp, insn);
1061 tp = gen_reg_rtx (Pmode);
1062 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1064 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1070 if (local_symbolic_operand (x, Pmode))
1072 if (small_symbolic_operand (x, Pmode))
1076 if (can_create_pseudo_p ())
1077 scratch = gen_reg_rtx (Pmode);
1078 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1079 gen_rtx_HIGH (Pmode, x)));
1080 return gen_rtx_LO_SUM (Pmode, scratch, x);
1089 HOST_WIDE_INT low, high;
1091 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1093 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1097 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1098 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1099 1, OPTAB_LIB_WIDEN);
1101 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1102 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1103 1, OPTAB_LIB_WIDEN);
1105 return plus_constant (x, low);
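/* Editorial sketch (not from the original source): a simplified two-level
   version of the low/high split used just above, restated with plain 64-bit
   types. lda and ldah take signed 16-bit displacements, so an addend is
   decomposed as addend == high + low with low = sext16 (addend); for example
   0x8765 splits into high = 0x10000 and low = -0x789b. */
#if 0
static void
example_split_addend (long long addend, long long *high, long long *low)
{
  *low = ((addend & 0xffff) ^ 0x8000) - 0x8000;	/* sign-extend low 16 bits */
  *high = addend - *low;			/* multiple of 0x10000 */
}
#endif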
1109 /* Primarily this is required for TLS symbols, but given that our move
1110 patterns *ought* to be able to handle any symbol at any time, we
1111 should never be spilling symbolic operands to the constant pool, ever. */
1114 alpha_cannot_force_const_mem (rtx x)
1116 enum rtx_code code = GET_CODE (x);
1117 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1120 /* We do not allow indirect calls to be optimized into sibling calls, nor
1121 can we allow a call to a function with a different GP to be optimized
1125 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1127 /* Can't do indirect tail calls, since we don't know if the target
1128 uses the same GP. */
1132 /* Otherwise, we can make a tail call if the target function shares
1134 return decl_has_samegp (decl);
1138 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1142 /* Don't re-split. */
1143 if (GET_CODE (x) == LO_SUM)
1146 return small_symbolic_operand (x, Pmode) != 0;
1150 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1154 /* Don't re-split. */
1155 if (GET_CODE (x) == LO_SUM)
1158 if (small_symbolic_operand (x, Pmode))
1160 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1169 split_small_symbolic_operand (rtx x)
1172 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1176 /* Indicate that INSN cannot be duplicated. This is true for any insn
1177 that we've marked with gpdisp relocs, since those have to stay in
1178 1-1 correspondence with one another.
1180 Technically we could copy them if we could set up a mapping from one
1181 sequence number to another, across the set of insns to be duplicated.
1182 This seems overly complicated and error-prone since interblock motion
1183 from sched-ebb could move one of the pair of insns to a different block.
1185 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1186 then they'll be in a different block from their ldgp. Which could lead
1187 the bb reorder code to think that it would be ok to copy just the block
1188 containing the call and branch to the block containing the ldgp. */
1191 alpha_cannot_copy_insn_p (rtx insn)
1193 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1195 if (recog_memoized (insn) >= 0)
1196 return get_attr_cannot_copy (insn);
1202 /* Try a machine-dependent way of reloading an illegitimate address
1203 operand. If we find one, push the reload and return the new rtx. */
1206 alpha_legitimize_reload_address (rtx x,
1207 enum machine_mode mode ATTRIBUTE_UNUSED,
1208 int opnum, int type,
1209 int ind_levels ATTRIBUTE_UNUSED)
1211 /* We must recognize output that we have already generated ourselves. */
1212 if (GET_CODE (x) == PLUS
1213 && GET_CODE (XEXP (x, 0)) == PLUS
1214 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1215 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1216 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1218 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1219 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1224 /* We wish to handle large displacements off a base register by
1225 splitting the addend across an ldah and the mem insn. This
1226 cuts the number of extra insns needed from 3 to 1. */
1227 if (GET_CODE (x) == PLUS
1228 && GET_CODE (XEXP (x, 0)) == REG
1229 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1230 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1231 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1233 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1234 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1236 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1238 /* Check for 32-bit overflow. */
1239 if (high + low != val)
1242 /* Reload the high part into a base reg; leave the low part
1243 in the mem directly. */
1244 x = gen_rtx_PLUS (GET_MODE (x),
1245 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1249 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1250 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
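/* Editorial sketch (not from the original source): a worked instance of the
   displacement split above, restated on plain 64-bit types. For
   val = 0x12345678, low = 0x5678 and high = 0x12340000, so the high part can
   be reloaded with one ldah while the mem insn keeps the 16-bit displacement.
   For val = 0x123456789 the recombination fails, which is what the
   "high + low != val" test above catches. */
#if 0
static int
example_split_disp (long long val, long long *high, long long *low)
{
  *low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  *high = (((val - *low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  return *high + *low == val;	/* false when VAL overflows 32 bits */
}
#endif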
1258 /* Compute a (partial) cost for rtx X. Return true if the complete
1259 cost has been computed, and false if subexpressions should be
1260 scanned. In either case, *TOTAL contains the cost result. */
1263 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1265 enum machine_mode mode = GET_MODE (x);
1266 bool float_mode_p = FLOAT_MODE_P (mode);
1267 const struct alpha_rtx_cost_data *cost_data;
1270 cost_data = &alpha_rtx_cost_size;
1272 cost_data = &alpha_rtx_cost_data[alpha_tune];
1277 /* If this is an 8-bit constant, return zero since it can be used
1278 nearly anywhere with no cost. If it is a valid operand for an
1279 ADD or AND, likewise return 0 if we know it will be used in that
1280 context. Otherwise, return 2 since it might be used there later.
1281 All other constants take at least two insns. */
1282 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1290 if (x == CONST0_RTX (mode))
1292 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1293 || (outer_code == AND && and_operand (x, VOIDmode)))
1295 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1298 *total = COSTS_N_INSNS (2);
1304 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1305 *total = COSTS_N_INSNS (outer_code != MEM);
1306 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1307 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1308 else if (tls_symbolic_operand_type (x))
1309 /* Estimate of cost for call_pal rduniq. */
1310 /* ??? How many insns do we emit here? More than one... */
1311 *total = COSTS_N_INSNS (15);
1313 /* Otherwise we do a load from the GOT. */
1314 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1318 /* This is effectively an add_operand. */
1325 *total = cost_data->fp_add;
1326 else if (GET_CODE (XEXP (x, 0)) == MULT
1327 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1329 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1330 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1337 *total = cost_data->fp_mult;
1338 else if (mode == DImode)
1339 *total = cost_data->int_mult_di;
1341 *total = cost_data->int_mult_si;
1345 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1346 && INTVAL (XEXP (x, 1)) <= 3)
1348 *total = COSTS_N_INSNS (1);
1355 *total = cost_data->int_shift;
1360 *total = cost_data->fp_add;
1362 *total = cost_data->int_cmov;
1370 *total = cost_data->int_div;
1371 else if (mode == SFmode)
1372 *total = cost_data->fp_div_sf;
1374 *total = cost_data->fp_div_df;
1378 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1384 *total = COSTS_N_INSNS (1);
1392 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1398 case UNSIGNED_FLOAT:
1401 case FLOAT_TRUNCATE:
1402 *total = cost_data->fp_add;
1406 if (GET_CODE (XEXP (x, 0)) == MEM)
1409 *total = cost_data->fp_add;
1417 /* REF is an alignable memory location. Place an aligned SImode
1418 reference into *PALIGNED_MEM and the number of bits to shift into
1419 *PBITNUM. */
1423 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1426 HOST_WIDE_INT disp, offset;
1428 gcc_assert (GET_CODE (ref) == MEM);
1430 if (reload_in_progress
1431 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1433 base = find_replacement (&XEXP (ref, 0));
1434 gcc_assert (memory_address_p (GET_MODE (ref), base));
1437 base = XEXP (ref, 0);
1439 if (GET_CODE (base) == PLUS)
1440 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1444 /* Find the byte offset within an aligned word. If the memory itself is
1445 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1446 will have examined the base register and determined it is aligned, and
1447 thus displacements from it are naturally alignable. */
1448 if (MEM_ALIGN (ref) >= 32)
1453 /* Access the entire aligned word. */
1454 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1456 /* Convert the byte offset within the word to a bit offset. */
1457 if (WORDS_BIG_ENDIAN)
1458 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1461 *pbitnum = GEN_INT (offset);
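/* Editorial sketch (not from the original source): roughly what the
   aligned-load expansion does with *PALIGNED_MEM and *PBITNUM on a
   little-endian configuration (WORDS_BIG_ENDIAN clear), restated on plain
   integers: a halfword at byte offset 2 inside the aligned longword comes
   out after a 16-bit right shift. */
#if 0
static unsigned int
example_extract_halfword (unsigned int aligned_word, int bitnum)
{
  return (aligned_word >> bitnum) & 0xffff;	/* bitnum = byte offset * 8 */
}
#endif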
1464 /* Similar, but just get the address. Handle the two reload cases. */
1468 get_unaligned_address (rtx ref)
1471 HOST_WIDE_INT offset = 0;
1473 gcc_assert (GET_CODE (ref) == MEM);
1475 if (reload_in_progress
1476 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1478 base = find_replacement (&XEXP (ref, 0));
1480 gcc_assert (memory_address_p (GET_MODE (ref), base));
1483 base = XEXP (ref, 0);
1485 if (GET_CODE (base) == PLUS)
1486 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1488 return plus_constant (base, offset);
1491 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1492 X is always returned in a register. */
1495 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1497 if (GET_CODE (addr) == PLUS)
1499 ofs += INTVAL (XEXP (addr, 1));
1500 addr = XEXP (addr, 0);
1503 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1504 NULL_RTX, 1, OPTAB_LIB_WIDEN);
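/* Editorial sketch (not from the original source): why adding only OFS & 7
   is enough above. OFS - (OFS & 7) is a multiple of 8, so ADDR + (OFS & 7)
   and ADDR + OFS agree modulo 8, which is all the unaligned-access
   sequences care about. */
#if 0
static int
example_same_low_bits (unsigned long long addr, unsigned long long ofs)
{
  return ((addr + (ofs & 7)) & 7) == ((addr + ofs) & 7);	/* always 1 */
}
#endif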
1507 /* On the Alpha, all (non-symbolic) constants except zero go into
1508 a floating-point register via memory. Note that we cannot
1509 return anything that is not a subset of CLASS, and that some
1510 symbolic constants cannot be dropped to memory. */
1513 alpha_preferred_reload_class (rtx x, enum reg_class class)
1515 /* Zero is present in any register class. */
1516 if (x == CONST0_RTX (GET_MODE (x)))
1519 /* These sorts of constants we can easily drop to memory. */
1520 if (GET_CODE (x) == CONST_INT
1521 || GET_CODE (x) == CONST_DOUBLE
1522 || GET_CODE (x) == CONST_VECTOR)
1524 if (class == FLOAT_REGS)
1526 if (class == ALL_REGS)
1527 return GENERAL_REGS;
1531 /* All other kinds of constants should not (and in the case of HIGH
1532 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1533 secondary reload. */
1535 return (class == ALL_REGS ? GENERAL_REGS : class);
1540 /* Inform reload about cases where moving X with a mode MODE to a register in
1541 CLASS requires an extra scratch or immediate register. Return the class
1542 needed for the immediate register. */
1544 static enum reg_class
1545 alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
1546 enum machine_mode mode, secondary_reload_info *sri)
1548 /* Loading and storing HImode or QImode values to and from memory
1549 usually requires a scratch register. */
1550 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1552 if (any_memory_operand (x, mode))
1556 if (!aligned_memory_operand (x, mode))
1557 sri->icode = reload_in_optab[mode];
1560 sri->icode = reload_out_optab[mode];
1565 /* We also cannot do integral arithmetic into FP regs, as might result
1566 from register elimination into a DImode fp register. */
1567 if (class == FLOAT_REGS)
1569 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1570 return GENERAL_REGS;
1571 if (in_p && INTEGRAL_MODE_P (mode)
1572 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1573 return GENERAL_REGS;
1579 /* Subfunction of the following function. Update the flags of any MEM
1580 found in part of X. */
1583 alpha_set_memflags_1 (rtx *xp, void *data)
1585 rtx x = *xp, orig = (rtx) data;
1587 if (GET_CODE (x) != MEM)
1590 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1591 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1592 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1593 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1594 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1596 /* Sadly, we cannot use alias sets because the extra aliasing
1597 produced by the AND interferes. Given that two-byte quantities
1598 are the only thing we would be able to differentiate anyway,
1599 there does not seem to be any point in convoluting the early
1600 out of the alias check. */
1605 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1606 generated to perform a memory operation, look for any MEMs in either
1607 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1608 volatile flags from REF into each of the MEMs found. If REF is not
1609 a MEM, don't do anything. */
1612 alpha_set_memflags (rtx insn, rtx ref)
1616 if (GET_CODE (ref) != MEM)
1619 /* This is only called from alpha.md, after having had something
1620 generated from one of the insn patterns. So if everything is
1621 zero, the pattern is already up-to-date. */
1622 if (!MEM_VOLATILE_P (ref)
1623 && !MEM_IN_STRUCT_P (ref)
1624 && !MEM_SCALAR_P (ref)
1625 && !MEM_NOTRAP_P (ref)
1626 && !MEM_READONLY_P (ref))
1630 base_ptr = &PATTERN (insn);
1633 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1636 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1639 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1640 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1641 and return pc_rtx if successful. */
1644 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1645 HOST_WIDE_INT c, int n, bool no_output)
1649 /* Use a pseudo if highly optimizing and still generating RTL. */
1651 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1654 /* If this is a sign-extended 32-bit constant, we can do this in at most
1655 three insns, so do it if we have enough insns left. We always have
1656 a sign-extended 32-bit constant when compiling on a narrow machine. */
1658 if (HOST_BITS_PER_WIDE_INT != 64
1659 || c >> 31 == -1 || c >> 31 == 0)
1661 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1662 HOST_WIDE_INT tmp1 = c - low;
1663 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1664 HOST_WIDE_INT extra = 0;
1666 /* If HIGH will be interpreted as negative but the constant is
1667 positive, we must adjust it to do two ldah insns. */
1669 if ((high & 0x8000) != 0 && c >= 0)
1673 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1676 if (c == low || (low == 0 && extra == 0))
1678 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1679 but that meant that we can't handle INT_MIN on 32-bit machines
1680 (like NT/Alpha), because we recurse indefinitely through
1681 emit_move_insn to gen_movdi. So instead, since we know exactly
1682 what we want, create it explicitly. */
1687 target = gen_reg_rtx (mode);
1688 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1691 else if (n >= 2 + (extra != 0))
1695 if (!can_create_pseudo_p ())
1697 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1701 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1704 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1705 This means that if we go through expand_binop, we'll try to
1706 generate extensions, etc, which will require new pseudos, which
1707 will fail during some split phases. The SImode add patterns
1708 still exist, but are not named. So build the insns by hand. */
1713 subtarget = gen_reg_rtx (mode);
1714 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1715 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1721 target = gen_reg_rtx (mode);
1722 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1723 insn = gen_rtx_SET (VOIDmode, target, insn);
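/* Editorial sketch (not from the original source): a worked instance of the
   decomposition above. For c = 0x7fff8000 we get low = -0x8000; the
   first-pass high would be -0x8000, which sign-extends the wrong way for a
   nonnegative constant, so extra becomes 0x4000 and high is recomputed as
   0x4000. The value is then rebuilt as two ldah-style adds and one lda-style
   add, as checked by the helper below. */
#if 0
static long long
example_rebuild (long long high, long long extra, long long low)
{
  /* (0x4000 << 16) + (0x4000 << 16) + (-0x8000) == 0x7fff8000.  */
  return (high << 16) + (extra << 16) + low;
}
#endif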
1729 /* If we couldn't do it that way, try some other methods. But if we have
1730 no instructions left, don't bother. Likewise, if this is SImode and
1731 we can't make pseudos, we can't do anything since the expand_binop
1732 and expand_unop calls will widen and try to make pseudos. */
1734 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1737 /* Next, see if we can load a related constant and then shift and possibly
1738 negate it to get the constant we want. Try this once for each
1739 increasing number of insns.
1741 for (i = 1; i < n; i++)
1743 /* First, see if minus some low bits, we've an easy load of
1746 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1749 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1754 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1755 target, 0, OPTAB_WIDEN);
1759 /* Next try complementing. */
1760 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1765 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1768 /* Next try to form a constant and do a left shift. We can do this
1769 if some low-order bits are zero; the exact_log2 call below tells
1770 us that information. The bits we are shifting out could be any
1771 value, but here we'll just try the 0- and sign-extended forms of
1772 the constant. To try to increase the chance of having the same
1773 constant in more than one insn, start at the highest number of
1774 bits to shift, but try all possibilities in case a ZAPNOT will
1777 bits = exact_log2 (c & -c);
1779 for (; bits > 0; bits--)
1782 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1785 new = (unsigned HOST_WIDE_INT)c >> bits;
1786 temp = alpha_emit_set_const (subtarget, mode, new,
1793 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1794 target, 0, OPTAB_WIDEN);
1798 /* Now try high-order zero bits. Here we try the shifted-in bits as
1799 all zero and all ones. Be careful to avoid shifting outside the
1800 mode and to avoid shifting outside the host wide int size. */
1801 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1802 confuse the recursive call and set all of the high 32 bits. */
1804 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1805 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1807 for (; bits > 0; bits--)
1810 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1813 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1814 temp = alpha_emit_set_const (subtarget, mode, new,
1821 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1822 target, 1, OPTAB_WIDEN);
1826 /* Now try high-order 1 bits. We get that with a sign-extension.
1827 But one bit isn't enough here. Be careful to avoid shifting outside
1828 the mode and to avoid shifting outside the host wide int size. */
1830 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1831 - floor_log2 (~ c) - 2);
1833 for (; bits > 0; bits--)
1836 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1839 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1840 temp = alpha_emit_set_const (subtarget, mode, new,
1847 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1848 target, 0, OPTAB_WIDEN);
1853 #if HOST_BITS_PER_WIDE_INT == 64
1854 /* Finally, see if we can load a value into the target that is the same as the
1855 constant except that all bytes that are 0 are changed to be 0xff. If we
1856 can, then we can do a ZAPNOT to obtain the desired constant. */
1859 for (i = 0; i < 64; i += 8)
1860 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1861 new |= (HOST_WIDE_INT) 0xff << i;
1863 /* We are only called for SImode and DImode. If this is SImode, ensure that
1864 we are sign extended to a full word. */
1867 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1871 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1876 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1877 target, 0, OPTAB_WIDEN);
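/* Editorial sketch (not from the original source): a worked instance of the
   ZAPNOT trick above. For c = 0x1200340056007800, every zero byte is turned
   into 0xff, giving new = 0x12ff34ff56ff78ff, which may be cheaper to load.
   The mask c | ~new is then 0xff00ff00ff00ff00 -- a zap mask -- and ANDing
   the loaded value with it clears exactly the bytes that were zero in c. */
#if 0
static unsigned long long
example_zapnot_rebuild (unsigned long long c)
{
  unsigned long long new_c = c, mask;
  int i;

  for (i = 0; i < 64; i += 8)
    if ((new_c & (0xffULL << i)) == 0)
      new_c |= 0xffULL << i;

  mask = c | ~new_c;		/* 0xff where c's byte is nonzero */
  return new_c & mask;		/* == c */
}
#endif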
1885 /* Try to output insns to set TARGET equal to the constant C if it can be
1886 done in N or fewer insns. Do all computations in MODE. Returns the place
1887 where the output has been placed if it can be done and the insns have been
1888 emitted. If it would take more than N insns, zero is returned and no
1889 insns are emitted. */
1892 alpha_emit_set_const (rtx target, enum machine_mode mode,
1893 HOST_WIDE_INT c, int n, bool no_output)
1895 enum machine_mode orig_mode = mode;
1896 rtx orig_target = target;
1900 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1901 can't load this constant in one insn, do this in DImode. */
1902 if (!can_create_pseudo_p () && mode == SImode
1903 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1905 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1909 target = no_output ? NULL : gen_lowpart (DImode, target);
1912 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1914 target = no_output ? NULL : gen_lowpart (DImode, target);
1918 /* Try 1 insn, then 2, then up to N. */
1919 for (i = 1; i <= n; i++)
1921 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1929 insn = get_last_insn ();
1930 set = single_set (insn);
1931 if (! CONSTANT_P (SET_SRC (set)))
1932 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1937 /* Allow for the case where we changed the mode of TARGET. */
1940 if (result == target)
1941 result = orig_target;
1942 else if (mode != orig_mode)
1943 result = gen_lowpart (orig_mode, result);
1949 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1950 fall back to a straightforward decomposition. We do this to avoid
1951 exponential run times encountered when looking for longer sequences
1952 with alpha_emit_set_const. */
1955 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1957 HOST_WIDE_INT d1, d2, d3, d4;
1959 /* Decompose the entire word */
1960 #if HOST_BITS_PER_WIDE_INT >= 64
1961 gcc_assert (c2 == -(c1 < 0));
1962 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1964 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1965 c1 = (c1 - d2) >> 32;
1966 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1968 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1969 gcc_assert (c1 == d4);
1971 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1973 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1974 gcc_assert (c1 == d2);
1976 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1978 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1979 gcc_assert (c2 == d4);
1982 /* Construct the high word */
1985 emit_move_insn (target, GEN_INT (d4));
1987 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1990 emit_move_insn (target, GEN_INT (d3));
1992 /* Shift it into place */
1993 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1995 /* Add in the low bits. */
1997 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1999 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
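/* Editorial sketch (not from the original source): a worked instance of the
   four-part decomposition above. For c = 0x1234567890abcdef:
     d1 = -0x3211, d2 = -0x6f540000, d3 = 0x5679, d4 = 0x12340000,
   and ((d4 + d3) << 32) + d2 + d1 reconstructs c, matching the sequence of
   moves emitted above. */
#if 0
static long long
example_rebuild_long_const (long long d1, long long d2, long long d3,
                            long long d4)
{
  long long t = d4 + d3;	/* high 32 bits */
  t <<= 32;			/* shift into place */
  return t + d2 + d1;		/* add in the low bits */
}
#endif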
2004 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2008 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2010 HOST_WIDE_INT i0, i1;
2012 if (GET_CODE (x) == CONST_VECTOR)
2013 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2016 if (GET_CODE (x) == CONST_INT)
2021 else if (HOST_BITS_PER_WIDE_INT >= 64)
2023 i0 = CONST_DOUBLE_LOW (x);
2028 i0 = CONST_DOUBLE_LOW (x);
2029 i1 = CONST_DOUBLE_HIGH (x);
2036 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2037 are willing to load the value into a register via a move pattern.
2038 Normally this is all symbolic constants, integral constants that
2039 take three or fewer instructions, and floating-point zero. */
2042 alpha_legitimate_constant_p (rtx x)
2044 enum machine_mode mode = GET_MODE (x);
2045 HOST_WIDE_INT i0, i1;
2047 switch (GET_CODE (x))
2055 /* TLS symbols are never valid. */
2056 return SYMBOL_REF_TLS_MODEL (x) == 0;
2059 if (x == CONST0_RTX (mode))
2061 if (FLOAT_MODE_P (mode))
2066 if (x == CONST0_RTX (mode))
2068 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2070 if (GET_MODE_SIZE (mode) != 8)
2076 if (TARGET_BUILD_CONSTANTS)
2078 alpha_extract_integer (x, &i0, &i1);
2079 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2080 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2088 /* Operand 1 is known to be a constant, and should require more than one
2089 instruction to load. Emit that multi-part load. */
2092 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2094 HOST_WIDE_INT i0, i1;
2095 rtx temp = NULL_RTX;
2097 alpha_extract_integer (operands[1], &i0, &i1);
2099 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2100 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2102 if (!temp && TARGET_BUILD_CONSTANTS)
2103 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2107 if (!rtx_equal_p (operands[0], temp))
2108 emit_move_insn (operands[0], temp);
2115 /* Expand a move instruction; return true if all work is done.
2116 We don't handle non-bwx subword loads here. */
2119 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2121 /* If the output is not a register, the input must be. */
2122 if (GET_CODE (operands[0]) == MEM
2123 && ! reg_or_0_operand (operands[1], mode))
2124 operands[1] = force_reg (mode, operands[1]);
2126 /* Allow legitimize_address to perform some simplifications. */
2127 if (mode == Pmode && symbolic_operand (operands[1], mode))
2131 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2134 if (tmp == operands[0])
2141 /* Early out for non-constants and valid constants. */
2142 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2145 /* Split large integers. */
2146 if (GET_CODE (operands[1]) == CONST_INT
2147 || GET_CODE (operands[1]) == CONST_DOUBLE
2148 || GET_CODE (operands[1]) == CONST_VECTOR)
2150 if (alpha_split_const_mov (mode, operands))
2154 /* Otherwise we've nothing left but to drop the thing to memory. */
2155 operands[1] = force_const_mem (mode, operands[1]);
2156 if (reload_in_progress)
2158 emit_move_insn (operands[0], XEXP (operands[1], 0));
2159 operands[1] = replace_equiv_address (operands[1], operands[0]);
2162 operands[1] = validize_mem (operands[1]);
2166 /* Expand a non-bwx QImode or HImode move instruction;
2167 return true if all work is done. */
2170 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2174 /* If the output is not a register, the input must be. */
2175 if (MEM_P (operands[0]))
2176 operands[1] = force_reg (mode, operands[1]);
2178 /* Handle four memory cases, unaligned and aligned for either the input
2179 or the output. The only case where we can be called during reload is
2180 for aligned loads; all other cases require temporaries. */
2182 if (any_memory_operand (operands[1], mode))
2184 if (aligned_memory_operand (operands[1], mode))
2186 if (reload_in_progress)
2189 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2191 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2196 rtx aligned_mem, bitnum;
2197 rtx scratch = gen_reg_rtx (SImode);
2201 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2203 subtarget = operands[0];
2204 if (GET_CODE (subtarget) == REG)
2205 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2207 subtarget = gen_reg_rtx (DImode), copyout = true;
2210 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2213 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2218 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2223 /* Don't pass these as parameters since that makes the generated
2224 code depend on parameter evaluation order which will cause
2225 bootstrap failures. */
2227 rtx temp1, temp2, subtarget, ua;
2230 temp1 = gen_reg_rtx (DImode);
2231 temp2 = gen_reg_rtx (DImode);
2233 subtarget = operands[0];
2234 if (GET_CODE (subtarget) == REG)
2235 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2237 subtarget = gen_reg_rtx (DImode), copyout = true;
2239 ua = get_unaligned_address (operands[1]);
2241 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2243 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2245 alpha_set_memflags (seq, operands[1]);
2249 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2254 if (any_memory_operand (operands[0], mode))
2256 if (aligned_memory_operand (operands[0], mode))
2258 rtx aligned_mem, bitnum;
2259 rtx temp1 = gen_reg_rtx (SImode);
2260 rtx temp2 = gen_reg_rtx (SImode);
2262 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2264 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2269 rtx temp1 = gen_reg_rtx (DImode);
2270 rtx temp2 = gen_reg_rtx (DImode);
2271 rtx temp3 = gen_reg_rtx (DImode);
2272 rtx ua = get_unaligned_address (operands[0]);
2275 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2277 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2279 alpha_set_memflags (seq, operands[0]);
2288 /* Implement the movmisalign patterns. One of the operands is a memory
2289 that is not naturally aligned. Emit instructions to load it. */
2292 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2294 /* Honor misaligned loads, as we promised we would. */
2295 if (MEM_P (operands[1]))
2299 if (register_operand (operands[0], mode))
2302 tmp = gen_reg_rtx (mode);
2304 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2305 if (tmp != operands[0])
2306 emit_move_insn (operands[0], tmp);
2308 else if (MEM_P (operands[0]))
2310 if (!reg_or_0_operand (operands[1], mode))
2311 operands[1] = force_reg (mode, operands[1]);
2312 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2318 /* Generate an unsigned DImode to FP conversion. This is the same code
2319 optabs would emit if we didn't have TFmode patterns.
2321 For SFmode, this is the only construction I've found that can pass
2322 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2323 intermediates will work, because you'll get intermediate rounding
2324 that ruins the end result. Some of this could be fixed by turning
2325 on round-to-positive-infinity, but that requires diddling the fpsr,
2326 which kills performance. I tried turning this around and converting
2327 to a negative number, so that I could turn on /m, but either I did
2327 it wrong or there's something else, because I wound up with the exact
2329 same single-bit error. There is a branch-less form of this same code:
2340 fcmoveq $f10,$f11,$f0
2342 I'm not using it because it's the same number of instructions as
2343 this branch-full form, and it has more serialized long latency
2344 instructions on the critical path.
2346 For DFmode, we can avoid rounding errors by breaking up the word
2347 into two pieces, converting them separately, and adding them back:
2349 LC0: .long 0,0x5f800000
2354 cpyse $f11,$f31,$f10
2355 cpyse $f31,$f11,$f11
2363 This doesn't seem to be a clear-cut win over the optabs form.
2364 It probably all depends on the distribution of numbers being
2365 converted -- in the optabs form, all but high-bit-set has a
2366 much lower minimum execution time. */
2369 alpha_emit_floatuns (rtx operands[2])
2371 rtx neglab, donelab, i0, i1, f0, in, out;
2372 enum machine_mode mode;
2375 in = force_reg (DImode, operands[1]);
2376 mode = GET_MODE (out);
2377 neglab = gen_label_rtx ();
2378 donelab = gen_label_rtx ();
2379 i0 = gen_reg_rtx (DImode);
2380 i1 = gen_reg_rtx (DImode);
2381 f0 = gen_reg_rtx (mode);
2383 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2385 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2386 emit_jump_insn (gen_jump (donelab));
2389 emit_label (neglab);
2391 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2392 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2393 emit_insn (gen_iordi3 (i0, i0, i1));
2394 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2395 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2397 emit_label (donelab);
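
/* A minimal plain-C sketch of the sequence emitted above (an illustration
   only, assuming a 64-bit unsigned long): a value with the high bit clear
   converts directly, while a value with the high bit set is halved with the
   low bit folded back in -- so the final doubling rounds the same way a
   direct conversion would -- then converted and doubled.  */
static double
floatuns_sketch (unsigned long x)
{
  if ((long) x >= 0)
    return (double) (long) x;
  return 2.0 * (double) (long) ((x >> 1) | (x & 1));
}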
2400 /* Generate the comparison for a conditional branch. */
2403 alpha_emit_conditional_branch (enum rtx_code code)
2405 enum rtx_code cmp_code, branch_code;
2406 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2407 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2410 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2412 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2414 alpha_compare.fp_p = 0;
2417 /* The general case: fold the comparison code to the types of compares
2418 that we have, choosing the branch as necessary. */
2421 case EQ: case LE: case LT: case LEU: case LTU:
2423 /* We have these compares: */
2424 cmp_code = code, branch_code = NE;
2429 /* These must be reversed. */
2430 cmp_code = reverse_condition (code), branch_code = EQ;
2433 case GE: case GT: case GEU: case GTU:
2434 /* For FP, we swap them, for INT, we reverse them. */
2435 if (alpha_compare.fp_p)
2437 cmp_code = swap_condition (code);
2439 tem = op0, op0 = op1, op1 = tem;
2443 cmp_code = reverse_condition (code);
2452 if (alpha_compare.fp_p)
2455 if (flag_unsafe_math_optimizations)
2457 /* When we are not as concerned about non-finite values, and we
2458 are comparing against zero, we can branch directly. */
2459 if (op1 == CONST0_RTX (DFmode))
2460 cmp_code = UNKNOWN, branch_code = code;
2461 else if (op0 == CONST0_RTX (DFmode))
2463 /* Undo the swap we probably did just above. */
2464 tem = op0, op0 = op1, op1 = tem;
2465 branch_code = swap_condition (cmp_code);
2471 /* ??? We mark the branch mode to be CCmode to prevent the
2472 compare and branch from being combined, since the compare
2473 insn follows IEEE rules that the branch does not. */
2474 branch_mode = CCmode;
2481 /* The following optimizations are only for signed compares. */
2482 if (code != LEU && code != LTU && code != GEU && code != GTU)
2484 /* Whee. Compare and branch against 0 directly. */
2485 if (op1 == const0_rtx)
2486 cmp_code = UNKNOWN, branch_code = code;
2488 /* If the constant doesn't fit into an immediate, but can
2489 be generated by lda/ldah, we adjust the argument and
2490 compare against zero, so we can use beq/bne directly. */
2491 /* ??? Don't do this when comparing against symbols, otherwise
2492 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2493 be declared false out of hand (at least for non-weak). */
2494 else if (GET_CODE (op1) == CONST_INT
2495 && (code == EQ || code == NE)
2496 && !(symbolic_operand (op0, VOIDmode)
2497 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2499 rtx n_op1 = GEN_INT (-INTVAL (op1));
2501 if (! satisfies_constraint_I (op1)
2502 && (satisfies_constraint_K (n_op1)
2503 || satisfies_constraint_L (n_op1)))
2504 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2508 if (!reg_or_0_operand (op0, DImode))
2509 op0 = force_reg (DImode, op0);
2510 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2511 op1 = force_reg (DImode, op1);
2514 /* Emit an initial compare instruction, if necessary. */
2516 if (cmp_code != UNKNOWN)
2518 tem = gen_reg_rtx (cmp_mode);
2519 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2522 /* Zero the operands. */
2523 memset (&alpha_compare, 0, sizeof (alpha_compare));
2525 /* Return the branch comparison. */
2526 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
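
/* A minimal plain-C sketch of the constant-adjustment trick above (an
   illustration only; the constant is hypothetical, chosen merely to show the
   shape): an equality test against a constant too wide for an 8-bit
   immediate, but whose negation lda/ldah can build, becomes an add followed
   by a compare against zero.  */
static int
eq_via_add_sketch (unsigned long x)
{
  const unsigned long c = 0x12340;	/* hypothetical constant */
  return (x + -c) == 0;			/* same truth value as x == c */
}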
2529 /* Certain simplifications can be done to make invalid setcc operations
2530 valid. Return the final comparison, or NULL if we can't work. */
2533 alpha_emit_setcc (enum rtx_code code)
2535 enum rtx_code cmp_code;
2536 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2537 int fp_p = alpha_compare.fp_p;
2540 /* Zero the operands. */
2541 memset (&alpha_compare, 0, sizeof (alpha_compare));
2543 if (fp_p && GET_MODE (op0) == TFmode)
2545 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2550 if (fp_p && !TARGET_FIX)
2553 /* The general case: fold the comparison code to the types of compares
2554 that we have, choosing the branch as necessary. */
2559 case EQ: case LE: case LT: case LEU: case LTU:
2561 /* We have these compares. */
2563 cmp_code = code, code = NE;
2567 if (!fp_p && op1 == const0_rtx)
2572 cmp_code = reverse_condition (code);
2576 case GE: case GT: case GEU: case GTU:
2577 /* These normally need swapping, but for integer zero we have
2578 special patterns that recognize swapped operands. */
2579 if (!fp_p && op1 == const0_rtx)
2581 code = swap_condition (code);
2583 cmp_code = code, code = NE;
2584 tmp = op0, op0 = op1, op1 = tmp;
2593 if (!register_operand (op0, DImode))
2594 op0 = force_reg (DImode, op0);
2595 if (!reg_or_8bit_operand (op1, DImode))
2596 op1 = force_reg (DImode, op1);
2599 /* Emit an initial compare instruction, if necessary. */
2600 if (cmp_code != UNKNOWN)
2602 enum machine_mode mode = fp_p ? DFmode : DImode;
2604 tmp = gen_reg_rtx (mode);
2605 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2606 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2608 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2612 /* Return the setcc comparison. */
2613 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2617 /* Rewrite a comparison against zero CMP of the form
2618 (CODE (cc0) (const_int 0)) so it can be written validly in
2619 a conditional move (if_then_else CMP ...).
2620 If both of the operands that set cc0 are nonzero we must emit
2621 an insn to perform the compare (it can't be done within
2622 the conditional move). */
2625 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2627 enum rtx_code code = GET_CODE (cmp);
2628 enum rtx_code cmov_code = NE;
2629 rtx op0 = alpha_compare.op0;
2630 rtx op1 = alpha_compare.op1;
2631 int fp_p = alpha_compare.fp_p;
2632 enum machine_mode cmp_mode
2633 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2634 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2635 enum machine_mode cmov_mode = VOIDmode;
2636 int local_fast_math = flag_unsafe_math_optimizations;
2639 /* Zero the operands. */
2640 memset (&alpha_compare, 0, sizeof (alpha_compare));
2642 if (fp_p != FLOAT_MODE_P (mode))
2644 enum rtx_code cmp_code;
2649 /* If we have fp<->int register move instructions, do a cmov by
2650 performing the comparison in fp registers, and move the
2651 zero/nonzero value to integer registers, where we can then
2652 use a normal cmov, or vice-versa. */
2656 case EQ: case LE: case LT: case LEU: case LTU:
2657 /* We have these compares. */
2658 cmp_code = code, code = NE;
2662 /* This must be reversed. */
2663 cmp_code = EQ, code = EQ;
2666 case GE: case GT: case GEU: case GTU:
2667 /* These normally need swapping, but for integer zero we have
2668 special patterns that recognize swapped operands. */
2669 if (!fp_p && op1 == const0_rtx)
2670 cmp_code = code, code = NE;
2673 cmp_code = swap_condition (code);
2675 tem = op0, op0 = op1, op1 = tem;
2683 tem = gen_reg_rtx (cmp_op_mode);
2684 emit_insn (gen_rtx_SET (VOIDmode, tem,
2685 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2688 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2689 op0 = gen_lowpart (cmp_op_mode, tem);
2690 op1 = CONST0_RTX (cmp_op_mode);
2692 local_fast_math = 1;
2695 /* We may be able to use a conditional move directly.
2696 This avoids emitting spurious compares. */
2697 if (signed_comparison_operator (cmp, VOIDmode)
2698 && (!fp_p || local_fast_math)
2699 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2700 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2702 /* We can't put the comparison inside the conditional move;
2703 emit a compare instruction and put that inside the
2704 conditional move. Make sure we emit only comparisons we have;
2705 swap or reverse as necessary. */
2707 if (!can_create_pseudo_p ())
2712 case EQ: case LE: case LT: case LEU: case LTU:
2713 /* We have these compares: */
2717 /* This must be reversed. */
2718 code = reverse_condition (code);
2722 case GE: case GT: case GEU: case GTU:
2723 /* These must be swapped. */
2724 if (op1 != CONST0_RTX (cmp_mode))
2726 code = swap_condition (code);
2727 tem = op0, op0 = op1, op1 = tem;
2737 if (!reg_or_0_operand (op0, DImode))
2738 op0 = force_reg (DImode, op0);
2739 if (!reg_or_8bit_operand (op1, DImode))
2740 op1 = force_reg (DImode, op1);
2743 /* ??? We mark the branch mode to be CCmode to prevent the compare
2744 and cmov from being combined, since the compare insn follows IEEE
2745 rules that the cmov does not. */
2746 if (fp_p && !local_fast_math)
2749 tem = gen_reg_rtx (cmp_op_mode);
2750 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2751 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2754 /* Simplify a conditional move of two constants into a setcc with
2755 arithmetic. This is done with a splitter since combine would
2756 just undo the work if done during code generation. It also catches
2757 cases we wouldn't have before cse. */
2760 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2761 rtx t_rtx, rtx f_rtx)
2763 HOST_WIDE_INT t, f, diff;
2764 enum machine_mode mode;
2765 rtx target, subtarget, tmp;
2767 mode = GET_MODE (dest);
2772 if (((code == NE || code == EQ) && diff < 0)
2773 || (code == GE || code == GT))
2775 code = reverse_condition (code);
2776 diff = t, t = f, f = diff;
2780 subtarget = target = dest;
2783 target = gen_lowpart (DImode, dest);
2784 if (can_create_pseudo_p ())
2785 subtarget = gen_reg_rtx (DImode);
2789 /* Below, we must be careful to use copy_rtx on target and subtarget
2790 in intermediate insns, as they may be a subreg rtx, which may not
2793 if (f == 0 && exact_log2 (diff) > 0
2794 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2795 viable over a longer latency cmove. On EV5, the E0 slot is a
2796 scarce resource, and on EV4 shift has the same latency as a cmove. */
2797 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2799 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2800 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2802 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2803 GEN_INT (exact_log2 (t)));
2804 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2806 else if (f == 0 && t == -1)
2808 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2809 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2811 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2813 else if (diff == 1 || diff == 4 || diff == 8)
2817 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2818 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2821 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2824 add_op = GEN_INT (f);
2825 if (sext_add_operand (add_op, mode))
2827 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2829 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2830 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
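
/* A minimal plain-C sketch of the constant cmov splitting above (an
   illustration only, covering just the multiply/add shape used for
   diff == 1, 4 or 8): the 0/1 setcc result selects between F and T with a
   multiply and an add instead of a conditional move.  */
static long
cmov_constants_sketch (long cond_is_true, long t, long f)
{
  long s = (cond_is_true != 0);		/* setcc result: 0 or 1 */
  return s * (t - f) + f;		/* s ? t : f, by arithmetic */
}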
2842 /* Look up the X_floating library function name for the given operation. */
2845 struct xfloating_op GTY(())
2847 const enum rtx_code code;
2848 const char *const GTY((skip)) osf_func;
2849 const char *const GTY((skip)) vms_func;
2853 static GTY(()) struct xfloating_op xfloating_ops[] =
2855 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2856 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2857 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2858 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2859 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2860 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2861 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2862 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2863 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2864 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2865 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2866 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2867 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2868 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2869 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2872 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2874 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2875 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2879 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2881 struct xfloating_op *ops = xfloating_ops;
2882 long n = ARRAY_SIZE (xfloating_ops);
2885 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2887 /* How irritating. Nothing to key off for the main table. */
2888 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2891 n = ARRAY_SIZE (vax_cvt_ops);
2894 for (i = 0; i < n; ++i, ++ops)
2895 if (ops->code == code)
2897 rtx func = ops->libcall;
2900 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2901 ? ops->vms_func : ops->osf_func);
2902 ops->libcall = func;
2910 /* Most X_floating operations take the rounding mode as an argument.
2911 Compute that here. */
2914 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2915 enum alpha_fp_rounding_mode round)
2921 case ALPHA_FPRM_NORM:
2924 case ALPHA_FPRM_MINF:
2927 case ALPHA_FPRM_CHOP:
2930 case ALPHA_FPRM_DYN:
2936 /* XXX For reference, round to +inf is mode = 3. */
2939 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2945 /* Emit an X_floating library function call.
2947 Note that these functions do not follow normal calling conventions:
2948 TFmode arguments are passed in two integer registers (as opposed to
2949 indirect); TFmode return values appear in R16+R17.
2951 FUNC is the function to call.
2952 TARGET is where the output belongs.
2953 OPERANDS are the inputs.
2954 NOPERANDS is the count of inputs.
2955 EQUIV is the expression equivalent for the function.
2959 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2960 int noperands, rtx equiv)
2962 rtx usage = NULL_RTX, tmp, reg;
2967 for (i = 0; i < noperands; ++i)
2969 switch (GET_MODE (operands[i]))
2972 reg = gen_rtx_REG (TFmode, regno);
2977 reg = gen_rtx_REG (DFmode, regno + 32);
2982 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2985 reg = gen_rtx_REG (DImode, regno);
2993 emit_move_insn (reg, operands[i]);
2994 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2997 switch (GET_MODE (target))
3000 reg = gen_rtx_REG (TFmode, 16);
3003 reg = gen_rtx_REG (DFmode, 32);
3006 reg = gen_rtx_REG (DImode, 0);
3012 tmp = gen_rtx_MEM (QImode, func);
3013 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3014 const0_rtx, const0_rtx));
3015 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3016 CONST_OR_PURE_CALL_P (tmp) = 1;
3021 emit_libcall_block (tmp, target, reg, equiv);
3024 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3027 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3031 rtx out_operands[3];
3033 func = alpha_lookup_xfloating_lib_func (code);
3034 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3036 out_operands[0] = operands[1];
3037 out_operands[1] = operands[2];
3038 out_operands[2] = GEN_INT (mode);
3039 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3040 gen_rtx_fmt_ee (code, TFmode, operands[1],
3044 /* Emit an X_floating library function call for a comparison. */
3047 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3049 enum rtx_code cmp_code, res_code;
3050 rtx func, out, operands[2], note;
3052 /* X_floating library comparison functions return -1 (unordered),
3056 0 (false), or 1 (true). Convert the compare against the raw return value. */
3084 func = alpha_lookup_xfloating_lib_func (cmp_code);
3088 out = gen_reg_rtx (DImode);
3090 /* What's actually returned is -1,0,1, not a proper boolean value,
3091 so use an EXPR_LIST as with a generic libcall instead of a
3092 comparison type expression. */
3093 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3094 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3095 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3096 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3101 /* Emit an X_floating library function call for a conversion. */
3104 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3106 int noperands = 1, mode;
3107 rtx out_operands[2];
3109 enum rtx_code code = orig_code;
3111 if (code == UNSIGNED_FIX)
3114 func = alpha_lookup_xfloating_lib_func (code);
3116 out_operands[0] = operands[1];
3121 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3122 out_operands[1] = GEN_INT (mode);
3125 case FLOAT_TRUNCATE:
3126 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3127 out_operands[1] = GEN_INT (mode);
3134 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3135 gen_rtx_fmt_e (orig_code,
3136 GET_MODE (operands[0]),
3140 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3141 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3142 guarantee that the sequence
       set (OP[0] OP[2])
       set (OP[1] OP[3])
3145 is valid. Naturally, output operand ordering is little-endian.
3146 This is used by *movtf_internal and *movti_internal. */
3149 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3152 switch (GET_CODE (operands[1]))
3155 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3156 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3160 operands[3] = adjust_address (operands[1], DImode, 8);
3161 operands[2] = adjust_address (operands[1], DImode, 0);
3166 gcc_assert (operands[1] == CONST0_RTX (mode));
3167 operands[2] = operands[3] = const0_rtx;
3174 switch (GET_CODE (operands[0]))
3177 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3178 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3182 operands[1] = adjust_address (operands[0], DImode, 8);
3183 operands[0] = adjust_address (operands[0], DImode, 0);
3190 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3193 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3194 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
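
/* A minimal plain-C sketch of the FIXUP_OVERLAP swap above (an illustration
   only, with pointer equality standing in for register overlap): if writing
   the first destination half would clobber the still-needed second source
   half, the two half moves are emitted in the other order.  */
static void
split_move_sketch (unsigned long *d0, unsigned long *d1,
		   const unsigned long *s0, const unsigned long *s1)
{
  if (d0 == s1)				/* d0 overlaps the high source half */
    {
      *d1 = *s1;
      *d0 = *s0;
    }
  else
    {
      *d0 = *s0;
      *d1 = *s1;
    }
}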
3198 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3199 op2 is a register containing the sign bit, operation is the
3200 logical operation to be performed. */
3203 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3205 rtx high_bit = operands[2];
3209 alpha_split_tmode_pair (operands, TFmode, false);
3211 /* Detect three flavors of operand overlap. */
3213 if (rtx_equal_p (operands[0], operands[2]))
3215 else if (rtx_equal_p (operands[1], operands[2]))
3217 if (rtx_equal_p (operands[0], high_bit))
3224 emit_move_insn (operands[0], operands[2]);
3226 /* ??? If the destination overlaps both source tf and high_bit, then
3227 assume source tf is dead in its entirety and use the other half
3228 for a scratch register. Otherwise "scratch" is just the proper
3229 destination register. */
3230 scratch = operands[move < 2 ? 1 : 3];
3232 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3236 emit_move_insn (operands[0], operands[2]);
3238 emit_move_insn (operands[1], scratch);
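
/* A minimal plain-C sketch of the sign-bit frobbing above (an illustration
   only; assumes the TFmode value is stored little-endian as two 64-bit words
   with the sign bit at the top of the high word): negation XORs the sign bit
   of the high half, absolute value would AND it away, and the low half
   passes through untouched.  */
static void
tf_negate_sketch (unsigned long dst[2], const unsigned long src[2])
{
  dst[0] = src[0];			/* low 64 bits unchanged */
  dst[1] = src[1] ^ (1UL << 63);	/* flip the sign bit */
}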
3242 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
     unaligned data:
              unsigned:                     signed:
3246 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3248 lda r3,X(r11) lda r3,X+2(r11)
3249 extwl r1,r3,r1 extql r1,r3,r1
3250 extwh r2,r3,r2 extqh r2,r3,r2
3251 or r1,r2,r1 or r1,r2,r1
3254 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3255 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3256 lda r3,X(r11) lda r3,X(r11)
3257 extll r1,r3,r1 extll r1,r3,r1
3258 extlh r2,r3,r2 extlh r2,r3,r2
3259 or r1,r2,r1 addl r1,r2,r1
3261 quad: ldq_u r1,X(r11)
3270 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3271 HOST_WIDE_INT ofs, int sign)
3273 rtx meml, memh, addr, extl, exth, tmp, mema;
3274 enum machine_mode mode;
3276 if (TARGET_BWX && size == 2)
3278 meml = adjust_address (mem, QImode, ofs);
3279 memh = adjust_address (mem, QImode, ofs+1);
3280 if (BYTES_BIG_ENDIAN)
3281 tmp = meml, meml = memh, memh = tmp;
3282 extl = gen_reg_rtx (DImode);
3283 exth = gen_reg_rtx (DImode);
3284 emit_insn (gen_zero_extendqidi2 (extl, meml));
3285 emit_insn (gen_zero_extendqidi2 (exth, memh));
3286 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3287 NULL, 1, OPTAB_LIB_WIDEN);
3288 addr = expand_simple_binop (DImode, IOR, extl, exth,
3289 NULL, 1, OPTAB_LIB_WIDEN);
3291 if (sign && GET_MODE (tgt) != HImode)
3293 addr = gen_lowpart (HImode, addr);
3294 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3298 if (GET_MODE (tgt) != DImode)
3299 addr = gen_lowpart (GET_MODE (tgt), addr);
3300 emit_move_insn (tgt, addr);
3305 meml = gen_reg_rtx (DImode);
3306 memh = gen_reg_rtx (DImode);
3307 addr = gen_reg_rtx (DImode);
3308 extl = gen_reg_rtx (DImode);
3309 exth = gen_reg_rtx (DImode);
3311 mema = XEXP (mem, 0);
3312 if (GET_CODE (mema) == LO_SUM)
3313 mema = force_reg (Pmode, mema);
3315 /* AND addresses cannot be in any alias set, since they may implicitly
3316 alias surrounding code. Ideally we'd have some alias set that
3317 covered all types except those with alignment 8 or higher. */
3319 tmp = change_address (mem, DImode,
3320 gen_rtx_AND (DImode,
3321 plus_constant (mema, ofs),
3323 set_mem_alias_set (tmp, 0);
3324 emit_move_insn (meml, tmp);
3326 tmp = change_address (mem, DImode,
3327 gen_rtx_AND (DImode,
3328 plus_constant (mema, ofs + size - 1),
3330 set_mem_alias_set (tmp, 0);
3331 emit_move_insn (memh, tmp);
3333 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3335 emit_move_insn (addr, plus_constant (mema, -1));
3337 emit_insn (gen_extqh_be (extl, meml, addr));
3338 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3340 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3341 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3342 addr, 1, OPTAB_WIDEN);
3344 else if (sign && size == 2)
3346 emit_move_insn (addr, plus_constant (mema, ofs+2));
3348 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3349 emit_insn (gen_extqh_le (exth, memh, addr));
3351 /* We must use tgt here for the target. Alpha-vms port fails if we use
3352 addr for the target, because addr is marked as a pointer and combine
3353 knows that pointers are always sign-extended 32-bit values. */
3354 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3355 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3356 addr, 1, OPTAB_WIDEN);
3360 if (WORDS_BIG_ENDIAN)
3362 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3366 emit_insn (gen_extwh_be (extl, meml, addr));
3371 emit_insn (gen_extlh_be (extl, meml, addr));
3376 emit_insn (gen_extqh_be (extl, meml, addr));
3383 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3387 emit_move_insn (addr, plus_constant (mema, ofs));
3388 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3392 emit_insn (gen_extwh_le (exth, memh, addr));
3397 emit_insn (gen_extlh_le (exth, memh, addr));
3402 emit_insn (gen_extqh_le (exth, memh, addr));
3411 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3412 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3417 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
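
/* A minimal plain-C sketch of the merge performed above (an illustration
   only; assumes a little-endian 64-bit target where unsigned long is 8 bytes
   and a pointer fits in an unsigned long): the two aligned quadwords
   covering the datum are loaded and the fragments shifted together, much as
   ldq_u/extql/extqh do.  */
static unsigned long
unaligned_load_sketch (const unsigned char *p)
{
  unsigned long a = (unsigned long) p;
  unsigned long lo = *(const unsigned long *) (a & ~7UL);
  unsigned long hi = *(const unsigned long *) ((a + 7) & ~7UL);
  unsigned long sh = (a & 7) * 8;
  return sh ? (lo >> sh) | (hi << (64 - sh)) : lo;
}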
3420 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3423 alpha_expand_unaligned_store (rtx dst, rtx src,
3424 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3426 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3428 if (TARGET_BWX && size == 2)
3430 if (src != const0_rtx)
3432 dstl = gen_lowpart (QImode, src);
3433 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3434 NULL, 1, OPTAB_LIB_WIDEN);
3435 dsth = gen_lowpart (QImode, dsth);
3438 dstl = dsth = const0_rtx;
3440 meml = adjust_address (dst, QImode, ofs);
3441 memh = adjust_address (dst, QImode, ofs+1);
3442 if (BYTES_BIG_ENDIAN)
3443 addr = meml, meml = memh, memh = addr;
3445 emit_move_insn (meml, dstl);
3446 emit_move_insn (memh, dsth);
3450 dstl = gen_reg_rtx (DImode);
3451 dsth = gen_reg_rtx (DImode);
3452 insl = gen_reg_rtx (DImode);
3453 insh = gen_reg_rtx (DImode);
3455 dsta = XEXP (dst, 0);
3456 if (GET_CODE (dsta) == LO_SUM)
3457 dsta = force_reg (Pmode, dsta);
3459 /* AND addresses cannot be in any alias set, since they may implicitly
3460 alias surrounding code. Ideally we'd have some alias set that
3461 covered all types except those with alignment 8 or higher. */
3463 meml = change_address (dst, DImode,
3464 gen_rtx_AND (DImode,
3465 plus_constant (dsta, ofs),
3467 set_mem_alias_set (meml, 0);
3469 memh = change_address (dst, DImode,
3470 gen_rtx_AND (DImode,
3471 plus_constant (dsta, ofs + size - 1),
3473 set_mem_alias_set (memh, 0);
3475 emit_move_insn (dsth, memh);
3476 emit_move_insn (dstl, meml);
3477 if (WORDS_BIG_ENDIAN)
3479 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3481 if (src != const0_rtx)
3486 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3489 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3492 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3495 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3496 GEN_INT (size*8), addr));
3502 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3506 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3507 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3511 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3515 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3519 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3521 if (src != CONST0_RTX (GET_MODE (src)))
3523 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3524 GEN_INT (size*8), addr));
3529 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3532 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3535 emit_insn (gen_insql_le (insl, src, addr));
3540 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3545 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3549 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3550 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3554 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3559 if (src != CONST0_RTX (GET_MODE (src)))
3561 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3562 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3565 if (WORDS_BIG_ENDIAN)
3567 emit_move_insn (meml, dstl);
3568 emit_move_insn (memh, dsth);
3572 /* Must store high before low for degenerate case of aligned. */
3573 emit_move_insn (memh, dsth);
3574 emit_move_insn (meml, dstl);
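
/* A minimal plain-C sketch of the ins/msk read-modify-write above (an
   illustration only, storing a 2-byte value under the same little-endian
   64-bit assumptions as the load sketch): both covering quadwords are
   loaded, the destination bytes are masked out, the shifted source bytes are
   ORed in, and the quadwords are written back high half first.  */
static void
unaligned_store_hi_sketch (unsigned char *p, unsigned short val)
{
  unsigned long a = (unsigned long) p;
  unsigned long *lo = (unsigned long *) (a & ~7UL);
  unsigned long *hi = (unsigned long *) ((a + 1) & ~7UL);
  unsigned long sh = (a & 7) * 8;
  unsigned long insl = (unsigned long) val << sh;
  unsigned long insh = sh > 48 ? (unsigned long) val >> (64 - sh) : 0;
  unsigned long mskl = 0xffffUL << sh;
  unsigned long mskh = sh > 48 ? 0xffffUL >> (64 - sh) : 0;
  *hi = (*hi & ~mskh) | insh;		/* store high before low */
  *lo = (*lo & ~mskl) | insl;
}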
3578 /* The block move code tries to maximize speed by separating loads and
3579 stores at the expense of register pressure: we load all of the data
3580 before we store it back out. There are two secondary effects worth
3581 mentioning: this speeds copying to/from aligned and unaligned
3582 buffers, and it makes the code significantly easier to write. */
3584 #define MAX_MOVE_WORDS 8
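
/* A minimal plain-C sketch of the load-all-then-store-all strategy described
   above (an illustration only; assumes word-aligned, non-overlapping buffers
   of at most MAX_MOVE_WORDS words): every load is issued before any store,
   trading register pressure for freedom to schedule the memory operations
   apart.  */
static void
block_move_words_sketch (unsigned long *dst, const unsigned long *src,
			 int words)
{
  unsigned long tmp[MAX_MOVE_WORDS];
  int i;
  for (i = 0; i < words; ++i)		/* all of the loads ... */
    tmp[i] = src[i];
  for (i = 0; i < words; ++i)		/* ... then all of the stores */
    dst[i] = tmp[i];
}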
3586 /* Load an integral number of consecutive unaligned quadwords. */
3589 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3590 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3592 rtx const im8 = GEN_INT (-8);
3593 rtx const i64 = GEN_INT (64);
3594 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3595 rtx sreg, areg, tmp, smema;
3598 smema = XEXP (smem, 0);
3599 if (GET_CODE (smema) == LO_SUM)
3600 smema = force_reg (Pmode, smema);
3602 /* Generate all the tmp registers we need. */
3603 for (i = 0; i < words; ++i)
3605 data_regs[i] = out_regs[i];
3606 ext_tmps[i] = gen_reg_rtx (DImode);
3608 data_regs[words] = gen_reg_rtx (DImode);
3611 smem = adjust_address (smem, GET_MODE (smem), ofs);
3613 /* Load up all of the source data. */
3614 for (i = 0; i < words; ++i)
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
3618 plus_constant (smema, 8*i),
3620 set_mem_alias_set (tmp, 0);
3621 emit_move_insn (data_regs[i], tmp);
3624 tmp = change_address (smem, DImode,
3625 gen_rtx_AND (DImode,
3626 plus_constant (smema, 8*words - 1),
3628 set_mem_alias_set (tmp, 0);
3629 emit_move_insn (data_regs[words], tmp);
3631 /* Extract the half-word fragments. Unfortunately DEC decided to make
3632 extxh with offset zero a noop instead of zeroing the register, so
3633 we must take care of that edge condition ourselves with cmov. */
3635 sreg = copy_addr_to_reg (smema);
3636 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3638 if (WORDS_BIG_ENDIAN)
3639 emit_move_insn (sreg, plus_constant (sreg, 7));
3640 for (i = 0; i < words; ++i)
3642 if (WORDS_BIG_ENDIAN)
3644 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3645 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3649 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3650 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3652 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3653 gen_rtx_IF_THEN_ELSE (DImode,
3654 gen_rtx_EQ (DImode, areg,
3656 const0_rtx, ext_tmps[i])));
3659 /* Merge the half-words into whole words. */
3660 for (i = 0; i < words; ++i)
3662 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3663 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3667 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3668 may be NULL to store zeros. */
3671 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3672 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3674 rtx const im8 = GEN_INT (-8);
3675 rtx const i64 = GEN_INT (64);
3676 rtx ins_tmps[MAX_MOVE_WORDS];
3677 rtx st_tmp_1, st_tmp_2, dreg;
3678 rtx st_addr_1, st_addr_2, dmema;
3681 dmema = XEXP (dmem, 0);
3682 if (GET_CODE (dmema) == LO_SUM)
3683 dmema = force_reg (Pmode, dmema);
3685 /* Generate all the tmp registers we need. */
3686 if (data_regs != NULL)
3687 for (i = 0; i < words; ++i)
3688 ins_tmps[i] = gen_reg_rtx(DImode);
3689 st_tmp_1 = gen_reg_rtx(DImode);
3690 st_tmp_2 = gen_reg_rtx(DImode);
3693 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3695 st_addr_2 = change_address (dmem, DImode,
3696 gen_rtx_AND (DImode,
3697 plus_constant (dmema, words*8 - 1),
3699 set_mem_alias_set (st_addr_2, 0);
3701 st_addr_1 = change_address (dmem, DImode,
3702 gen_rtx_AND (DImode, dmema, im8));
3703 set_mem_alias_set (st_addr_1, 0);
3705 /* Load up the destination end bits. */
3706 emit_move_insn (st_tmp_2, st_addr_2);
3707 emit_move_insn (st_tmp_1, st_addr_1);
3709 /* Shift the input data into place. */
3710 dreg = copy_addr_to_reg (dmema);
3711 if (WORDS_BIG_ENDIAN)
3712 emit_move_insn (dreg, plus_constant (dreg, 7));
3713 if (data_regs != NULL)
3715 for (i = words-1; i >= 0; --i)
3717 if (WORDS_BIG_ENDIAN)
3719 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3720 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3724 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3725 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3728 for (i = words-1; i > 0; --i)
3730 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3731 ins_tmps[i-1], ins_tmps[i-1], 1,
3736 /* Split and merge the ends with the destination data. */
3737 if (WORDS_BIG_ENDIAN)
3739 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3740 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3744 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3745 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3748 if (data_regs != NULL)
3750 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3751 st_tmp_2, 1, OPTAB_WIDEN);
3752 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3753 st_tmp_1, 1, OPTAB_WIDEN);
3757 if (WORDS_BIG_ENDIAN)
3758 emit_move_insn (st_addr_1, st_tmp_1);
3760 emit_move_insn (st_addr_2, st_tmp_2);
3761 for (i = words-1; i > 0; --i)
3763 rtx tmp = change_address (dmem, DImode,
3764 gen_rtx_AND (DImode,
3765 plus_constant(dmema,
3766 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3768 set_mem_alias_set (tmp, 0);
3769 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3771 if (WORDS_BIG_ENDIAN)
3772 emit_move_insn (st_addr_2, st_tmp_2);
3774 emit_move_insn (st_addr_1, st_tmp_1);
3778 /* Expand string/block move operations.
3780 operands[0] is the pointer to the destination.
3781 operands[1] is the pointer to the source.
3782 operands[2] is the number of bytes to move.
3783 operands[3] is the alignment. */
3786 alpha_expand_block_move (rtx operands[])
3788 rtx bytes_rtx = operands[2];
3789 rtx align_rtx = operands[3];
3790 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3791 HOST_WIDE_INT bytes = orig_bytes;
3792 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3793 HOST_WIDE_INT dst_align = src_align;
3794 rtx orig_src = operands[1];
3795 rtx orig_dst = operands[0];
3796 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3798 unsigned int i, words, ofs, nregs = 0;
3800 if (orig_bytes <= 0)
3802 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3805 /* Look for additional alignment information from recorded register info. */
3807 tmp = XEXP (orig_src, 0);
3808 if (GET_CODE (tmp) == REG)
3809 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3810 else if (GET_CODE (tmp) == PLUS
3811 && GET_CODE (XEXP (tmp, 0)) == REG
3812 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3814 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3815 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3819 if (a >= 64 && c % 8 == 0)
3821 else if (a >= 32 && c % 4 == 0)
3823 else if (a >= 16 && c % 2 == 0)
3828 tmp = XEXP (orig_dst, 0);
3829 if (GET_CODE (tmp) == REG)
3830 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3831 else if (GET_CODE (tmp) == PLUS
3832 && GET_CODE (XEXP (tmp, 0)) == REG
3833 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3835 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3836 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3840 if (a >= 64 && c % 8 == 0)
3842 else if (a >= 32 && c % 4 == 0)
3844 else if (a >= 16 && c % 2 == 0)
3850 if (src_align >= 64 && bytes >= 8)
3854 for (i = 0; i < words; ++i)
3855 data_regs[nregs + i] = gen_reg_rtx (DImode);
3857 for (i = 0; i < words; ++i)
3858 emit_move_insn (data_regs[nregs + i],
3859 adjust_address (orig_src, DImode, ofs + i * 8));
3866 if (src_align >= 32 && bytes >= 4)
3870 for (i = 0; i < words; ++i)
3871 data_regs[nregs + i] = gen_reg_rtx (SImode);
3873 for (i = 0; i < words; ++i)
3874 emit_move_insn (data_regs[nregs + i],
3875 adjust_address (orig_src, SImode, ofs + i * 4));
3886 for (i = 0; i < words+1; ++i)
3887 data_regs[nregs + i] = gen_reg_rtx (DImode);
3889 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3897 if (! TARGET_BWX && bytes >= 4)
3899 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3900 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3907 if (src_align >= 16)
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3914 } while (bytes >= 2);
3916 else if (! TARGET_BWX)
3918 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3919 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3927 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3928 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3933 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3935 /* Now save it back out again. */
3939 /* Write out the data in whatever chunks reading the source allowed. */
3940 if (dst_align >= 64)
3942 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3944 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3951 if (dst_align >= 32)
3953 /* If the source has remaining DImode regs, write them out in two pieces. */
3955 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3957 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3958 NULL_RTX, 1, OPTAB_WIDEN);
3960 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3961 gen_lowpart (SImode, data_regs[i]));
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3963 gen_lowpart (SImode, tmp));
3968 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3970 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3977 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3979 /* Write out a remaining block of words using unaligned methods. */
3981 for (words = 1; i + words < nregs; words++)
3982 if (GET_MODE (data_regs[i + words]) != DImode)
3986 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3988 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3995 /* Due to the above, this won't be aligned. */
3996 /* ??? If we have more than one of these, consider constructing full
3997 words in registers and using alpha_expand_unaligned_store_words. */
3998 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4000 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4005 if (dst_align >= 16)
4006 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4008 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4013 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4015 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4020 /* The remainder must be byte copies. */
4023 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4024 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4033 alpha_expand_block_clear (rtx operands[])
4035 rtx bytes_rtx = operands[1];
4036 rtx align_rtx = operands[3];
4037 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4038 HOST_WIDE_INT bytes = orig_bytes;
4039 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4040 HOST_WIDE_INT alignofs = 0;
4041 rtx orig_dst = operands[0];
4043 int i, words, ofs = 0;
4045 if (orig_bytes <= 0)
4047 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4050 /* Look for stricter alignment. */
4051 tmp = XEXP (orig_dst, 0);
4052 if (GET_CODE (tmp) == REG)
4053 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4054 else if (GET_CODE (tmp) == PLUS
4055 && GET_CODE (XEXP (tmp, 0)) == REG