1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
47 #include "integrate.h"
50 #include "target-def.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
61 /* Specify which cpu to schedule for. */
62 enum processor_type alpha_tune;
64 /* Which cpu we're generating code for. */
65 enum processor_type alpha_cpu;
67 static const char * const alpha_cpu_name[] =
72 /* Specify how accurate floating-point traps need to be. */
74 enum alpha_trap_precision alpha_tp;
76 /* Specify the floating-point rounding mode. */
78 enum alpha_fp_rounding_mode alpha_fprm;
80 /* Specify which things cause traps. */
82 enum alpha_fp_trap_mode alpha_fptm;
/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */
87 struct alpha_compare alpha_compare;
89 /* Nonzero if inside of a function, because the Alpha asm can't
90 handle .files inside of functions. */
92 static int inside_function = FALSE;
94 /* The number of cycles of latency we should assume on memory reads. */
96 int alpha_memory_latency = 3;
98 /* Whether the function needs the GP. */
100 static int alpha_function_needs_gp;
102 /* The alias set for prologue/epilogue register save/restore. */
104 static GTY(()) int alpha_sr_alias_set;
106 /* The assembler name of the current function. */
108 static const char *alpha_fnname;
110 /* The next explicit relocation sequence number. */
111 extern GTY(()) int alpha_next_sequence_number;
112 int alpha_next_sequence_number = 1;
114 /* The literal and gpdisp sequence numbers for this insn, as printed
115 by %# and %* respectively. */
116 extern GTY(()) int alpha_this_literal_sequence_number;
117 extern GTY(()) int alpha_this_gpdisp_sequence_number;
118 int alpha_this_literal_sequence_number;
119 int alpha_this_gpdisp_sequence_number;
121 /* Costs of various operations on the different architectures. */
123 struct alpha_rtx_cost_data
125 unsigned char fp_add;
126 unsigned char fp_mult;
127 unsigned char fp_div_sf;
128 unsigned char fp_div_df;
129 unsigned char int_mult_si;
130 unsigned char int_mult_di;
131 unsigned char int_shift;
132 unsigned char int_cmov;
133 unsigned short int_div;
136 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
139 COSTS_N_INSNS (6), /* fp_add */
140 COSTS_N_INSNS (6), /* fp_mult */
141 COSTS_N_INSNS (34), /* fp_div_sf */
142 COSTS_N_INSNS (63), /* fp_div_df */
143 COSTS_N_INSNS (23), /* int_mult_si */
144 COSTS_N_INSNS (23), /* int_mult_di */
145 COSTS_N_INSNS (2), /* int_shift */
146 COSTS_N_INSNS (2), /* int_cmov */
147 COSTS_N_INSNS (97), /* int_div */
150 COSTS_N_INSNS (4), /* fp_add */
151 COSTS_N_INSNS (4), /* fp_mult */
152 COSTS_N_INSNS (15), /* fp_div_sf */
153 COSTS_N_INSNS (22), /* fp_div_df */
154 COSTS_N_INSNS (8), /* int_mult_si */
155 COSTS_N_INSNS (12), /* int_mult_di */
156 COSTS_N_INSNS (1) + 1, /* int_shift */
157 COSTS_N_INSNS (1), /* int_cmov */
158 COSTS_N_INSNS (83), /* int_div */
161 COSTS_N_INSNS (4), /* fp_add */
162 COSTS_N_INSNS (4), /* fp_mult */
163 COSTS_N_INSNS (12), /* fp_div_sf */
164 COSTS_N_INSNS (15), /* fp_div_df */
165 COSTS_N_INSNS (7), /* int_mult_si */
166 COSTS_N_INSNS (7), /* int_mult_di */
167 COSTS_N_INSNS (1), /* int_shift */
168 COSTS_N_INSNS (2), /* int_cmov */
169 COSTS_N_INSNS (86), /* int_div */
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one or two insns.  */
178 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
180 COSTS_N_INSNS (1), /* fp_add */
181 COSTS_N_INSNS (1), /* fp_mult */
182 COSTS_N_INSNS (1), /* fp_div_sf */
183 COSTS_N_INSNS (1) + 1, /* fp_div_df */
184 COSTS_N_INSNS (1) + 1, /* int_mult_si */
185 COSTS_N_INSNS (1) + 2, /* int_mult_di */
186 COSTS_N_INSNS (1), /* int_shift */
187 COSTS_N_INSNS (1), /* int_cmov */
188 COSTS_N_INSNS (6), /* int_div */
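/* Note for readers of the tables above: COSTS_N_INSNS (N) expands to
   N * 4, so an entry like COSTS_N_INSNS (1) + 1 means "one insn plus a
   small latency bias" -- cheaper than two insns, but slightly more
   expensive than a plain single-insn operation.  (This is only a
   reading aid, not additional tuning.)  */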
191 /* Get the number of args of a function in one of two ways. */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif
201 /* Declarations of static functions. */
202 static struct machine_function *alpha_init_machine_status (void);
203 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
205 #if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif
209 static void unicosmk_output_deferred_case_vectors (FILE *);
210 static void unicosmk_gen_dsib (unsigned long *);
211 static void unicosmk_output_ssib (FILE *, const char *);
212 static int unicosmk_need_dex (rtx);
214 /* Implement TARGET_HANDLE_OPTION. */
217 alpha_handle_option (size_t code, const char *arg, int value)
223 target_flags |= MASK_SOFT_FP;
227 case OPT_mieee_with_inexact:
228 target_flags |= MASK_IEEE_CONFORMANT;
232 if (value != 16 && value != 32 && value != 64)
233 error ("bad value %qs for -mtls-size switch", arg);
240 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
241 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
244 alpha_mangle_fundamental_type (tree type)
246 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
247 && TARGET_LONG_DOUBLE_128)
250 /* For all other types, use normal C++ mangling. */
255 /* Parse target option strings. */
258 override_options (void)
260 static const struct cpu_table {
261 const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
265 { "ev4", PROCESSOR_EV4, 0 },
266 { "ev45", PROCESSOR_EV4, 0 },
267 { "21064", PROCESSOR_EV4, 0 },
268 { "ev5", PROCESSOR_EV5, 0 },
269 { "21164", PROCESSOR_EV5, 0 },
270 { "ev56", PROCESSOR_EV5, MASK_BWX },
271 { "21164a", PROCESSOR_EV5, MASK_BWX },
272 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
275 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
277 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
284 /* Unicos/Mk doesn't have shared libraries. */
285 if (TARGET_ABI_UNICOSMK && flag_pic)
287 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
288 (flag_pic > 1) ? "PIC" : "pic");
  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
294 if (TARGET_ABI_UNICOSMK)
295 alpha_fprm = ALPHA_FPRM_DYN;
297 alpha_fprm = ALPHA_FPRM_NORM;
299 alpha_tp = ALPHA_TP_PROG;
300 alpha_fptm = ALPHA_FPTM_N;
302 /* We cannot use su and sui qualifiers for conversion instructions on
303 Unicos/Mk. I'm not sure if this is due to assembler or hardware
304 limitations. Right now, we issue a warning if -mieee is specified
305 and then ignore it; eventually, we should either get it right or
306 disable the option altogether. */
310 if (TARGET_ABI_UNICOSMK)
311 warning (0, "-mieee not supported on Unicos/Mk");
314 alpha_tp = ALPHA_TP_INSN;
315 alpha_fptm = ALPHA_FPTM_SU;
319 if (TARGET_IEEE_WITH_INEXACT)
321 if (TARGET_ABI_UNICOSMK)
322 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
325 alpha_tp = ALPHA_TP_INSN;
326 alpha_fptm = ALPHA_FPTM_SUI;
332 if (! strcmp (alpha_tp_string, "p"))
333 alpha_tp = ALPHA_TP_PROG;
334 else if (! strcmp (alpha_tp_string, "f"))
335 alpha_tp = ALPHA_TP_FUNC;
336 else if (! strcmp (alpha_tp_string, "i"))
337 alpha_tp = ALPHA_TP_INSN;
339 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
342 if (alpha_fprm_string)
344 if (! strcmp (alpha_fprm_string, "n"))
345 alpha_fprm = ALPHA_FPRM_NORM;
346 else if (! strcmp (alpha_fprm_string, "m"))
347 alpha_fprm = ALPHA_FPRM_MINF;
348 else if (! strcmp (alpha_fprm_string, "c"))
349 alpha_fprm = ALPHA_FPRM_CHOP;
350 else if (! strcmp (alpha_fprm_string,"d"))
351 alpha_fprm = ALPHA_FPRM_DYN;
353 error ("bad value %qs for -mfp-rounding-mode switch",
357 if (alpha_fptm_string)
359 if (strcmp (alpha_fptm_string, "n") == 0)
360 alpha_fptm = ALPHA_FPTM_N;
361 else if (strcmp (alpha_fptm_string, "u") == 0)
362 alpha_fptm = ALPHA_FPTM_U;
363 else if (strcmp (alpha_fptm_string, "su") == 0)
364 alpha_fptm = ALPHA_FPTM_SU;
365 else if (strcmp (alpha_fptm_string, "sui") == 0)
366 alpha_fptm = ALPHA_FPTM_SUI;
368 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
371 if (alpha_cpu_string)
373 for (i = 0; cpu_table [i].name; i++)
374 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
376 alpha_tune = alpha_cpu = cpu_table [i].processor;
377 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
378 target_flags |= cpu_table [i].flags;
381 if (! cpu_table [i].name)
382 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
385 if (alpha_tune_string)
387 for (i = 0; cpu_table [i].name; i++)
388 if (! strcmp (alpha_tune_string, cpu_table [i].name))
390 alpha_tune = cpu_table [i].processor;
393 if (! cpu_table [i].name)
394 error ("bad value %qs for -mcpu switch", alpha_tune_string);
397 /* Do some sanity checks on the above options. */
399 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
401 warning (0, "trap mode not supported on Unicos/Mk");
402 alpha_fptm = ALPHA_FPTM_N;
405 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
406 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
408 warning (0, "fp software completion requires -mtrap-precision=i");
409 alpha_tp = ALPHA_TP_INSN;
412 if (alpha_cpu == PROCESSOR_EV6)
414 /* Except for EV6 pass 1 (not released), we always have precise
415 arithmetic traps. Which means we can do software completion
416 without minding trap shadows. */
417 alpha_tp = ALPHA_TP_PROG;
420 if (TARGET_FLOAT_VAX)
422 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
424 warning (0, "rounding mode not supported for VAX floats");
425 alpha_fprm = ALPHA_FPRM_NORM;
427 if (alpha_fptm == ALPHA_FPTM_SUI)
429 warning (0, "trap mode not supported for VAX floats");
430 alpha_fptm = ALPHA_FPTM_SU;
432 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
433 warning (0, "128-bit long double not supported for VAX floats");
434 target_flags &= ~MASK_LONG_DOUBLE_128;
441 if (!alpha_mlat_string)
442 alpha_mlat_string = "L1";
444 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
445 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
447 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
448 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
449 && alpha_mlat_string[2] == '\0')
451 static int const cache_latency[][4] =
453 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
454 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
455 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
458 lat = alpha_mlat_string[1] - '0';
459 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
461 warning (0, "L%d cache latency unknown for %s",
462 lat, alpha_cpu_name[alpha_tune]);
466 lat = cache_latency[alpha_tune][lat-1];
468 else if (! strcmp (alpha_mlat_string, "main"))
470 /* Most current memories have about 370ns latency. This is
471 a reasonable guess for a fast cpu. */
476 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
480 alpha_memory_latency = lat;
483 /* Default the definition of "small data" to 8 bytes. */
487 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
490 else if (flag_pic == 2)
491 target_flags &= ~MASK_SMALL_DATA;
493 /* Align labels and loops for optimal branching. */
494 /* ??? Kludge these by not doing anything if we don't optimize and also if
495 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
496 if (optimize > 0 && write_symbols != SDB_DEBUG)
498 if (align_loops <= 0)
500 if (align_jumps <= 0)
503 if (align_functions <= 0)
504 align_functions = 16;
506 /* Acquire a unique set number for our register saves and restores. */
507 alpha_sr_alias_set = new_alias_set ();
509 /* Register variables and functions with the garbage collector. */
511 /* Set up function hooks. */
512 init_machine_status = alpha_init_machine_status;
514 /* Tell the compiler when we're using VAX floating point. */
515 if (TARGET_FLOAT_VAX)
517 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
518 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
519 REAL_MODE_FORMAT (TFmode) = NULL;
522 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
523 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
524 target_flags |= MASK_LONG_DOUBLE_128;
528 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
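/* For illustration: 0x00ff0000ffffff00 is such a mask (every byte is
   0x00 or 0xff), while 0x00ff00ff0000ff80 is not, because its low byte
   is 0x80.  */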
531 zap_mask (HOST_WIDE_INT value)
535 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
537 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
543 /* Return true if OP is valid for a particular TLS relocation.
544 We are already guaranteed that OP is a CONST. */
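/* Illustrative shape of the operand handled here: the incoming CONST
   wraps an UNSPEC around the symbol, e.g.

     (const:DI (unspec:DI [(symbol_ref:DI "foo")] UNSPEC_DTPREL))

   where "foo" stands for some thread-local variable (a made-up name);
   the UNSPEC kind together with the symbol's TLS model decides whether
   the relocation size requested by SIZE is acceptable.  */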
547 tls_symbolic_operand_1 (rtx op, int size, int unspec)
551 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
553 op = XVECEXP (op, 0, 0);
555 if (GET_CODE (op) != SYMBOL_REF)
558 switch (SYMBOL_REF_TLS_MODEL (op))
560 case TLS_MODEL_LOCAL_DYNAMIC:
561 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
562 case TLS_MODEL_INITIAL_EXEC:
563 return unspec == UNSPEC_TPREL && size == 64;
564 case TLS_MODEL_LOCAL_EXEC:
565 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
571 /* Used by aligned_memory_operand and unaligned_memory_operand to
572 resolve what reload is going to do with OP if it's a register. */
575 resolve_reload_operand (rtx op)
577 if (reload_in_progress)
580 if (GET_CODE (tmp) == SUBREG)
581 tmp = SUBREG_REG (tmp);
582 if (GET_CODE (tmp) == REG
583 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
585 op = reg_equiv_memory_loc[REGNO (tmp)];
/* The set of scalar modes we support differs from the default
   check-what-c-supports version in that sometimes TFmode is available
   even when long double indicates only DFmode.  On Unicos/Mk, we have
   the situation that HImode doesn't map to any C type, but of course
   we still support that.  */
599 alpha_scalar_mode_supported_p (enum machine_mode mode)
607 case TImode: /* via optabs.c */
615 return TARGET_HAS_XFLOATING_LIBS;
622 /* Alpha implements a couple of integer vector mode operations when
623 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
624 which allows the vectorizer to operate on e.g. move instructions,
625 or when expand_vector_operations can do something useful. */
628 alpha_vector_mode_supported_p (enum machine_mode mode)
630 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
633 /* Return 1 if this function can directly return via $26. */
638 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
640 && alpha_sa_size () == 0
641 && get_frame_size () == 0
642 && current_function_outgoing_args_size == 0
643 && current_function_pretend_args_size == 0);
646 /* Return the ADDR_VEC associated with a tablejump insn. */
649 alpha_tablejump_addr_vec (rtx insn)
653 tmp = JUMP_LABEL (insn);
656 tmp = NEXT_INSN (tmp);
659 if (GET_CODE (tmp) == JUMP_INSN
660 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
661 return PATTERN (tmp);
665 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
668 alpha_tablejump_best_label (rtx insn)
670 rtx jump_table = alpha_tablejump_addr_vec (insn);
671 rtx best_label = NULL_RTX;
673 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
674 there for edge frequency counts from profile data. */
678 int n_labels = XVECLEN (jump_table, 1);
682 for (i = 0; i < n_labels; i++)
686 for (j = i + 1; j < n_labels; j++)
687 if (XEXP (XVECEXP (jump_table, 1, i), 0)
688 == XEXP (XVECEXP (jump_table, 1, j), 0))
691 if (count > best_count)
692 best_count = count, best_label = XVECEXP (jump_table, 1, i);
696 return best_label ? best_label : const0_rtx;
699 /* Return the TLS model to use for SYMBOL. */
701 static enum tls_model
702 tls_symbolic_operand_type (rtx symbol)
704 enum tls_model model;
706 if (GET_CODE (symbol) != SYMBOL_REF)
708 model = SYMBOL_REF_TLS_MODEL (symbol);
710 /* Local-exec with a 64-bit size is the same code as initial-exec. */
711 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
712 model = TLS_MODEL_INITIAL_EXEC;
717 /* Return true if the function DECL will share the same GP as any
718 function in the current unit of translation. */
721 decl_has_samegp (tree decl)
723 /* Functions that are not local can be overridden, and thus may
724 not share the same gp. */
725 if (!(*targetm.binds_local_p) (decl))
728 /* If -msmall-data is in effect, assume that there is only one GP
729 for the module, and so any local symbol has this property. We
730 need explicit relocations to be able to enforce this for symbols
731 not defined in this unit of translation, however. */
732 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
735 /* Functions that are not external are defined in this UoT. */
736 /* ??? Irritatingly, static functions not yet emitted are still
737 marked "external". Apply this to non-static functions only. */
738 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
741 /* Return true if EXP should be placed in the small data section. */
744 alpha_in_small_data_p (tree exp)
746 /* We want to merge strings, so we never consider them small data. */
747 if (TREE_CODE (exp) == STRING_CST)
750 /* Functions are never in the small data area. Duh. */
751 if (TREE_CODE (exp) == FUNCTION_DECL)
754 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
756 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
757 if (strcmp (section, ".sdata") == 0
758 || strcmp (section, ".sbss") == 0)
763 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
765 /* If this is an incomplete type with size 0, then we can't put it
766 in sdata because it might be too big when completed. */
767 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
774 #if TARGET_ABI_OPEN_VMS
776 alpha_linkage_symbol_p (const char *symname)
778 int symlen = strlen (symname);
781 return strcmp (&symname [symlen - 4], "..lk") == 0;
786 #define LINKAGE_SYMBOL_REF_P(X) \
787 ((GET_CODE (X) == SYMBOL_REF \
788 && alpha_linkage_symbol_p (XSTR (X, 0))) \
789 || (GET_CODE (X) == CONST \
790 && GET_CODE (XEXP (X, 0)) == PLUS \
791 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
792 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
795 /* legitimate_address_p recognizes an RTL expression that is a valid
796 memory address for an instruction. The MODE argument is the
797 machine mode for the MEM expression that wants to use this address.
   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */
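/* As a sketch, the DImode "unaligned" form mentioned above looks like

     (and:DI (plus:DI (reg:DI) (const_int 11)) (const_int -8))

   which is the address an ldq_u/stq_u access would use to reach the
   aligned quadword containing the byte at reg+11.  (Register and
   offset here are arbitrary illustrations.)  */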
805 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
807 /* If this is an ldq_u type address, discard the outer AND. */
809 && GET_CODE (x) == AND
810 && GET_CODE (XEXP (x, 1)) == CONST_INT
811 && INTVAL (XEXP (x, 1)) == -8)
814 /* Discard non-paradoxical subregs. */
815 if (GET_CODE (x) == SUBREG
816 && (GET_MODE_SIZE (GET_MODE (x))
817 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
820 /* Unadorned general registers are valid. */
823 ? STRICT_REG_OK_FOR_BASE_P (x)
824 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
827 /* Constant addresses (i.e. +/- 32k) are valid. */
828 if (CONSTANT_ADDRESS_P (x))
831 #if TARGET_ABI_OPEN_VMS
832 if (LINKAGE_SYMBOL_REF_P (x))
836 /* Register plus a small constant offset is valid. */
837 if (GET_CODE (x) == PLUS)
839 rtx ofs = XEXP (x, 1);
842 /* Discard non-paradoxical subregs. */
843 if (GET_CODE (x) == SUBREG
844 && (GET_MODE_SIZE (GET_MODE (x))
845 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
851 && NONSTRICT_REG_OK_FP_BASE_P (x)
852 && GET_CODE (ofs) == CONST_INT)
855 ? STRICT_REG_OK_FOR_BASE_P (x)
856 : NONSTRICT_REG_OK_FOR_BASE_P (x))
857 && CONSTANT_ADDRESS_P (ofs))
862 /* If we're managing explicit relocations, LO_SUM is valid, as
863 are small data symbols. */
864 else if (TARGET_EXPLICIT_RELOCS)
866 if (small_symbolic_operand (x, Pmode))
869 if (GET_CODE (x) == LO_SUM)
871 rtx ofs = XEXP (x, 1);
874 /* Discard non-paradoxical subregs. */
875 if (GET_CODE (x) == SUBREG
876 && (GET_MODE_SIZE (GET_MODE (x))
877 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
880 /* Must have a valid base register. */
883 ? STRICT_REG_OK_FOR_BASE_P (x)
884 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
887 /* The symbol must be local. */
888 if (local_symbolic_operand (ofs, Pmode)
889 || dtp32_symbolic_operand (ofs, Pmode)
890 || tp32_symbolic_operand (ofs, Pmode))
898 /* Build the SYMBOL_REF for __tls_get_addr. */
900 static GTY(()) rtx tls_get_addr_libfunc;
903 get_tls_get_addr (void)
905 if (!tls_get_addr_libfunc)
906 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
907 return tls_get_addr_libfunc;
910 /* Try machine-dependent ways of modifying an illegitimate address
911 to be legitimate. If we find one, return the new, valid address. */
914 alpha_legitimize_address (rtx x, rtx scratch,
915 enum machine_mode mode ATTRIBUTE_UNUSED)
917 HOST_WIDE_INT addend;
919 /* If the address is (plus reg const_int) and the CONST_INT is not a
920 valid offset, compute the high part of the constant and add it to
921 the register. Then our address is (plus temp low-part-const). */
922 if (GET_CODE (x) == PLUS
923 && GET_CODE (XEXP (x, 0)) == REG
924 && GET_CODE (XEXP (x, 1)) == CONST_INT
925 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
927 addend = INTVAL (XEXP (x, 1));
932 /* If the address is (const (plus FOO const_int)), find the low-order
933 part of the CONST_INT. Then load FOO plus any high-order part of the
934 CONST_INT into a register. Our address is (plus reg low-part-const).
935 This is done to reduce the number of GOT entries. */
937 && GET_CODE (x) == CONST
938 && GET_CODE (XEXP (x, 0)) == PLUS
939 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
941 addend = INTVAL (XEXP (XEXP (x, 0), 1));
942 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
950 && GET_CODE (x) == PLUS
951 && GET_CODE (XEXP (x, 0)) == REG
952 && GET_CODE (XEXP (x, 1)) == CONST
953 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
954 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
956 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
957 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
958 XEXP (XEXP (XEXP (x, 1), 0), 0),
959 NULL_RTX, 1, OPTAB_LIB_WIDEN);
963 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
964 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
966 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
968 switch (tls_symbolic_operand_type (x))
973 case TLS_MODEL_GLOBAL_DYNAMIC:
976 r0 = gen_rtx_REG (Pmode, 0);
977 r16 = gen_rtx_REG (Pmode, 16);
978 tga = get_tls_get_addr ();
979 dest = gen_reg_rtx (Pmode);
980 seq = GEN_INT (alpha_next_sequence_number++);
982 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
983 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
984 insn = emit_call_insn (insn);
985 CONST_OR_PURE_CALL_P (insn) = 1;
986 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
991 emit_libcall_block (insn, dest, r0, x);
994 case TLS_MODEL_LOCAL_DYNAMIC:
997 r0 = gen_rtx_REG (Pmode, 0);
998 r16 = gen_rtx_REG (Pmode, 16);
999 tga = get_tls_get_addr ();
1000 scratch = gen_reg_rtx (Pmode);
1001 seq = GEN_INT (alpha_next_sequence_number++);
1003 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1004 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1005 insn = emit_call_insn (insn);
1006 CONST_OR_PURE_CALL_P (insn) = 1;
1007 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1009 insn = get_insns ();
1012 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1013 UNSPEC_TLSLDM_CALL);
1014 emit_libcall_block (insn, scratch, r0, eqv);
1016 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1017 eqv = gen_rtx_CONST (Pmode, eqv);
1019 if (alpha_tls_size == 64)
1021 dest = gen_reg_rtx (Pmode);
1022 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1023 emit_insn (gen_adddi3 (dest, dest, scratch));
1026 if (alpha_tls_size == 32)
1028 insn = gen_rtx_HIGH (Pmode, eqv);
1029 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1030 scratch = gen_reg_rtx (Pmode);
1031 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1033 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1035 case TLS_MODEL_INITIAL_EXEC:
1036 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1037 eqv = gen_rtx_CONST (Pmode, eqv);
1038 tp = gen_reg_rtx (Pmode);
1039 scratch = gen_reg_rtx (Pmode);
1040 dest = gen_reg_rtx (Pmode);
1042 emit_insn (gen_load_tp (tp));
1043 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1044 emit_insn (gen_adddi3 (dest, tp, scratch));
1047 case TLS_MODEL_LOCAL_EXEC:
1048 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1049 eqv = gen_rtx_CONST (Pmode, eqv);
1050 tp = gen_reg_rtx (Pmode);
1052 emit_insn (gen_load_tp (tp));
1053 if (alpha_tls_size == 32)
1055 insn = gen_rtx_HIGH (Pmode, eqv);
1056 insn = gen_rtx_PLUS (Pmode, tp, insn);
1057 tp = gen_reg_rtx (Pmode);
1058 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1060 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1066 if (local_symbolic_operand (x, Pmode))
1068 if (small_symbolic_operand (x, Pmode))
1072 if (!no_new_pseudos)
1073 scratch = gen_reg_rtx (Pmode);
1074 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1075 gen_rtx_HIGH (Pmode, x)));
1076 return gen_rtx_LO_SUM (Pmode, scratch, x);
1085 HOST_WIDE_INT low, high;
1087 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1089 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1093 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1094 (no_new_pseudos ? scratch : NULL_RTX),
1095 1, OPTAB_LIB_WIDEN);
1097 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1098 (no_new_pseudos ? scratch : NULL_RTX),
1099 1, OPTAB_LIB_WIDEN);
1101 return plus_constant (x, low);
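/* Worked example of the split above (illustration only): for an addend
   of 0x1234f000, LOW = ((0xf000 ^ 0x8000) - 0x8000) = -0x1000 and HIGH
   becomes 0x12350000, so the address is formed as
   (reg + 0x12350000) + (-0x1000), keeping each piece within the signed
   ranges that ldah- and lda-style additions accept.  */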
1105 /* Primarily this is required for TLS symbols, but given that our move
1106 patterns *ought* to be able to handle any symbol at any time, we
1107 should never be spilling symbolic operands to the constant pool, ever. */
1110 alpha_cannot_force_const_mem (rtx x)
1112 enum rtx_code code = GET_CODE (x);
1113 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */
1121 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1123 /* Can't do indirect tail calls, since we don't know if the target
1124 uses the same GP. */
1128 /* Otherwise, we can make a tail call if the target function shares
1130 return decl_has_samegp (decl);
1134 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1138 /* Don't re-split. */
1139 if (GET_CODE (x) == LO_SUM)
1142 return small_symbolic_operand (x, Pmode) != 0;
1146 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1150 /* Don't re-split. */
1151 if (GET_CODE (x) == LO_SUM)
1154 if (small_symbolic_operand (x, Pmode))
1156 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1165 split_small_symbolic_operand (rtx x)
1168 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1172 /* Indicate that INSN cannot be duplicated. This is true for any insn
1173 that we've marked with gpdisp relocs, since those have to stay in
1174 1-1 correspondence with one another.
1176 Technically we could copy them if we could set up a mapping from one
1177 sequence number to another, across the set of insns to be duplicated.
1178 This seems overly complicated and error-prone since interblock motion
1179 from sched-ebb could move one of the pair of insns to a different block.
1181 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1182 then they'll be in a different block from their ldgp. Which could lead
1183 the bb reorder code to think that it would be ok to copy just the block
1184 containing the call and branch to the block containing the ldgp. */
1187 alpha_cannot_copy_insn_p (rtx insn)
1189 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1191 if (recog_memoized (insn) >= 0)
1192 return get_attr_cannot_copy (insn);
1198 /* Try a machine-dependent way of reloading an illegitimate address
1199 operand. If we find one, push the reload and return the new rtx. */
1202 alpha_legitimize_reload_address (rtx x,
1203 enum machine_mode mode ATTRIBUTE_UNUSED,
1204 int opnum, int type,
1205 int ind_levels ATTRIBUTE_UNUSED)
1207 /* We must recognize output that we have already generated ourselves. */
1208 if (GET_CODE (x) == PLUS
1209 && GET_CODE (XEXP (x, 0)) == PLUS
1210 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1211 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1212 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1214 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1215 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts the number of extra insns needed from 3 to 1.  */
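/* For example (illustrative values): a displacement of 0x1a000 off a
   base register is split into HIGH = 0x20000 and LOW = -0x6000; the
   0x20000 part is reloaded into a base register with a single ldah,
   and the -0x6000 part stays as the memory displacement.  */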
1223 if (GET_CODE (x) == PLUS
1224 && GET_CODE (XEXP (x, 0)) == REG
1225 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1226 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1227 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1229 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1230 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1232 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1234 /* Check for 32-bit overflow. */
1235 if (high + low != val)
1238 /* Reload the high part into a base reg; leave the low part
1239 in the mem directly. */
1240 x = gen_rtx_PLUS (GET_MODE (x),
1241 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1245 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1246 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1254 /* Compute a (partial) cost for rtx X. Return true if the complete
1255 cost has been computed, and false if subexpressions should be
1256 scanned. In either case, *TOTAL contains the cost result. */
1259 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1261 enum machine_mode mode = GET_MODE (x);
1262 bool float_mode_p = FLOAT_MODE_P (mode);
1263 const struct alpha_rtx_cost_data *cost_data;
1266 cost_data = &alpha_rtx_cost_size;
1268 cost_data = &alpha_rtx_cost_data[alpha_tune];
1273 /* If this is an 8-bit constant, return zero since it can be used
1274 nearly anywhere with no cost. If it is a valid operand for an
1275 ADD or AND, likewise return 0 if we know it will be used in that
1276 context. Otherwise, return 2 since it might be used there later.
1277 All other constants take at least two insns. */
1278 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1286 if (x == CONST0_RTX (mode))
1288 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1289 || (outer_code == AND && and_operand (x, VOIDmode)))
1291 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1294 *total = COSTS_N_INSNS (2);
1300 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1301 *total = COSTS_N_INSNS (outer_code != MEM);
1302 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1303 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1304 else if (tls_symbolic_operand_type (x))
1305 /* Estimate of cost for call_pal rduniq. */
1306 /* ??? How many insns do we emit here? More than one... */
1307 *total = COSTS_N_INSNS (15);
1309 /* Otherwise we do a load from the GOT. */
1310 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1314 /* This is effectively an add_operand. */
1321 *total = cost_data->fp_add;
1322 else if (GET_CODE (XEXP (x, 0)) == MULT
1323 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1325 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1326 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
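	  /* The MULT case just above corresponds to the scaled-add
	     instructions (s4addq/s8addq and friends), so such a PLUS is
	     costed as one insn on top of its operands rather than as a
	     separate multiply.  */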
1333 *total = cost_data->fp_mult;
1334 else if (mode == DImode)
1335 *total = cost_data->int_mult_di;
1337 *total = cost_data->int_mult_si;
1341 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1342 && INTVAL (XEXP (x, 1)) <= 3)
1344 *total = COSTS_N_INSNS (1);
1351 *total = cost_data->int_shift;
1356 *total = cost_data->fp_add;
1358 *total = cost_data->int_cmov;
1366 *total = cost_data->int_div;
1367 else if (mode == SFmode)
1368 *total = cost_data->fp_div_sf;
1370 *total = cost_data->fp_div_df;
1374 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1380 *total = COSTS_N_INSNS (1);
1388 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1394 case UNSIGNED_FLOAT:
1397 case FLOAT_TRUNCATE:
1398 *total = cost_data->fp_add;
1402 if (GET_CODE (XEXP (x, 0)) == MEM)
1405 *total = cost_data->fp_add;
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  (Out-of-range reload addresses are handled below via
   find_replacement rather than through a scratch register, so no
   scratch parameter is needed.)  */
1419 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1422 HOST_WIDE_INT disp, offset;
1424 gcc_assert (GET_CODE (ref) == MEM);
1426 if (reload_in_progress
1427 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1429 base = find_replacement (&XEXP (ref, 0));
1430 gcc_assert (memory_address_p (GET_MODE (ref), base));
1433 base = XEXP (ref, 0);
1435 if (GET_CODE (base) == PLUS)
1436 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1440 /* Find the byte offset within an aligned word. If the memory itself is
1441 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1442 will have examined the base register and determined it is aligned, and
1443 thus displacements from it are naturally alignable. */
1444 if (MEM_ALIGN (ref) >= 32)
1449 /* Access the entire aligned word. */
1450 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1452 /* Convert the byte offset within the word to a bit offset. */
1453 if (WORDS_BIG_ENDIAN)
1454 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1457 *pbitnum = GEN_INT (offset);
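/* Rough example of the transformation above (offsets chosen purely for
   illustration): a QImode reference to (mem (plus (reg) (const_int 5)))
   whose base register is known to be aligned, but whose MEM is not
   itself marked 32-bit aligned, has a byte offset of 1 within its
   longword; *PALIGNED_MEM becomes the SImode word at
   (plus (reg) (const_int 4)) and *PBITNUM is 8 on a little-endian
   target.  */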
1460 /* Similar, but just get the address. Handle the two reload cases.
1461 Add EXTRA_OFFSET to the address we return. */
1464 get_unaligned_address (rtx ref, int extra_offset)
1467 HOST_WIDE_INT offset = 0;
1469 gcc_assert (GET_CODE (ref) == MEM);
1471 if (reload_in_progress
1472 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1474 base = find_replacement (&XEXP (ref, 0));
1476 gcc_assert (memory_address_p (GET_MODE (ref), base));
1479 base = XEXP (ref, 0);
1481 if (GET_CODE (base) == PLUS)
1482 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1484 return plus_constant (base, offset + extra_offset);
1487 /* On the Alpha, all (non-symbolic) constants except zero go into
1488 a floating-point register via memory. Note that we cannot
1489 return anything that is not a subset of CLASS, and that some
1490 symbolic constants cannot be dropped to memory. */
1493 alpha_preferred_reload_class(rtx x, enum reg_class class)
1495 /* Zero is present in any register class. */
1496 if (x == CONST0_RTX (GET_MODE (x)))
1499 /* These sorts of constants we can easily drop to memory. */
1500 if (GET_CODE (x) == CONST_INT
1501 || GET_CODE (x) == CONST_DOUBLE
1502 || GET_CODE (x) == CONST_VECTOR)
1504 if (class == FLOAT_REGS)
1506 if (class == ALL_REGS)
1507 return GENERAL_REGS;
1511 /* All other kinds of constants should not (and in the case of HIGH
1512 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1513 secondary reload. */
1515 return (class == ALL_REGS ? GENERAL_REGS : class);
1520 /* Loading and storing HImode or QImode values to and from memory
1521 usually requires a scratch register. The exceptions are loading
1522 QImode and HImode from an aligned address to a general register
1523 unless byte instructions are permitted.
1525 We also cannot load an unaligned address or a paradoxical SUBREG
1526 into an FP register.
1528 We also cannot do integral arithmetic into FP regs, as might result
1529 from register elimination into a DImode fp register. */
1532 alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
1535 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1537 if (GET_CODE (x) == MEM
1538 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1539 || (GET_CODE (x) == SUBREG
1540 && (GET_CODE (SUBREG_REG (x)) == MEM
1541 || (GET_CODE (SUBREG_REG (x)) == REG
1542 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1544 if (!in || !aligned_memory_operand(x, mode))
1545 return GENERAL_REGS;
1549 if (class == FLOAT_REGS)
1551 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1552 return GENERAL_REGS;
1554 if (GET_CODE (x) == SUBREG
1555 && (GET_MODE_SIZE (GET_MODE (x))
1556 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1557 return GENERAL_REGS;
1559 if (in && INTEGRAL_MODE_P (mode)
1560 && ! (memory_operand (x, mode) || x == const0_rtx))
1561 return GENERAL_REGS;
1567 /* Subfunction of the following function. Update the flags of any MEM
1568 found in part of X. */
1571 alpha_set_memflags_1 (rtx *xp, void *data)
1573 rtx x = *xp, orig = (rtx) data;
1575 if (GET_CODE (x) != MEM)
1578 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1579 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1580 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1581 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1582 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1584 /* Sadly, we cannot use alias sets because the extra aliasing
1585 produced by the AND interferes. Given that two-byte quantities
1586 are the only thing we would be able to differentiate anyway,
1587 there does not seem to be any point in convoluting the early
1588 out of the alias check. */
1593 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1594 generated to perform a memory operation, look for any MEMs in either
1595 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1596 volatile flags from REF into each of the MEMs found. If REF is not
1597 a MEM, don't do anything. */
1600 alpha_set_memflags (rtx insn, rtx ref)
1604 if (GET_CODE (ref) != MEM)
1607 /* This is only called from alpha.md, after having had something
1608 generated from one of the insn patterns. So if everything is
1609 zero, the pattern is already up-to-date. */
1610 if (!MEM_VOLATILE_P (ref)
1611 && !MEM_IN_STRUCT_P (ref)
1612 && !MEM_SCALAR_P (ref)
1613 && !MEM_NOTRAP_P (ref)
1614 && !MEM_READONLY_P (ref))
1618 base_ptr = &PATTERN (insn);
1621 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1624 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1627 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1628 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1629 and return pc_rtx if successful. */
1632 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1633 HOST_WIDE_INT c, int n, bool no_output)
1637 /* Use a pseudo if highly optimizing and still generating RTL. */
1639 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1642 /* If this is a sign-extended 32-bit constant, we can do this in at most
1643 three insns, so do it if we have enough insns left. We always have
1644 a sign-extended 32-bit constant when compiling on a narrow machine. */
1646 if (HOST_BITS_PER_WIDE_INT != 64
1647 || c >> 31 == -1 || c >> 31 == 0)
1649 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1650 HOST_WIDE_INT tmp1 = c - low;
1651 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1652 HOST_WIDE_INT extra = 0;
      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldah insns.  */
1657 if ((high & 0x8000) != 0 && c >= 0)
1661 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
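      /* Worked example of the adjustment above (illustration only): for
	 c = 0x7fff8000 we get LOW = -0x8000, and the raw HIGH would also
	 be -0x8000, which would sign-extend the ldah result and overshoot
	 the positive target.  The fixup instead rebuilds the value as
	 (0x4000 << 16) + (0x4000 << 16) + (-0x8000), i.e. two ldah insns
	 plus one lda.  */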
1664 if (c == low || (low == 0 && extra == 0))
1666 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1667 but that meant that we can't handle INT_MIN on 32-bit machines
1668 (like NT/Alpha), because we recurse indefinitely through
1669 emit_move_insn to gen_movdi. So instead, since we know exactly
1670 what we want, create it explicitly. */
1675 target = gen_reg_rtx (mode);
1676 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1679 else if (n >= 2 + (extra != 0))
1685 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1689 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1692 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1693 This means that if we go through expand_binop, we'll try to
1694 generate extensions, etc, which will require new pseudos, which
1695 will fail during some split phases. The SImode add patterns
1696 still exist, but are not named. So build the insns by hand. */
1701 subtarget = gen_reg_rtx (mode);
1702 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1703 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1709 target = gen_reg_rtx (mode);
1710 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1711 insn = gen_rtx_SET (VOIDmode, target, insn);
1717 /* If we couldn't do it that way, try some other methods. But if we have
1718 no instructions left, don't bother. Likewise, if this is SImode and
1719 we can't make pseudos, we can't do anything since the expand_binop
1720 and expand_unop calls will widen and try to make pseudos. */
1722 if (n == 1 || (mode == SImode && no_new_pseudos))
1725 /* Next, see if we can load a related constant and then shift and possibly
1726 negate it to get the constant we want. Try this once each increasing
1727 numbers of insns. */
1729 for (i = 1; i < n; i++)
      /* First, see if, minus some low bits, we've an easy load of
	 the constant.  */
1734 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1737 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1742 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1743 target, 0, OPTAB_WIDEN);
1747 /* Next try complementing. */
1748 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1753 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1756 /* Next try to form a constant and do a left shift. We can do this
1757 if some low-order bits are zero; the exact_log2 call below tells
1758 us that information. The bits we are shifting out could be any
1759 value, but here we'll just try the 0- and sign-extended forms of
1760 the constant. To try to increase the chance of having the same
1761 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */
1765 bits = exact_log2 (c & -c);
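      /* A note on the idiom above: c & -c isolates the lowest set bit of
	 C, so exact_log2 of it gives the number of trailing zero bits.
	 For example, c = 0x1400 yields bits = 10, the largest left shift
	 that could have produced C from a smaller constant.  */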
1767 for (; bits > 0; bits--)
1770 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1773 new = (unsigned HOST_WIDE_INT)c >> bits;
1774 temp = alpha_emit_set_const (subtarget, mode, new,
1781 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1782 target, 0, OPTAB_WIDEN);
1786 /* Now try high-order zero bits. Here we try the shifted-in bits as
1787 all zero and all ones. Be careful to avoid shifting outside the
1788 mode and to avoid shifting outside the host wide int size. */
1789 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1790 confuse the recursive call and set all of the high 32 bits. */
1792 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1793 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1795 for (; bits > 0; bits--)
1798 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1801 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1802 temp = alpha_emit_set_const (subtarget, mode, new,
1809 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1810 target, 1, OPTAB_WIDEN);
1814 /* Now try high-order 1 bits. We get that with a sign-extension.
1815 But one bit isn't enough here. Be careful to avoid shifting outside
1816 the mode and to avoid shifting outside the host wide int size. */
1818 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1819 - floor_log2 (~ c) - 2);
1821 for (; bits > 0; bits--)
1824 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1827 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1828 temp = alpha_emit_set_const (subtarget, mode, new,
1835 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1836 target, 0, OPTAB_WIDEN);
1841 #if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as the
1843 constant except that all bytes that are 0 are changed to be 0xff. If we
1844 can, then we can do a ZAPNOT to obtain the desired constant. */
1847 for (i = 0; i < 64; i += 8)
1848 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1849 new |= (HOST_WIDE_INT) 0xff << i;
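  /* Why this helps (sketch): NEW now has every zero byte of C replaced
     by 0xff, which may be a cheaper constant to load.  The mask C | ~NEW
     is 0xff in each byte to keep and 0x00 in each byte to clear -- i.e.
     a zap-style mask -- so the final AND below can be a single ZAPNOT.  */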
1851 /* We are only called for SImode and DImode. If this is SImode, ensure that
1852 we are sign extended to a full word. */
1855 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1859 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1864 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1865 target, 0, OPTAB_WIDEN);
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in at most N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */
1880 alpha_emit_set_const (rtx target, enum machine_mode mode,
1881 HOST_WIDE_INT c, int n, bool no_output)
1883 enum machine_mode orig_mode = mode;
1884 rtx orig_target = target;
  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do this in DImode.  */
1890 if (no_new_pseudos && mode == SImode
1891 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1893 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1897 target = no_output ? NULL : gen_lowpart (DImode, target);
1900 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1902 target = no_output ? NULL : gen_lowpart (DImode, target);
1906 /* Try 1 insn, then 2, then up to N. */
1907 for (i = 1; i <= n; i++)
1909 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1917 insn = get_last_insn ();
1918 set = single_set (insn);
1919 if (! CONSTANT_P (SET_SRC (set)))
1920 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1925 /* Allow for the case where we changed the mode of TARGET. */
1928 if (result == target)
1929 result = orig_target;
1930 else if (mode != orig_mode)
1931 result = gen_lowpart (orig_mode, result);
1937 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
1939 exponential run times encountered when looking for longer sequences
1940 with alpha_emit_set_const. */
1943 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1945 HOST_WIDE_INT d1, d2, d3, d4;
1947 /* Decompose the entire word */
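  /* Sketch of the decomposition computed below: the constant is rebuilt
     as ((d4 + d3) << 32) + d2 + d1, where d1 and d3 are signed 16-bit
     pieces (lda-sized) and d2 and d4 are signed 32-bit pieces
     (ldah+lda-sized).  The high word is materialized first, shifted
     left by 32, and then the low pieces are added in.  */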
1948 #if HOST_BITS_PER_WIDE_INT >= 64
1949 gcc_assert (c2 == -(c1 < 0));
1950 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1952 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1953 c1 = (c1 - d2) >> 32;
1954 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1956 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1957 gcc_assert (c1 == d4);
1959 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1961 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1962 gcc_assert (c1 == d2);
1964 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1966 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1967 gcc_assert (c2 == d4);
1970 /* Construct the high word */
1973 emit_move_insn (target, GEN_INT (d4));
1975 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1978 emit_move_insn (target, GEN_INT (d3));
1980 /* Shift it into place */
1981 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1983 /* Add in the low bits. */
1985 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1987 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   its value as two HOST_WIDE_INTs: the low part in *P0 and the high
   part (or sign extension) in *P1.  */
1996 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
1998 HOST_WIDE_INT i0, i1;
2000 if (GET_CODE (x) == CONST_VECTOR)
2001 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2004 if (GET_CODE (x) == CONST_INT)
2009 else if (HOST_BITS_PER_WIDE_INT >= 64)
2011 i0 = CONST_DOUBLE_LOW (x);
2016 i0 = CONST_DOUBLE_LOW (x);
2017 i1 = CONST_DOUBLE_HIGH (x);
2024 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2025 are willing to load the value into a register via a move pattern.
2026 Normally this is all symbolic constants, integral constants that
2027 take three or fewer instructions, and floating-point zero. */
2030 alpha_legitimate_constant_p (rtx x)
2032 enum machine_mode mode = GET_MODE (x);
2033 HOST_WIDE_INT i0, i1;
2035 switch (GET_CODE (x))
2043 /* TLS symbols are never valid. */
2044 return SYMBOL_REF_TLS_MODEL (x) == 0;
2047 if (x == CONST0_RTX (mode))
2049 if (FLOAT_MODE_P (mode))
2054 if (x == CONST0_RTX (mode))
2056 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2058 if (GET_MODE_SIZE (mode) != 8)
2064 if (TARGET_BUILD_CONSTANTS)
2066 alpha_extract_integer (x, &i0, &i1);
2067 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2068 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2076 /* Operand 1 is known to be a constant, and should require more than one
2077 instruction to load. Emit that multi-part load. */
2080 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2082 HOST_WIDE_INT i0, i1;
2083 rtx temp = NULL_RTX;
2085 alpha_extract_integer (operands[1], &i0, &i1);
2087 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2088 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2090 if (!temp && TARGET_BUILD_CONSTANTS)
2091 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2095 if (!rtx_equal_p (operands[0], temp))
2096 emit_move_insn (operands[0], temp);
2103 /* Expand a move instruction; return true if all work is done.
2104 We don't handle non-bwx subword loads here. */
2107 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2109 /* If the output is not a register, the input must be. */
2110 if (GET_CODE (operands[0]) == MEM
2111 && ! reg_or_0_operand (operands[1], mode))
2112 operands[1] = force_reg (mode, operands[1]);
2114 /* Allow legitimize_address to perform some simplifications. */
2115 if (mode == Pmode && symbolic_operand (operands[1], mode))
2119 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2122 if (tmp == operands[0])
2129 /* Early out for non-constants and valid constants. */
2130 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2133 /* Split large integers. */
2134 if (GET_CODE (operands[1]) == CONST_INT
2135 || GET_CODE (operands[1]) == CONST_DOUBLE
2136 || GET_CODE (operands[1]) == CONST_VECTOR)
2138 if (alpha_split_const_mov (mode, operands))
2142 /* Otherwise we've nothing left but to drop the thing to memory. */
2143 operands[1] = force_const_mem (mode, operands[1]);
2144 if (reload_in_progress)
2146 emit_move_insn (operands[0], XEXP (operands[1], 0));
2147 operands[1] = copy_rtx (operands[1]);
2148 XEXP (operands[1], 0) = operands[0];
2151 operands[1] = validize_mem (operands[1]);
2155 /* Expand a non-bwx QImode or HImode move instruction;
2156 return true if all work is done. */
2159 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2161 /* If the output is not a register, the input must be. */
2162 if (GET_CODE (operands[0]) == MEM)
2163 operands[1] = force_reg (mode, operands[1]);
2165 /* Handle four memory cases, unaligned and aligned for either the input
2166 or the output. The only case where we can be called during reload is
2167 for aligned loads; all other cases require temporaries. */
2169 if (GET_CODE (operands[1]) == MEM
2170 || (GET_CODE (operands[1]) == SUBREG
2171 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2172 || (reload_in_progress && GET_CODE (operands[1]) == REG
2173 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2174 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2175 && GET_CODE (SUBREG_REG (operands[1])) == REG
2176 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2178 if (aligned_memory_operand (operands[1], mode))
2180 if (reload_in_progress)
2182 emit_insn ((mode == QImode
2183 ? gen_reload_inqi_help
2184 : gen_reload_inhi_help)
2185 (operands[0], operands[1],
2186 gen_rtx_REG (SImode, REGNO (operands[0]))));
2190 rtx aligned_mem, bitnum;
2191 rtx scratch = gen_reg_rtx (SImode);
2195 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2197 subtarget = operands[0];
2198 if (GET_CODE (subtarget) == REG)
2199 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2201 subtarget = gen_reg_rtx (DImode), copyout = true;
2203 emit_insn ((mode == QImode
2204 ? gen_aligned_loadqi
2205 : gen_aligned_loadhi)
2206 (subtarget, aligned_mem, bitnum, scratch));
2209 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2214 /* Don't pass these as parameters since that makes the generated
2215 code depend on parameter evaluation order which will cause
2216 bootstrap failures. */
2218 rtx temp1, temp2, seq, subtarget;
2221 temp1 = gen_reg_rtx (DImode);
2222 temp2 = gen_reg_rtx (DImode);
2224 subtarget = operands[0];
2225 if (GET_CODE (subtarget) == REG)
2226 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2228 subtarget = gen_reg_rtx (DImode), copyout = true;
2230 seq = ((mode == QImode
2231 ? gen_unaligned_loadqi
2232 : gen_unaligned_loadhi)
2233 (subtarget, get_unaligned_address (operands[1], 0),
2235 alpha_set_memflags (seq, operands[1]);
2239 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2244 if (GET_CODE (operands[0]) == MEM
2245 || (GET_CODE (operands[0]) == SUBREG
2246 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2247 || (reload_in_progress && GET_CODE (operands[0]) == REG
2248 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2249 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2250 && GET_CODE (SUBREG_REG (operands[0])) == REG
2251 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2253 if (aligned_memory_operand (operands[0], mode))
2255 rtx aligned_mem, bitnum;
2256 rtx temp1 = gen_reg_rtx (SImode);
2257 rtx temp2 = gen_reg_rtx (SImode);
2259 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2261 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2266 rtx temp1 = gen_reg_rtx (DImode);
2267 rtx temp2 = gen_reg_rtx (DImode);
2268 rtx temp3 = gen_reg_rtx (DImode);
2269 rtx seq = ((mode == QImode
2270 ? gen_unaligned_storeqi
2271 : gen_unaligned_storehi)
2272 (get_unaligned_address (operands[0], 0),
2273 operands[1], temp1, temp2, temp3));
2275 alpha_set_memflags (seq, operands[0]);
2284 /* Implement the movmisalign patterns. One of the operands is a memory
2285 that is not naturally aligned. Emit instructions to load it. */
2288 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2290 /* Honor misaligned loads, for those we promised to do so. */
2291 if (MEM_P (operands[1]))
2295 if (register_operand (operands[0], mode))
2298 tmp = gen_reg_rtx (mode);
2300 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2301 if (tmp != operands[0])
2302 emit_move_insn (operands[0], tmp);
2304 else if (MEM_P (operands[0]))
2306 if (!reg_or_0_operand (operands[1], mode))
2307 operands[1] = force_reg (mode, operands[1]);
2308 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2314 /* Generate an unsigned DImode to FP conversion. This is the same code
2315 optabs would emit if we didn't have TFmode patterns.
2317 For SFmode, this is the only construction I've found that can pass
2318 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2319 intermediates will work, because you'll get intermediate rounding
2320 that ruins the end result. Some of this could be fixed by turning
2321 on round-to-positive-infinity, but that requires diddling the fpsr,
2322 which kills performance. I tried turning this around and converting
2323 to a negative number, so that I could turn on /m, but either I did
2324 it wrong or there's something else going on, because I wound up with the exact
2325 same single-bit error. There is a branch-less form of this same code:
2336 fcmoveq $f10,$f11,$f0
2338 I'm not using it because it's the same number of instructions as
2339 this branch-full form, and it has more serialized long latency
2340 instructions on the critical path.
2342 For DFmode, we can avoid rounding errors by breaking up the word
2343 into two pieces, converting them separately, and adding them back:
2345 LC0: .long 0,0x5f800000
2350 cpyse $f11,$f31,$f10
2351 cpyse $f31,$f11,$f11
2359 This doesn't seem to be a clear-cut win over the optabs form.
2360 It probably all depends on the distribution of numbers being
2361 converted -- in the optabs form, every input except those with the
2362 high bit set has a much lower minimum execution time. */
2365 alpha_emit_floatuns (rtx operands[2])
2367 rtx neglab, donelab, i0, i1, f0, in, out;
2368 enum machine_mode mode;
2371 in = force_reg (DImode, operands[1]);
2372 mode = GET_MODE (out);
2373 neglab = gen_label_rtx ();
2374 donelab = gen_label_rtx ();
2375 i0 = gen_reg_rtx (DImode);
2376 i1 = gen_reg_rtx (DImode);
2377 f0 = gen_reg_rtx (mode);
2379 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2381 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2382 emit_jump_insn (gen_jump (donelab));
2385 emit_label (neglab);
2387 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2388 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2389 emit_insn (gen_iordi3 (i0, i0, i1));
2390 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2391 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2393 emit_label (donelab);
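#if 0
/* Scalar model of the sequence emitted above -- an illustrative sketch
   only, not compiled into the backend.  Inputs with the sign bit clear
   convert directly; otherwise we halve the value while folding the
   discarded low bit back in (so it still participates in rounding) and
   then double the converted result.  */
static double
floatuns_model (unsigned long x)
{
  unsigned long i0;
  double f0;

  if ((long) x >= 0)
    return (double) (long) x;

  i0 = (x >> 1) | (x & 1);
  f0 = (double) (long) i0;
  return f0 + f0;
}
#endif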
2396 /* Generate the comparison for a conditional branch. */
2399 alpha_emit_conditional_branch (enum rtx_code code)
2401 enum rtx_code cmp_code, branch_code;
2402 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2403 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2406 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2408 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2410 alpha_compare.fp_p = 0;
2413 /* The general case: fold the comparison code to the types of compares
2414 that we have, choosing the branch as necessary. */
2417 case EQ: case LE: case LT: case LEU: case LTU:
2419 /* We have these compares: */
2420 cmp_code = code, branch_code = NE;
2425 /* These must be reversed. */
2426 cmp_code = reverse_condition (code), branch_code = EQ;
2429 case GE: case GT: case GEU: case GTU:
2430 /* For FP, we swap them, for INT, we reverse them. */
2431 if (alpha_compare.fp_p)
2433 cmp_code = swap_condition (code);
2435 tem = op0, op0 = op1, op1 = tem;
2439 cmp_code = reverse_condition (code);
2448 if (alpha_compare.fp_p)
2451 if (flag_unsafe_math_optimizations)
2453 /* When we are not as concerned about non-finite values, and we
2454 are comparing against zero, we can branch directly. */
2455 if (op1 == CONST0_RTX (DFmode))
2456 cmp_code = UNKNOWN, branch_code = code;
2457 else if (op0 == CONST0_RTX (DFmode))
2459 /* Undo the swap we probably did just above. */
2460 tem = op0, op0 = op1, op1 = tem;
2461 branch_code = swap_condition (cmp_code);
2467 /* ??? We mark the branch mode to be CCmode to prevent the
2468 compare and branch from being combined, since the compare
2469 insn follows IEEE rules that the branch does not. */
2470 branch_mode = CCmode;
2477 /* The following optimizations are only for signed compares. */
2478 if (code != LEU && code != LTU && code != GEU && code != GTU)
2480 /* Whee. Compare and branch against 0 directly. */
2481 if (op1 == const0_rtx)
2482 cmp_code = UNKNOWN, branch_code = code;
2484 /* If the constant doesn't fit into an immediate, but can
2485 be generated by lda/ldah, we adjust the argument and
2486 compare against zero, so we can use beq/bne directly. */
2487 /* ??? Don't do this when comparing against symbols, otherwise
2488 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2489 be declared false out of hand (at least for non-weak). */
2490 else if (GET_CODE (op1) == CONST_INT
2491 && (code == EQ || code == NE)
2492 && !(symbolic_operand (op0, VOIDmode)
2493 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2495 rtx n_op1 = GEN_INT (-INTVAL (op1));
2497 if (! satisfies_constraint_I (op1)
2498 && (satisfies_constraint_K (n_op1)
2499 || satisfies_constraint_L (n_op1)))
2500 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2504 if (!reg_or_0_operand (op0, DImode))
2505 op0 = force_reg (DImode, op0);
2506 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2507 op1 = force_reg (DImode, op1);
2510 /* Emit an initial compare instruction, if necessary. */
2512 if (cmp_code != UNKNOWN)
2514 tem = gen_reg_rtx (cmp_mode);
2515 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2518 /* Zero the operands. */
2519 memset (&alpha_compare, 0, sizeof (alpha_compare));
2521 /* Return the branch comparison. */
2522 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
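/* Worked example of the folding above (illustrative): a signed integer
   test "a > b" has no direct compare instruction, so GT is reversed to
   LE and the branch sense inverted, giving

	cmple	a,b,t
	beq	t,label

   (taken exactly when a > b), whereas the same test on FP operands is
   swapped instead, producing cmptlt b,a followed by fbne.  */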
2525 /* Certain simplifications can be done to make invalid setcc operations
2526 valid. Return the final comparison, or NULL if we can't work. */
2529 alpha_emit_setcc (enum rtx_code code)
2531 enum rtx_code cmp_code;
2532 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2533 int fp_p = alpha_compare.fp_p;
2536 /* Zero the operands. */
2537 memset (&alpha_compare, 0, sizeof (alpha_compare));
2539 if (fp_p && GET_MODE (op0) == TFmode)
2541 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2546 if (fp_p && !TARGET_FIX)
2549 /* The general case: fold the comparison code to the types of compares
2550 that we have, choosing the branch as necessary. */
2555 case EQ: case LE: case LT: case LEU: case LTU:
2557 /* We have these compares. */
2559 cmp_code = code, code = NE;
2563 if (!fp_p && op1 == const0_rtx)
2568 cmp_code = reverse_condition (code);
2572 case GE: case GT: case GEU: case GTU:
2573 /* These normally need swapping, but for integer zero we have
2574 special patterns that recognize swapped operands. */
2575 if (!fp_p && op1 == const0_rtx)
2577 code = swap_condition (code);
2579 cmp_code = code, code = NE;
2580 tmp = op0, op0 = op1, op1 = tmp;
2589 if (!register_operand (op0, DImode))
2590 op0 = force_reg (DImode, op0);
2591 if (!reg_or_8bit_operand (op1, DImode))
2592 op1 = force_reg (DImode, op1);
2595 /* Emit an initial compare instruction, if necessary. */
2596 if (cmp_code != UNKNOWN)
2598 enum machine_mode mode = fp_p ? DFmode : DImode;
2600 tmp = gen_reg_rtx (mode);
2601 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2602 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2604 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2608 /* Return the setcc comparison. */
2609 return gen_rtx_fmt_ee (code, DImode, op0, op1);
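/* Example (illustrative): an integer "x >= y" with nonzero Y is swapped
   above into "y <= x", so the whole setcc reduces to a single

	cmple	y,x,dest

   while the FP variants first materialize the compare result in an FP
   register and then test that value against zero in the returned rtx.  */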
2613 /* Rewrite a comparison against zero CMP of the form
2614 (CODE (cc0) (const_int 0)) so it can be written validly in
2615 a conditional move (if_then_else CMP ...).
2616 If both of the operands that set cc0 are nonzero we must emit
2617 an insn to perform the compare (it can't be done within
2618 the conditional move). */
2621 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2623 enum rtx_code code = GET_CODE (cmp);
2624 enum rtx_code cmov_code = NE;
2625 rtx op0 = alpha_compare.op0;
2626 rtx op1 = alpha_compare.op1;
2627 int fp_p = alpha_compare.fp_p;
2628 enum machine_mode cmp_mode
2629 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2630 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2631 enum machine_mode cmov_mode = VOIDmode;
2632 int local_fast_math = flag_unsafe_math_optimizations;
2635 /* Zero the operands. */
2636 memset (&alpha_compare, 0, sizeof (alpha_compare));
2638 if (fp_p != FLOAT_MODE_P (mode))
2640 enum rtx_code cmp_code;
2645 /* If we have fp<->int register move instructions, do a cmov by
2646 performing the comparison in fp registers, and move the
2647 zero/nonzero value to integer registers, where we can then
2648 use a normal cmov, or vice-versa. */
2652 case EQ: case LE: case LT: case LEU: case LTU:
2653 /* We have these compares. */
2654 cmp_code = code, code = NE;
2658 /* This must be reversed. */
2659 cmp_code = EQ, code = EQ;
2662 case GE: case GT: case GEU: case GTU:
2663 /* These normally need swapping, but for integer zero we have
2664 special patterns that recognize swapped operands. */
2665 if (!fp_p && op1 == const0_rtx)
2666 cmp_code = code, code = NE;
2669 cmp_code = swap_condition (code);
2671 tem = op0, op0 = op1, op1 = tem;
2679 tem = gen_reg_rtx (cmp_op_mode);
2680 emit_insn (gen_rtx_SET (VOIDmode, tem,
2681 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2684 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2685 op0 = gen_lowpart (cmp_op_mode, tem);
2686 op1 = CONST0_RTX (cmp_op_mode);
2688 local_fast_math = 1;
2691 /* We may be able to use a conditional move directly.
2692 This avoids emitting spurious compares. */
2693 if (signed_comparison_operator (cmp, VOIDmode)
2694 && (!fp_p || local_fast_math)
2695 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2696 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2698 /* We can't put the comparison inside the conditional move;
2699 emit a compare instruction and put that inside the
2700 conditional move. Make sure we emit only comparisons we have;
2701 swap or reverse as necessary. */
2708 case EQ: case LE: case LT: case LEU: case LTU:
2709 /* We have these compares: */
2713 /* This must be reversed. */
2714 code = reverse_condition (code);
2718 case GE: case GT: case GEU: case GTU:
2719 /* These must be swapped. */
2720 if (op1 != CONST0_RTX (cmp_mode))
2722 code = swap_condition (code);
2723 tem = op0, op0 = op1, op1 = tem;
2733 if (!reg_or_0_operand (op0, DImode))
2734 op0 = force_reg (DImode, op0);
2735 if (!reg_or_8bit_operand (op1, DImode))
2736 op1 = force_reg (DImode, op1);
2739 /* ??? We mark the branch mode to be CCmode to prevent the compare
2740 and cmov from being combined, since the compare insn follows IEEE
2741 rules that the cmov does not. */
2742 if (fp_p && !local_fast_math)
2745 tem = gen_reg_rtx (cmp_op_mode);
2746 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2747 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2750 /* Simplify a conditional move of two constants into a setcc with
2751 arithmetic. This is done with a splitter since combine would
2752 just undo the work if done during code generation. It also catches
2753 cases we wouldn't have before cse. */
2756 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2757 rtx t_rtx, rtx f_rtx)
2759 HOST_WIDE_INT t, f, diff;
2760 enum machine_mode mode;
2761 rtx target, subtarget, tmp;
2763 mode = GET_MODE (dest);
2768 if (((code == NE || code == EQ) && diff < 0)
2769 || (code == GE || code == GT))
2771 code = reverse_condition (code);
2772 diff = t, t = f, f = diff;
2776 subtarget = target = dest;
2779 target = gen_lowpart (DImode, dest);
2780 if (! no_new_pseudos)
2781 subtarget = gen_reg_rtx (DImode);
2785 /* Below, we must be careful to use copy_rtx on target and subtarget
2786 in intermediate insns, as they may be a subreg rtx, which must not be shared between the insns we emit. */
2789 if (f == 0 && exact_log2 (diff) > 0
2790 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2791 viable over a longer latency cmove. On EV5, the E0 slot is a
2792 scarce resource, and on EV4 shift has the same latency as a cmove. */
2793 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2795 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2796 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2798 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2799 GEN_INT (exact_log2 (t)));
2800 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2802 else if (f == 0 && t == -1)
2804 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2805 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2807 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2809 else if (diff == 1 || diff == 4 || diff == 8)
2813 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2814 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2817 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2820 add_op = GEN_INT (f);
2821 if (sext_add_operand (add_op, mode))
2823 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2825 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2826 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
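/* Scalar model of the three splitter cases above (illustrative), writing
   C for the 0/1 comparison value computed into SUBTARGET:

     f == 0, t a power of two:   dest = C << log2 (t)
     f == 0, t == -1:            dest = -C
     t - f in {1, 4, 8}:         dest = C * (t - f) + f

   the last of which maps onto addq, s4addq or s8addq respectively.  */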
2838 /* Look up the X_floating library function name for the given operation. */
2841 struct xfloating_op GTY(())
2843 const enum rtx_code code;
2844 const char *const GTY((skip)) osf_func;
2845 const char *const GTY((skip)) vms_func;
2849 static GTY(()) struct xfloating_op xfloating_ops[] =
2851 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2852 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2853 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2854 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2855 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2856 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2857 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2858 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2859 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2860 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2861 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2862 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2863 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2864 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2865 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2868 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2870 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2871 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2875 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2877 struct xfloating_op *ops = xfloating_ops;
2878 long n = ARRAY_SIZE (xfloating_ops);
2881 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2883 /* How irritating. Nothing to key off for the main table. */
2884 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2887 n = ARRAY_SIZE (vax_cvt_ops);
2890 for (i = 0; i < n; ++i, ++ops)
2891 if (ops->code == code)
2893 rtx func = ops->libcall;
2896 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2897 ? ops->vms_func : ops->osf_func);
2898 ops->libcall = func;
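/* For example, expanding a TFmode addition looks up PLUS here and yields
   "_OtsAddX" on OSF/Unix or "OTS$ADD_X" on VMS; the libcall rtx is
   created lazily on first use and cached back into the table.  */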
2906 /* Most X_floating operations take the rounding mode as an argument.
2907 Compute that here. */
2910 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2911 enum alpha_fp_rounding_mode round)
2917 case ALPHA_FPRM_NORM:
2920 case ALPHA_FPRM_MINF:
2923 case ALPHA_FPRM_CHOP:
2926 case ALPHA_FPRM_DYN:
2932 /* XXX For reference, round to +inf is mode = 3. */
2935 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2941 /* Emit an X_floating library function call.
2943 Note that these functions do not follow normal calling conventions:
2944 TFmode arguments are passed in two integer registers (as opposed to
2945 indirect); TFmode return values appear in R16+R17.
2947 FUNC is the function to call.
2948 TARGET is where the output belongs.
2949 OPERANDS are the inputs.
2950 NOPERANDS is the count of inputs.
2951 EQUIV is the expression equivalent for the function.
2955 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2956 int noperands, rtx equiv)
2958 rtx usage = NULL_RTX, tmp, reg;
2963 for (i = 0; i < noperands; ++i)
2965 switch (GET_MODE (operands[i]))
2968 reg = gen_rtx_REG (TFmode, regno);
2973 reg = gen_rtx_REG (DFmode, regno + 32);
2978 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2981 reg = gen_rtx_REG (DImode, regno);
2989 emit_move_insn (reg, operands[i]);
2990 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2993 switch (GET_MODE (target))
2996 reg = gen_rtx_REG (TFmode, 16);
2999 reg = gen_rtx_REG (DFmode, 32);
3002 reg = gen_rtx_REG (DImode, 0);
3008 tmp = gen_rtx_MEM (QImode, func);
3009 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3010 const0_rtx, const0_rtx));
3011 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3012 CONST_OR_PURE_CALL_P (tmp) = 1;
3017 emit_libcall_block (tmp, target, reg, equiv);
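/* As a sketch of the resulting call (following the calling-convention
   note above): for _OtsAddX the two TFmode operands occupy $16-$17 and
   $18-$19, the rounding-mode argument goes in the next integer register,
   and the TFmode result comes back in $16-$17, from where the
   emit_libcall_block above copies it to TARGET.  */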
3020 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3023 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3027 rtx out_operands[3];
3029 func = alpha_lookup_xfloating_lib_func (code);
3030 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3032 out_operands[0] = operands[1];
3033 out_operands[1] = operands[2];
3034 out_operands[2] = GEN_INT (mode);
3035 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3036 gen_rtx_fmt_ee (code, TFmode, operands[1],
3040 /* Emit an X_floating library function call for a comparison. */
3043 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3045 enum rtx_code cmp_code, res_code;
3046 rtx func, out, operands[2];
3048 /* X_floating library comparison functions return -1, 0 or 1 rather than
3052 a proper boolean value. Convert the compare against the raw return value. */
3080 func = alpha_lookup_xfloating_lib_func (cmp_code);
3084 out = gen_reg_rtx (DImode);
3086 /* ??? Strange mode for equiv because what's actually returned
3087 is -1,0,1, not a proper boolean value. */
3088 alpha_emit_xfloating_libcall (func, out, operands, 2,
3089 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3094 /* Emit an X_floating library function call for a conversion. */
3097 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3099 int noperands = 1, mode;
3100 rtx out_operands[2];
3102 enum rtx_code code = orig_code;
3104 if (code == UNSIGNED_FIX)
3107 func = alpha_lookup_xfloating_lib_func (code);
3109 out_operands[0] = operands[1];
3114 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3115 out_operands[1] = GEN_INT (mode);
3118 case FLOAT_TRUNCATE:
3119 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3120 out_operands[1] = GEN_INT (mode);
3127 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3128 gen_rtx_fmt_e (orig_code,
3129 GET_MODE (operands[0]),
3133 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3134 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3135 guarantee that the sequence (OP[0] = OP[2] followed by OP[1] = OP[3])
3138 is valid. Naturally, output operand ordering is little-endian.
3139 This is used by *movtf_internal and *movti_internal. */
3142 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3145 switch (GET_CODE (operands[1]))
3148 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3149 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3153 operands[3] = adjust_address (operands[1], DImode, 8);
3154 operands[2] = adjust_address (operands[1], DImode, 0);
3159 gcc_assert (operands[1] == CONST0_RTX (mode));
3160 operands[2] = operands[3] = const0_rtx;
3167 switch (GET_CODE (operands[0]))
3170 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3171 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3175 operands[1] = adjust_address (operands[0], DImode, 8);
3176 operands[0] = adjust_address (operands[0], DImode, 0);
3183 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3186 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3187 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
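/* For instance (illustrative), a TImode copy from $1:$2 into $2:$3 splits
   so that operands[0] = $2 overlaps operands[3] = $2; emitting
   OP[0] = OP[2] first would clobber $2 before OP[1] = OP[3] reads it, so
   the exchange above makes the second half go first.  */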
3191 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3192 op2 is a register containing the sign bit, operation is the
3193 logical operation to be performed. */
3196 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3198 rtx high_bit = operands[2];
3202 alpha_split_tmode_pair (operands, TFmode, false);
3204 /* Detect three flavors of operand overlap. */
3206 if (rtx_equal_p (operands[0], operands[2]))
3208 else if (rtx_equal_p (operands[1], operands[2]))
3210 if (rtx_equal_p (operands[0], high_bit))
3217 emit_move_insn (operands[0], operands[2]);
3219 /* ??? If the destination overlaps both source tf and high_bit, then
3220 assume source tf is dead in its entirety and use the other half
3221 for a scratch register. Otherwise "scratch" is just the proper
3222 destination register. */
3223 scratch = operands[move < 2 ? 1 : 3];
3225 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3229 emit_move_insn (operands[0], operands[2]);
3231 emit_move_insn (operands[1], scratch);
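/* Since only the upper DImode half of a TFmode value carries the sign
   bit, the single logical instruction above suffices; the natural
   choices are xor as OPERATION for negtf2 (flip the bit) and andnot for
   abstf2 (clear it), with the lower half simply copied or left in place.  */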
3235 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting unaligned data:
3239 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3240 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3241 lda r3,X(r11) lda r3,X+2(r11)
3242 extwl r1,r3,r1 extql r1,r3,r1
3243 extwh r2,r3,r2 extqh r2,r3,r2
3244 or r1,r2,r1 or r1,r2,r1
3247 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3248 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3249 lda r3,X(r11) lda r3,X(r11)
3250 extll r1,r3,r1 extll r1,r3,r1
3251 extlh r2,r3,r2 extlh r2,r3,r2
3252 or r1,r2,r1 addl r1,r2,r1
3254 quad: ldq_u r1,X(r11)
3263 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3264 HOST_WIDE_INT ofs, int sign)
3266 rtx meml, memh, addr, extl, exth, tmp, mema;
3267 enum machine_mode mode;
3269 if (TARGET_BWX && size == 2)
3271 meml = adjust_address (mem, QImode, ofs);
3272 memh = adjust_address (mem, QImode, ofs+1);
3273 if (BYTES_BIG_ENDIAN)
3274 tmp = meml, meml = memh, memh = tmp;
3275 extl = gen_reg_rtx (DImode);
3276 exth = gen_reg_rtx (DImode);
3277 emit_insn (gen_zero_extendqidi2 (extl, meml));
3278 emit_insn (gen_zero_extendqidi2 (exth, memh));
3279 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3280 NULL, 1, OPTAB_LIB_WIDEN);
3281 addr = expand_simple_binop (DImode, IOR, extl, exth,
3282 NULL, 1, OPTAB_LIB_WIDEN);
3284 if (sign && GET_MODE (tgt) != HImode)
3286 addr = gen_lowpart (HImode, addr);
3287 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3291 if (GET_MODE (tgt) != DImode)
3292 addr = gen_lowpart (GET_MODE (tgt), addr);
3293 emit_move_insn (tgt, addr);
3298 meml = gen_reg_rtx (DImode);
3299 memh = gen_reg_rtx (DImode);
3300 addr = gen_reg_rtx (DImode);
3301 extl = gen_reg_rtx (DImode);
3302 exth = gen_reg_rtx (DImode);
3304 mema = XEXP (mem, 0);
3305 if (GET_CODE (mema) == LO_SUM)
3306 mema = force_reg (Pmode, mema);
3308 /* AND addresses cannot be in any alias set, since they may implicitly
3309 alias surrounding code. Ideally we'd have some alias set that
3310 covered all types except those with alignment 8 or higher. */
3312 tmp = change_address (mem, DImode,
3313 gen_rtx_AND (DImode,
3314 plus_constant (mema, ofs),
3316 set_mem_alias_set (tmp, 0);
3317 emit_move_insn (meml, tmp);
3319 tmp = change_address (mem, DImode,
3320 gen_rtx_AND (DImode,
3321 plus_constant (mema, ofs + size - 1),
3323 set_mem_alias_set (tmp, 0);
3324 emit_move_insn (memh, tmp);
3326 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3328 emit_move_insn (addr, plus_constant (mema, -1));
3330 emit_insn (gen_extqh_be (extl, meml, addr));
3331 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3333 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3334 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3335 addr, 1, OPTAB_WIDEN);
3337 else if (sign && size == 2)
3339 emit_move_insn (addr, plus_constant (mema, ofs+2));
3341 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3342 emit_insn (gen_extqh_le (exth, memh, addr));
3344 /* We must use tgt here for the target. Alpha-vms port fails if we use
3345 addr for the target, because addr is marked as a pointer and combine
3346 knows that pointers are always sign-extended 32-bit values. */
3347 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3348 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3349 addr, 1, OPTAB_WIDEN);
3353 if (WORDS_BIG_ENDIAN)
3355 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3359 emit_insn (gen_extwh_be (extl, meml, addr));
3364 emit_insn (gen_extlh_be (extl, meml, addr));
3369 emit_insn (gen_extqh_be (extl, meml, addr));
3376 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3380 emit_move_insn (addr, plus_constant (mema, ofs));
3381 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3385 emit_insn (gen_extwh_le (exth, memh, addr));
3390 emit_insn (gen_extlh_le (exth, memh, addr));
3395 emit_insn (gen_extqh_le (exth, memh, addr));
3404 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3405 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3410 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3413 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3416 alpha_expand_unaligned_store (rtx dst, rtx src,
3417 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3419 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3421 if (TARGET_BWX && size == 2)
3423 if (src != const0_rtx)
3425 dstl = gen_lowpart (QImode, src);
3426 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3427 NULL, 1, OPTAB_LIB_WIDEN);
3428 dsth = gen_lowpart (QImode, dsth);
3431 dstl = dsth = const0_rtx;
3433 meml = adjust_address (dst, QImode, ofs);
3434 memh = adjust_address (dst, QImode, ofs+1);
3435 if (BYTES_BIG_ENDIAN)
3436 addr = meml, meml = memh, memh = addr;
3438 emit_move_insn (meml, dstl);
3439 emit_move_insn (memh, dsth);
3443 dstl = gen_reg_rtx (DImode);
3444 dsth = gen_reg_rtx (DImode);
3445 insl = gen_reg_rtx (DImode);
3446 insh = gen_reg_rtx (DImode);
3448 dsta = XEXP (dst, 0);
3449 if (GET_CODE (dsta) == LO_SUM)
3450 dsta = force_reg (Pmode, dsta);
3452 /* AND addresses cannot be in any alias set, since they may implicitly
3453 alias surrounding code. Ideally we'd have some alias set that
3454 covered all types except those with alignment 8 or higher. */
3456 meml = change_address (dst, DImode,
3457 gen_rtx_AND (DImode,
3458 plus_constant (dsta, ofs),
3460 set_mem_alias_set (meml, 0);
3462 memh = change_address (dst, DImode,
3463 gen_rtx_AND (DImode,
3464 plus_constant (dsta, ofs + size - 1),