/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
   2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
/* Define the specific costs for a given cpu.  */

struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int mxbr;     /* cost of multiplication in TFmode.  */
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
  /* division */
  const int dxbr;
  const int ddbr;
  const int debr;
  const int dlgr;
  const int dlr;
  const int dr;
  const int dsgfr;
  const int dsgr;
};

const struct processor_costs *s390_cost;
static const
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),     /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (4),     /* MHI   */
  COSTS_N_INSNS (5),     /* ML    */
  COSTS_N_INSNS (5),     /* MR    */
  COSTS_N_INSNS (4),     /* MS    */
  COSTS_N_INSNS (15),    /* MSG   */
  COSTS_N_INSNS (7),     /* MSGF  */
  COSTS_N_INSNS (7),     /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (7),     /* multiplication in DFmode */
  COSTS_N_INSNS (13),    /* MXBR */
  COSTS_N_INSNS (136),   /* SQXBR */
  COSTS_N_INSNS (44),    /* SQDBR */
  COSTS_N_INSNS (35),    /* SQEBR */
  COSTS_N_INSNS (18),    /* MADBR */
  COSTS_N_INSNS (13),    /* MAEBR */
  COSTS_N_INSNS (134),   /* DXBR */
  COSTS_N_INSNS (30),    /* DDBR */
  COSTS_N_INSNS (27),    /* DEBR */
  COSTS_N_INSNS (220),   /* DLGR */
  COSTS_N_INSNS (34),    /* DLR */
  COSTS_N_INSNS (34),    /* DR */
  COSTS_N_INSNS (32),    /* DSGFR */
  COSTS_N_INSNS (32),    /* DSGR */
};
static const
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (176),   /* DLGR */
  COSTS_N_INSNS (31),    /* DLR */
  COSTS_N_INSNS (31),    /* DR */
  COSTS_N_INSNS (31),    /* DSGFR */
  COSTS_N_INSNS (31),    /* DSGR */
};
static const
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (30),    /* DLGR */
  COSTS_N_INSNS (23),    /* DLR */
  COSTS_N_INSNS (23),    /* DR */
  COSTS_N_INSNS (24),    /* DSGFR */
  COSTS_N_INSNS (24),    /* DSGR */
};
static const
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),    /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (10),    /* MH    */
  COSTS_N_INSNS (10),    /* MHI   */
  COSTS_N_INSNS (10),    /* ML    */
  COSTS_N_INSNS (10),    /* MR    */
  COSTS_N_INSNS (10),    /* MS    */
  COSTS_N_INSNS (10),    /* MSG   */
  COSTS_N_INSNS (10),    /* MSGF  */
  COSTS_N_INSNS (10),    /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (10),    /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (50),    /* MXBR */
  COSTS_N_INSNS (120),   /* SQXBR */
  COSTS_N_INSNS (52),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (111),   /* DXBR */
  COSTS_N_INSNS (39),    /* DDBR */
  COSTS_N_INSNS (32),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR */
  COSTS_N_INSNS (71),    /* DLR */
  COSTS_N_INSNS (71),    /* DR */
  COSTS_N_INSNS (71),    /* DSGFR */
  COSTS_N_INSNS (71),    /* DSGR */
};
extern int reload_completed;

/* Structure used to hold the components of an S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
     base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */
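/* Illustrative example (not part of the original sources): in the
   RX-format instruction "L %r1,200(%r3,%r13)" the effective address is
   computed as %r3 (index) + %r13 (base) + 200 (displacement); without
   the long-displacement facility the displacement is limited to the
   range 0..4095.  */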
/* Which cpu are we tuning for.  */
enum processor_type s390_tune = PROCESSOR_max;

/* Which instruction set architecture to use.  */
enum processor_type s390_arch;

HOST_WIDE_INT s390_warn_framesize = 0;
HOST_WIDE_INT s390_stack_size = 0;
HOST_WIDE_INT s390_stack_guard = 0;
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Number of first and last gpr for which slots in the register
     save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Number of first and last gpr to be saved, restored.  */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 -  8  7  6  5  4  3  2  1  0
     fpr 15 -  8  7  5  3  1  6  4  2  0
     reg 31 - 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;
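  /* Illustrative note (not from the original sources): with the layout
     above, marking f4 (hard reg 18) as saved sets bit 2, while bits
     8..15 correspond directly to f8..f15.  */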
  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  bool has_landing_pad_p;
};
/* A few accessor macros for struct cfun->machine->s390_frame_layout.  */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_WORD)
#define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (BITNUM)))
#define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (BITNUM))))

/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE) \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
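/* Illustrative note (not from the original sources): REGNO_PAIR_OK
   enforces the even/odd register-pair rule; a mode that needs two
   hard registers (HARD_REGNO_NREGS == 2) may only start in an
   even-numbered register.  */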
/* This is the read-ahead distance of the dynamic branch prediction
   unit, in bytes, on a z10 CPU.  */
#define Z10_PREDICT_DISTANCE 384

static enum machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static enum machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE.  */

static bool
s390_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  return default_scalar_mode_supported_p (mode);
}

/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

static void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static enum machine_mode
s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
{
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
	  || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

static bool
s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
{
  enum machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return true;

  set_mode = GET_MODE (SET_DEST (set));

      if (req_mode != set_mode)

      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
	  && req_mode != CCSRmode && req_mode != CCURmode)

      if (req_mode != CCAmode)

  return (GET_MODE (SET_SRC (set)) == set_mode);
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

static bool
s390_match_ccmode (rtx insn, enum machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
	rtx set = XVECEXP (PATTERN (insn), 0, i);
	if (GET_CODE (set) == SET)
	  if (!s390_match_ccmode_set (set, req_mode))
	    return false;
      }

  return true;
}
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM).  */

static enum machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_DOUBLE as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2.  e.g.:
     if ((a & (16 + 128)) == 16) -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2 */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
	return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
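/* Worked example (added for illustration, not in the original sources):
   for if ((a & (16 + 128)) == 16) we get op1 = 144 and op2 = 16, so
   bit1 = exact_log2 (16) = 4 and bit0 = exact_log2 (144 ^ 16) = 7;
   since bit0 > bit1, CCT1mode is returned, matching the comment
   above.  */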
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

enum machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
  switch (code)
    {
    case EQ:
    case NE:
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCAPmode;
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
	return CCAPmode;
      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
	   || GET_CODE (op1) == NEG)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCLmode;

      if (GET_CODE (op0) == AND)
	{
	  /* Check whether we can potentially do it via TM.  */
	  enum machine_mode ccmode;
	  ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
	  if (ccmode != VOIDmode)
	    {
	      /* Relax CCTmode to CCZmode to allow fall-back to AND
		 if that turns out to be beneficial.  */
	      return ccmode == CCTmode ? CCZmode : ccmode;
	    }
	}

      if (register_operand (op0, HImode)
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
	return CCT3mode;
      if (register_operand (op0, QImode)
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
	return CCT3mode;

      return CCZmode;

    case LE:
    case LT:
    case GE:
    case GT:
      /* The only overflow condition of NEG and ABS happens when
	 INT_MIN is used as parameter; the result then stays negative,
	 i.e. we have an overflow from a positive value to a negative.
	 Using CCAP mode the resulting cc can be used for comparisons.  */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCAPmode;

      /* If constants are involved in an add instruction it is possible to use
	 the resulting cc for comparisons with zero.  Knowing the sign of the
	 constant the overflow behavior gets predictable.  e.g.:
	   int a, b; if ((b = a + c) > 0)
	 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
	{
	  if (INTVAL (XEXP (op0, 1)) < 0)
	    return CCANmode;
	  else
	    return CCAPmode;
	}
      /* Fall through.  */
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
    case LTGT:
      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)
	return CCSRmode;
      return CCSmode;

    case LTU:
    case GEU:
      if (GET_CODE (op0) == PLUS
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCL1mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)
	return CCURmode;
      return CCUmode;

    case LEU:
    case GTU:
      if (GET_CODE (op0) == MINUS
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCL2mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)
	return CCURmode;
      return CCUmode;

    default:
      gcc_unreachable ();
    }
}
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

void
s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
{
  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
	  && pos >= 0 && pos + len <= modesize
	  && modesize <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT block;
	  block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
	  block <<= modesize - pos - len;

	  *op0 = gen_rtx_AND (GET_MODE (inner), inner,
			      gen_int_mode (block, GET_MODE (inner)));
	}
    }
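  /* Worked example (illustrative only): extracting len = 2 bits at
     pos = 1 from an SImode value, with the target's big-endian bit
     numbering, yields block = 3 << (32 - 1 - 2) = 0x60000000, i.e. an
     AND mask selecting bits 1 and 2 counted from the most significant
     end.  */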
  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
	  && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
	  && (GET_MODE_SIZE (GET_MODE (inner))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
	  && ((INTVAL (mask)
	       & GET_MODE_MASK (GET_MODE (inner))
	       & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
	      == 0))
	inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
	{
	  int part = s390_single_part (XEXP (*op0, 1),
				       GET_MODE (inner), QImode, 0);
	  if (part >= 0)
	    {
	      mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
	      inner = adjust_address_nv (inner, QImode, part);
	      *op0 = gen_rtx_AND (QImode, inner, mask);
	    }
	}
    }

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
	  & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }

  /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
	{
	case EQ: new_code = EQ;  break;
	case NE: new_code = NE;  break;
	case LT: new_code = GTU; break;
	case GT: new_code = LTU; break;
	case LE: new_code = GEU; break;
	case GE: new_code = LEU; break;
	default: break;
	}

      if (new_code != UNKNOWN)
	{
	  *op0 = XVECEXP (*op0, 0, 0);
	  *code = new_code;
	}
    }

  /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
	{
	case EQ: new_code = EQ;  break;
	case NE: new_code = NE;  break;
	default: break;
	}

      if (new_code != UNKNOWN)
	{
	  *op0 = XVECEXP (*op0, 0, 0);
	  *code = new_code;
	}
    }

  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
	  || (*code == NE && GET_CODE (*op0) == EQ))
	*code = EQ;
      else
	*code = NE;
      *op0 = XEXP (*op0, 0);
    }

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = swap_condition (*code);
    }
}
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  /* Do not output a redundant compare instruction if a compare_and_swap
     pattern already computed the result and the machine modes are compatible.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
		  == GET_MODE (op0));
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}

/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
{
  emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
  return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
}

/* Emit a jump instruction to TARGET.  If COND is NULL_RTX, emit an
   unconditional jump, else a conditional jump under condition COND.  */

void
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
  emit_jump_insn (insn);
}
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx);

  switch (GET_MODE (XEXP (code, 0)))
    {
    case CCZmode:
    case CCZ1mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	default: return -1;
	}
      break;

    case CCT1mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC1;
	case NE: return CC0 | CC2 | CC3;
	default: return -1;
	}
      break;

    case CCT2mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC2;
	case NE: return CC0 | CC1 | CC3;
	default: return -1;
	}
      break;

    case CCT3mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC3;
	case NE: return CC0 | CC1 | CC2;
	default: return -1;
	}
      break;

    case CCLmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0 | CC2;
	case NE: return CC1 | CC3;
	default: return -1;
	}
      break;

    case CCL1mode:
      switch (GET_CODE (code))
	{
	case LTU: return CC2 | CC3;  /* carry */
	case GEU: return CC0 | CC1;  /* no carry */
	default: return -1;
	}
      break;

    case CCL2mode:
      switch (GET_CODE (code))
	{
	case GTU: return CC0 | CC1;  /* borrow */
	case LEU: return CC2 | CC3;  /* no borrow */
	default: return -1;
	}
      break;

    case CCL3mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0 | CC2;
	case NE: return CC1 | CC3;
	case LTU: return CC1;
	case GTU: return CC3;
	case LEU: return CC1 | CC2;
	case GEU: return CC2 | CC3;
	default: return -1;
	}
      break;

    case CCUmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LTU: return CC1;
	case GTU: return CC2;
	case LEU: return CC0 | CC1;
	case GEU: return CC0 | CC2;
	default: return -1;
	}
      break;

    case CCURmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC2 | CC1 | CC3;
	case LTU: return CC2;
	case GTU: return CC1;
	case LEU: return CC0 | CC2;
	case GEU: return CC0 | CC1;
	default: return -1;
	}
      break;

    case CCAPmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1 | CC3;
	case GT: return CC2;
	case LE: return CC0 | CC1 | CC3;
	case GE: return CC0 | CC2;
	default: return -1;
	}
      break;

    case CCANmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1;
	case GT: return CC2 | CC3;
	case LE: return CC0 | CC1;
	case GE: return CC0 | CC2 | CC3;
	default: return -1;
	}
      break;

    case CCSmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1;
	case GT: return CC2;
	case LE: return CC0 | CC1;
	case GE: return CC0 | CC2;
	case UNORDERED: return CC3;
	case ORDERED: return CC0 | CC1 | CC2;
	case UNEQ: return CC0 | CC3;
	case UNLT: return CC1 | CC3;
	case UNGT: return CC2 | CC3;
	case UNLE: return CC0 | CC1 | CC3;
	case UNGE: return CC0 | CC2 | CC3;
	case LTGT: return CC1 | CC2;
	default: return -1;
	}
      break;

    case CCSRmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC2 | CC1 | CC3;
	case LT: return CC2;
	case GT: return CC1;
	case LE: return CC0 | CC2;
	case GE: return CC0 | CC1;
	case UNORDERED: return CC3;
	case ORDERED: return CC0 | CC2 | CC1;
	case UNEQ: return CC0 | CC3;
	case UNLT: return CC2 | CC3;
	case UNGT: return CC1 | CC3;
	case UNLE: return CC0 | CC2 | CC3;
	case UNGE: return CC0 | CC1 | CC3;
	case LTGT: return CC2 | CC1;
	default: return -1;
	}
      break;

    default:
      return -1;
    }
}

/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
    {
    case EQ: return CC0;
    case NE: return CC1 | CC2;
    case LT:
    case LTU: return CC1;
    case GT:
    case GTU: return CC2;
    case LE:
    case LEU: return CC0 | CC1;
    case GE:
    case GEU: return CC0 | CC2;
    default: return -1;
    }
}

/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && XEXP (code, 1) == const0_rtx)
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
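/* Worked example (illustrative only): an EQ test in CCZmode yields
   mask CC0 = 8, and mnemonic[8] is "e", i.e. "branch on equal"; the
   inverted branch flips the mask to 7 and selects "ne".  */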
/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, enum machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
	value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
	value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
	return value & part_mask;
    }

  gcc_unreachable ();
}

/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

int
s390_single_part (rtx op,
		  enum machine_mode mode,
		  enum machine_mode part_mode,
		  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
	value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
	value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
	{
	  if (part != -1)
	    return -1;
	  else
	    part = i;
	}
    }

  return part == -1 ? -1 : n_parts - 1 - part;
}
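/* Worked example (illustrative only): for op = 0x00120000 in SImode
   with HImode parts and def = 0, only the upper halfword differs from
   zero; since parts are numbered from the most significant end, the
   function returns 0, and s390_extract_part would return 0x0012.  */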
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in IN.  POS and LENGTH can be used
   to obtain the start position and the length of the bitfield.

   POS gives the position of the first bit of the bitfield counting
   from the lowest order bit starting with zero.  In order to use this
   value for S/390 instructions this has to be converted to "bits big
   endian" style.  */

bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
			   int *pos, int *length)
{
  int tmp_pos = 0;
  int tmp_length = 0;
  int i;
  unsigned HOST_WIDE_INT mask = 1ULL;
  bool contiguous = false;

  for (i = 0; i < size; mask <<= 1, i++)

  /* Calculate a mask for all bits beyond the contiguous bits.  */
  mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));

  if (tmp_length + tmp_pos - 1 > size)
    return false;

  if (length)
    *length = tmp_length;

  if (pos)
    *pos = tmp_pos;

  return true;
}
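/* Worked example (illustrative only): in = 0x000003f0 with size = 32
   is accepted with *pos = 4 and *length = 6, since bits 4..9 form a
   single contiguous run; in = 0x00000505 is rejected because its set
   bits are not contiguous.  */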
/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
{
  /* Floating point registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst))
    return false;

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
	return false;
    }

  return true;
}

/* Return true if it can be proven that [MEM1, MEM1 + SIZE]
   and [MEM2, MEM2 + SIZE] do overlap and false
   otherwise.  */

bool
s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
{
  rtx addr1, addr2, addr_delta;
  HOST_WIDE_INT delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return true;

  if (size == 0)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);

  /* This overlapping check is used by peepholes merging memory block operations.
     Overlapping operations would otherwise be recognized by the S/390 hardware
     and would fall back to a slower implementation.  Allowing overlapping
     operations would lead to slow code but not to wrong code.  Therefore we are
     somewhat optimistic if we cannot prove that the memory blocks are
     disjoint.
     That's why we return false here although this may accept operations on
     overlapping memory areas.  */
  if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
    return false;

  delta = INTVAL (addr_delta);

  if (delta == 0
      || (delta > 0 && delta < size)
      || (delta < 0 && -delta < size))
    return true;

  return false;
}

/* Check whether the address of memory reference MEM2 equals exactly
   the address of memory reference MEM1 plus DELTA.  Return true if
   we can prove this to be the case, false otherwise.  */

bool
s390_offset_p (rtx mem1, rtx mem2, rtx delta)
{
  rtx addr1, addr2, addr_delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
  if (!addr_delta || !rtx_equal_p (addr_delta, delta))
    return false;

  return true;
}
/* Expand logical operator CODE in mode MODE with operands OPERANDS.  */

void
s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
			      rtx *operands)
{
  enum machine_mode wmode = mode;
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];
  rtx op, clob, tem;

  /* If we cannot handle the operation directly, use a temp register.  */
  if (!s390_logical_operator_ok_p (operands))
    dst = gen_reg_rtx (mode);

  /* QImode and HImode patterns make sense only if we have a destination
     in memory.  Otherwise perform the operation in SImode.  */
  if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
    wmode = SImode;

  /* Widen operands if required.  */
  if (wmode != mode)
    {
      if (GET_CODE (dst) == SUBREG
	  && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
	dst = tem;
      else if (REG_P (dst))
	dst = gen_rtx_SUBREG (wmode, dst, 0);
      else
	dst = gen_reg_rtx (wmode);

      if (GET_CODE (src1) == SUBREG
	  && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
	src1 = tem;
      else if (GET_MODE (src1) != VOIDmode)
	src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);

      if (GET_CODE (src2) == SUBREG
	  && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
	src2 = tem;
      else if (GET_MODE (src2) != VOIDmode)
	src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
    }

  /* Emit the instruction.  */
  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], gen_lowpart (mode, dst));
}
/* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR).  */

bool
s390_logical_operator_ok_p (rtx *operands)
{
  /* If the destination operand is in memory, it needs to coincide
     with one of the source operands.  After reload, it has to be
     the first source operand.  */
  if (GET_CODE (operands[0]) == MEM)
    return rtx_equal_p (operands[0], operands[1])
	   || (!reload_completed && rtx_equal_p (operands[0], operands[2]));

  return true;
}
/* Narrow logical operation CODE of memory operand MEMOP with immediate
   operand IMMOP to switch from SS to SI type instructions.  */

void
s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
{
  int def = code == AND ? -1 : 0;
  HOST_WIDE_INT mask;
  int part;

  gcc_assert (GET_CODE (*memop) == MEM);
  gcc_assert (!MEM_VOLATILE_P (*memop));

  mask = s390_extract_part (*immop, QImode, def);
  part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
  gcc_assert (part >= 0);

  *memop = adjust_address (*memop, QImode, part);
  *immop = gen_int_mode (mask, QImode);
}
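/* Worked example (illustrative only): an SImode AND of a memory word
   with 0xffffff00 has def = -1 and exactly one QImode part different
   from 0xff; s390_single_part returns 3, so the operation is narrowed
   to a single-byte AND (an SI-type NI instruction) of the byte at
   offset 3 with mask 0x00.  */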
/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
s390_init_machine_status (void)
{
  return GGC_CNEW (struct machine_function);
}

/* Change optimizations to be performed, depending on the
   optimization level.

   LEVEL is the optimization level specified; 2 if `-O2' is
   specified, 1 if `-O' is specified, and 0 if neither is specified.

   SIZE is nonzero if `-Os' is specified and zero otherwise.  */

void
optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
{
  /* ??? There are apparently still problems with -fcaller-saves.  */
  flag_caller_saves = 0;

  /* By default, always emit DWARF-2 unwind info.  This allows debugging
     without maintaining a stack frame back-chain.  */
  flag_asynchronous_unwind_tables = 1;

  /* Use MVCLE instructions to decrease code size if requested.  */
  if (size != 0)
    target_flags |= MASK_MVCLE;
}
/* Return true if ARG is the name of a processor.  Set *TYPE and *FLAGS
   to the associated processor_type and processor_flags if so.  */

static bool
s390_handle_arch_option (const char *arg,
			 enum processor_type *type,
			 int *flags)
{
  static struct pta
    {
      const char *const name;		/* processor name or nickname.  */
      const enum processor_type processor;
      const int flags;			/* From enum processor_flags.  */
    }
  const processor_alias_table[] =
    {
      {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
      {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
      {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
      {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
				    | PF_LONG_DISPLACEMENT},
      {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
					| PF_LONG_DISPLACEMENT | PF_EXTIMM},
      {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
				       | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP},
      {"z10", PROCESSOR_2097_Z10, PF_IEEE_FLOAT | PF_ZARCH
				  | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP | PF_Z10},
    };
  size_t i;

  for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
    if (strcmp (arg, processor_alias_table[i].name) == 0)
      {
	*type = processor_alias_table[i].processor;
	*flags = processor_alias_table[i].flags;
	return true;
      }

  return false;
}

/* Implement TARGET_HANDLE_OPTION.  */

static bool
s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);

    case OPT_mstack_guard_:
      if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
	return false;
      if (exact_log2 (s390_stack_guard) == -1)
	error ("stack guard value must be an exact power of 2");
      return true;

    case OPT_mstack_size_:
      if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
	return false;
      if (exact_log2 (s390_stack_size) == -1)
	error ("stack size must be an exact power of 2");
      return true;

    case OPT_mtune_:
      return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);

    case OPT_mwarn_framesize_:
      return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;

    default:
      return true;
    }
}
void
override_options (void)
{
  /* Set up function hooks.  */
  init_machine_status = s390_init_machine_status;

  /* Architecture mode defaults according to ABI.  */
  if (!(target_flags_explicit & MASK_ZARCH))
    {
      if (TARGET_64BIT)
	target_flags |= MASK_ZARCH;
      else
	target_flags &= ~MASK_ZARCH;
    }

  /* Determine processor architectural level.  */
  if (!s390_arch_string)
    {
      s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
      s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
    }

  /* Determine processor to tune for.  */
  if (s390_tune == PROCESSOR_max)
    {
      s390_tune = s390_arch;
      s390_tune_flags = s390_arch_flags;
    }

  /* Sanity checks.  */
  if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
    error ("z/Architecture mode not supported on %s", s390_arch_string);
  if (TARGET_64BIT && !TARGET_ZARCH)
    error ("64-bit ABI not supported in ESA/390 mode");

  if (TARGET_HARD_DFP && !TARGET_DFP)
    {
      if (target_flags_explicit & MASK_HARD_DFP)
	{
	  if (!TARGET_CPU_DFP)
	    error ("hardware decimal floating point instructions"
		   " not available on %s", s390_arch_string);
	  if (!TARGET_ZARCH)
	    error ("hardware decimal floating point instructions"
		   " not available in ESA/390 mode");
	}
      else
	target_flags &= ~MASK_HARD_DFP;
    }

  if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
    {
      if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
	error ("-mhard-dfp cannot be used in conjunction with -msoft-float");

      target_flags &= ~MASK_HARD_DFP;
    }

  /* Set processor cost function.  */
  switch (s390_tune)
    {
    case PROCESSOR_2084_Z990:
      s390_cost = &z990_cost;
      break;
    case PROCESSOR_2094_Z9_109:
      s390_cost = &z9_109_cost;
      break;
    case PROCESSOR_2097_Z10:
      s390_cost = &z10_cost;
      break;
    default:
      s390_cost = &z900_cost;
    }

  if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
    error ("-mbackchain -mpacked-stack -mhard-float are not supported "
	   "in combination");

  if (s390_stack_size)
    {
      if (s390_stack_guard >= s390_stack_size)
	error ("stack size must be greater than the stack guard value");
      else if (s390_stack_size > 1 << 16)
	error ("stack size must not be greater than 64k");
    }
  else if (s390_stack_guard)
    error ("-mstack-guard implies use of -mstack-size");

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (s390_tune == PROCESSOR_2097_Z10
      && !PARAM_SET_P (PARAM_MAX_UNROLLED_INSNS))
    set_param_value ("max-unrolled-insns", 100);
}
/* Map for smallest class containing reg regno.  */

const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
{ GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  ADDR_REGS,    CC_REGS,   ADDR_REGS, ADDR_REGS,
  ACCESS_REGS,  ACCESS_REGS
};

/* Return attribute type of insn.  */

static enum attr_type
s390_safe_attr_type (rtx insn)
{
  if (recog_memoized (insn) >= 0)
    return get_attr_type (insn);
  else
    return TYPE_NONE;
}
/* Return true if DISP is a valid short displacement.  */

static bool
s390_short_displacement (rtx disp)
{
  /* No displacement is OK.  */
  if (!disp)
    return true;

  /* Without the long displacement facility we don't need to
     distinguish between long and short displacement.  */
  if (!TARGET_LONG_DISPLACEMENT)
    return true;

  /* Integer displacement in range.  */
  if (GET_CODE (disp) == CONST_INT)
    return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;

  /* GOT offset is not OK, the GOT can be large.  */
  if (GET_CODE (disp) == CONST
      && GET_CODE (XEXP (disp, 0)) == UNSPEC
      && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
	  || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
    return false;

  /* All other symbolic constants are literal pool references,
     which are OK as the literal pool must be small.  */
  if (GET_CODE (disp) == CONST)
    return true;

  return false;
}
/* Decompose a RTL expression ADDR for a memory address into
   its components, returned in OUT.

   Returns false if ADDR is not a valid memory address, true
   otherwise.  If OUT is NULL, don't return the components,
   but check for validity only.

   Note: Only addresses in canonical form are recognized.
   LEGITIMIZE_ADDRESS should convert non-canonical forms to the
   canonical form so that they will be recognized.  */

static int
s390_decompose_address (rtx addr, struct s390_address *out)
{
  HOST_WIDE_INT offset = 0;
  rtx base = NULL_RTX;
  rtx indx = NULL_RTX;
  rtx disp = NULL_RTX;
  rtx orig_disp;
  bool pointer = false;
  bool base_ptr = false;
  bool indx_ptr = false;
  bool literal_pool = false;

  /* We may need to substitute the literal pool base register into the address
     below.  However, at this point we do not know which register is going to
     be used as base, so we substitute the arg pointer register.  This is going
     to be treated as holding a pointer below -- it shouldn't be used for any
     other purpose.  */
  rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);

  /* Decompose address into base + index + displacement.  */

  if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
    base = addr;

  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      enum rtx_code code0 = GET_CODE (op0);
      enum rtx_code code1 = GET_CODE (op1);

      if (code0 == REG || code0 == UNSPEC)
	{
	  if (code1 == REG || code1 == UNSPEC)
	    {
	      indx = op0;	/* index + base */
	      base = op1;
	    }
	  else
	    {
	      base = op0;	/* base + displacement */
	      disp = op1;
	    }
	}
      else if (code0 == PLUS)
	{
	  indx = XEXP (op0, 0);	/* index + base + disp */
	  base = XEXP (op0, 1);
	  disp = op1;
	}
      else
	return false;
    }
  else
    disp = addr;		/* displacement */

  /* Extract integer part of displacement.  */
  orig_disp = disp;
  if (disp)
    {
      if (GET_CODE (disp) == CONST_INT)
	{
	  offset = INTVAL (disp);
	  disp = NULL_RTX;
	}
      else if (GET_CODE (disp) == CONST
	       && GET_CODE (XEXP (disp, 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
	{
	  offset = INTVAL (XEXP (XEXP (disp, 0), 1));
	  disp = XEXP (XEXP (disp, 0), 0);
	}
    }

  /* Strip off CONST here to avoid special case tests later.  */
  if (disp && GET_CODE (disp) == CONST)
    disp = XEXP (disp, 0);

  /* We can convert literal pool addresses to
     displacements by basing them off the base register.  */
  if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
    {
      /* Either base or index must be free to hold the base register.  */
      if (!base)
	base = fake_pool_base, literal_pool = true;
      else if (!indx)
	indx = fake_pool_base, literal_pool = true;
      else
	return false;

      /* Mark up the displacement.  */
      disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
			     UNSPEC_LTREL_OFFSET);
    }

  /* Validate base register.  */
  if (base)
    {
      if (GET_CODE (base) == UNSPEC)
	switch (XINT (base, 1))
	  {
	  case UNSPEC_LTREF:
	    if (!disp)
	      disp = gen_rtx_UNSPEC (Pmode,
				     gen_rtvec (1, XVECEXP (base, 0, 0)),
				     UNSPEC_LTREL_OFFSET);
	    else
	      return false;

	    base = XVECEXP (base, 0, 1);
	    break;

	  case UNSPEC_LTREL_BASE:
	    if (XVECLEN (base, 0) == 1)
	      base = fake_pool_base, literal_pool = true;
	    else
	      base = XVECEXP (base, 0, 1);
	    break;

	  default:
	    return false;
	  }

      if (!REG_P (base)
	  || (GET_MODE (base) != SImode
	      && GET_MODE (base) != Pmode))
	return false;

      if (REGNO (base) == STACK_POINTER_REGNUM
	  || REGNO (base) == FRAME_POINTER_REGNUM
	  || ((reload_completed || reload_in_progress)
	      && frame_pointer_needed
	      && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
	  || REGNO (base) == ARG_POINTER_REGNUM
	  || (flag_pic
	      && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
	pointer = base_ptr = true;

      if ((reload_completed || reload_in_progress)
	  && base == cfun->machine->base_reg)
	pointer = base_ptr = literal_pool = true;
    }

  /* Validate index register.  */
  if (indx)
    {
      if (GET_CODE (indx) == UNSPEC)
	switch (XINT (indx, 1))
	  {
	  case UNSPEC_LTREF:
	    if (!disp)
	      disp = gen_rtx_UNSPEC (Pmode,
				     gen_rtvec (1, XVECEXP (indx, 0, 0)),
				     UNSPEC_LTREL_OFFSET);
	    else
	      return false;

	    indx = XVECEXP (indx, 0, 1);
	    break;

	  case UNSPEC_LTREL_BASE:
	    if (XVECLEN (indx, 0) == 1)
	      indx = fake_pool_base, literal_pool = true;
	    else
	      indx = XVECEXP (indx, 0, 1);
	    break;

	  default:
	    return false;
	  }

      if (!REG_P (indx)
	  || (GET_MODE (indx) != SImode
	      && GET_MODE (indx) != Pmode))
	return false;

      if (REGNO (indx) == STACK_POINTER_REGNUM
	  || REGNO (indx) == FRAME_POINTER_REGNUM
	  || ((reload_completed || reload_in_progress)
	      && frame_pointer_needed
	      && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
	  || REGNO (indx) == ARG_POINTER_REGNUM
	  || (flag_pic
	      && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
	pointer = indx_ptr = true;

      if ((reload_completed || reload_in_progress)
	  && indx == cfun->machine->base_reg)
	pointer = indx_ptr = literal_pool = true;
    }

  /* Prefer to use pointer as base, not index.  */
  if (base && indx && !base_ptr
      && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
    {
      rtx tmp = base;
      base = indx;
      indx = tmp;
    }

  /* Validate displacement.  */
  if (!disp)
    {
      /* If virtual registers are involved, the displacement will change later
	 anyway as the virtual registers get eliminated.  This could make a
	 valid displacement invalid, but it is more likely to make an invalid
	 displacement valid, because we sometimes access the register save area
	 via negative offsets to one of those registers.
	 Thus we don't check the displacement for validity here.  If after
	 elimination the displacement turns out to be invalid after all,
	 this is fixed up by reload in any case.  */
      if (base != arg_pointer_rtx
	  && indx != arg_pointer_rtx
	  && base != return_address_pointer_rtx
	  && indx != return_address_pointer_rtx
	  && base != frame_pointer_rtx
	  && indx != frame_pointer_rtx
	  && base != virtual_stack_vars_rtx
	  && indx != virtual_stack_vars_rtx)
	if (!DISP_IN_RANGE (offset))
	  return false;
    }
  else
    {
      /* All the special cases are pointers.  */
      pointer = true;

      /* In the small-PIC case, the linker converts @GOT
	 and @GOTNTPOFF offsets to possible displacements.  */
      if (GET_CODE (disp) == UNSPEC
	  && (XINT (disp, 1) == UNSPEC_GOT
	      || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
	  && flag_pic == 1)
	{
	  ;
	}

      /* Accept pool label offsets.  */
      else if (GET_CODE (disp) == UNSPEC
	       && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
	;

      /* Accept literal pool references.  */
      else if (GET_CODE (disp) == UNSPEC
	       && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
	{
	  orig_disp = gen_rtx_CONST (Pmode, disp);
	  if (offset)
	    {
	      /* If we have an offset, make sure it does not
		 exceed the size of the constant pool entry.  */
	      rtx sym = XVECEXP (disp, 0, 0);
	      if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
		return false;

	      orig_disp = plus_constant (orig_disp, offset);
	    }
	}

      else
	return false;
    }

  if (out)
    {
      out->base = base;
      out->indx = indx;
      out->disp = orig_disp;
      out->pointer = pointer;
      out->literal_pool = literal_pool;
    }

  return true;
}
/* Decompose a RTL expression OP for a shift count into its components,
   and return the base register in BASE and the offset in OFFSET.

   Return true if OP is a valid shift count, false if not.  */

bool
s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
{
  HOST_WIDE_INT off = 0;

  /* We can have an integer constant, an address register,
     or a sum of the two.  */
  if (GET_CODE (op) == CONST_INT)
    {
      off = INTVAL (op);
      op = NULL_RTX;
    }
  if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
    {
      off = INTVAL (XEXP (op, 1));
      op = XEXP (op, 0);
    }
  while (op && GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (op && GET_CODE (op) != REG)
    return false;

  if (offset)
    *offset = off;
  if (base)
    *base = op;

  return true;
}

/* Return true if OP is a valid address without index.  */

bool
s390_legitimate_address_without_index_p (rtx op)
{
  struct s390_address addr;

  if (!s390_decompose_address (XEXP (op, 0), &addr))
    return false;
  if (addr.indx)
    return false;

  return true;
}
/* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
   and return these parts in SYMREF and ADDEND.  You can pass NULL in
   SYMREF and/or ADDEND if you are not interested in these values.  */

static bool
s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
{
  HOST_WIDE_INT tmpaddend = 0;

  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
	  && CONST_INT_P (XEXP (addr, 1)))
	{
	  tmpaddend = INTVAL (XEXP (addr, 1));
	  addr = XEXP (addr, 0);
	}
      else
	return false;
    }
  else
    if (GET_CODE (addr) != SYMBOL_REF)
      return false;

  if (symref)
    *symref = addr;
  if (addend)
    *addend = tmpaddend;

  return true;
}
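/* Illustrative example (not from the original sources): for
   addr = (const (plus (symbol_ref "foo") (const_int 8))) the function
   returns true with *symref = (symbol_ref "foo") and *addend = 8.  */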
/* Return true if the address in OP is valid for constraint letter C
   if wrapped in a MEM rtx.  Set LIT_POOL_OK to true if literal
   pool MEMs should be accepted.  Only the Q, R, S, T constraint
   letters are allowed for C.  */

static int
s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
{
  struct s390_address addr;
  bool decomposed = false;

  /* This check makes sure that no symbolic address (except literal
     pool references) are accepted by the R or T constraints.  */
  if (s390_symref_operand_p (op, NULL, NULL))
    return 0;
  if (!lit_pool_ok)
    {
      if (!s390_decompose_address (op, &addr))
	return 0;
      if (addr.literal_pool)
	return 0;
      decomposed = true;
    }

  switch (c)
    {
    case 'Q': /* no index short displacement */
      if (!decomposed && !s390_decompose_address (op, &addr))
	return 0;
      if (addr.indx)
	return 0;
      if (!s390_short_displacement (addr.disp))
	return 0;
      break;

    case 'R': /* with index short displacement */
      if (TARGET_LONG_DISPLACEMENT)
	{
	  if (!decomposed && !s390_decompose_address (op, &addr))
	    return 0;
	  if (!s390_short_displacement (addr.disp))
	    return 0;
	}
      /* Any invalid address here will be fixed up by reload,
	 so accept it for the most generic constraint.  */
      break;

    case 'S': /* no index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)
	return 0;
      if (!decomposed && !s390_decompose_address (op, &addr))
	return 0;
      if (addr.indx)
	return 0;
      if (s390_short_displacement (addr.disp))
	return 0;
      break;

    case 'T': /* with index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)
	return 0;
      /* Any invalid address here will be fixed up by reload,
	 so accept it for the most generic constraint.  */
      if ((decomposed || s390_decompose_address (op, &addr))
	  && s390_short_displacement (addr.disp))
	return 0;
      break;

    default:
      return 0;
    }

  return 1;
}

/* Evaluates constraint strings described by the regular expression
   ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
   the constraint given in STR, and 0 otherwise.  */

int
s390_mem_constraint (const char *str, rtx op)
{
  char c = str[0];

  switch (c)
    {
    case 'A':
      /* Check for offsettable variants of memory constraints.  */
      if (!MEM_P (op) || MEM_VOLATILE_P (op))
	return 0;
      if ((reload_completed || reload_in_progress)
	  ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
	return 0;
      return s390_check_qrst_address (str[1], XEXP (op, 0), true);
    case 'B':
      /* Check for non-literal-pool variants of memory constraints.  */
      if (!MEM_P (op))
	return 0;
      return s390_check_qrst_address (str[1], XEXP (op, 0), false);
    case 'Q':
    case 'R':
    case 'S':
    case 'T':
      if (GET_CODE (op) != MEM)
	return 0;
      return s390_check_qrst_address (c, XEXP (op, 0), true);
    case 'U':
      return (s390_check_qrst_address ('Q', op, true)
	      || s390_check_qrst_address ('R', op, true));
    case 'W':
      return (s390_check_qrst_address ('S', op, true)
	      || s390_check_qrst_address ('T', op, true));
    case 'Y':
      /* Simply check for the basic form of a shift count.  Reload will
	 take care of making sure we have a proper base register.  */
      if (!s390_decompose_shift_count (op, NULL, NULL))
	return 0;
      break;
    case 'Z':
      return s390_check_qrst_address (str[1], op, true);
    default:
      return 0;
    }

  return 1;
}
/* Evaluates constraint strings starting with letter O.  Input
   parameter C is the second letter following the "O" in the constraint
   string.  Returns 1 if VALUE meets the respective constraint and 0
   otherwise.  */

int
s390_O_constraint_str (const char c, HOST_WIDE_INT value)
{
  if (!TARGET_EXTIMM)
    return 0;

  switch (c)
    {
    case 's':
      return trunc_int_for_mode (value, SImode) == value;

    case 'p':
      return value == 0
	     || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;

    case 'n':
      return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;

    default:
      gcc_unreachable ();
    }
}

/* Evaluates constraint strings starting with letter N.  Parameter STR
   contains the letters following letter "N" in the constraint string.
   Returns true if VALUE matches the constraint.  */

int
s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
{
  enum machine_mode mode, part_mode;
  int def;
  int part, part_goal;

  if (str[0] == 'x')
    part_goal = -1;
  else
    part_goal = str[0] - '0';

  if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
    return 0;

  part = s390_single_part (GEN_INT (value), mode, part_mode, def);
  if (part < 0)
    return 0;
  if (part_goal != -1 && part_goal != part)
    return 0;

  return 1;
}

/* Returns true if the input parameter VALUE is a float zero.  */

int
s390_float_const_zero_p (rtx value)
{
  return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
	  && value == CONST0_RTX (GET_MODE (value)));
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.
   CODE contains GET_CODE (x), OUTER_CODE contains the code
   of the superexpression of x.  */

static bool
s390_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST:
    case CONST_INT:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
    case MEM:
      *total = 0;
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
    case AND:
    case IOR:
    case XOR:
    case NEG:
    case NOT:
      *total = COSTS_N_INSNS (1);
      return false;

    case PLUS:
    case MINUS:
      /* Check for multiply and add.  */
      if ((GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && TARGET_HARD_FLOAT && TARGET_FUSED_MADD)
	{
	  /* This is the multiply and add case.  */
	  if (GET_MODE (x) == DFmode)
	    *total = s390_cost->madbr;
	  else
	    *total = s390_cost->maebr;
	  *total += (rtx_cost (XEXP (XEXP (x, 0), 0), MULT, speed)
		     + rtx_cost (XEXP (XEXP (x, 0), 1), MULT, speed)
		     + rtx_cost (XEXP (x, 1), (enum rtx_code) code, speed));
	  return true;  /* Do not do an additional recursive descent.  */
	}
      *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      switch (GET_MODE (x))
	{
	case SImode:
	  {
	    rtx left = XEXP (x, 0);
	    rtx right = XEXP (x, 1);
	    if (GET_CODE (right) == CONST_INT
		&& CONST_OK_FOR_K (INTVAL (right)))
	      *total = s390_cost->mhi;
	    else if (GET_CODE (left) == SIGN_EXTEND)
	      *total = s390_cost->mh;
	    else
	      *total = s390_cost->ms;  /* msr, ms, msy */
	    break;
	  }
	case DImode:
	  {
	    rtx left = XEXP (x, 0);
	    rtx right = XEXP (x, 1);
	    if (TARGET_64BIT)
	      {
		if (GET_CODE (right) == CONST_INT
		    && CONST_OK_FOR_K (INTVAL (right)))
		  *total = s390_cost->mghi;
		else if (GET_CODE (left) == SIGN_EXTEND)
		  *total = s390_cost->msgf;
		else
		  *total = s390_cost->msg;  /* msgr, msg */
	      }
	    else /* TARGET_31BIT */
	      {
		if (GET_CODE (left) == SIGN_EXTEND
		    && GET_CODE (right) == SIGN_EXTEND)
		  /* mulsidi case: mr, m */
		  *total = s390_cost->m;
		else if (GET_CODE (left) == ZERO_EXTEND
			 && GET_CODE (right) == ZERO_EXTEND
			 && TARGET_CPU_ZARCH)
		  /* umulsidi case: ml, mlr */
		  *total = s390_cost->ml;
		else
		  /* Complex calculation is required.  */
		  *total = COSTS_N_INSNS (40);
	      }
	    break;
	  }
	case SFmode:
	case DFmode:
	  *total = s390_cost->mult_df;
	  break;
	case TFmode:
	  *total = s390_cost->mxbr;
	  break;
	default:
	  return false;
	}
      break;

    case UDIV:
    case UMOD:
      if (GET_MODE (x) == TImode)	       /* 128 bit division */
	*total = s390_cost->dlgr;
      else if (GET_MODE (x) == DImode)
	{
	  rtx right = XEXP (x, 1);
	  if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
	    *total = s390_cost->dlr;
	  else				       /* 64 by 64 bit division */
	    *total = s390_cost->dlgr;
	}
      else if (GET_MODE (x) == SImode)	       /* 32 bit division */
	*total = s390_cost->dlr;
      break;

    case DIV:
    case MOD:
      if (GET_MODE (x) == DImode)
	{
	  rtx right = XEXP (x, 1);
	  if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
	    if (TARGET_64BIT)
	      *total = s390_cost->dsgfr;
	    else
	      *total = s390_cost->dr;
	  else				       /* 64 by 64 bit division */
	    *total = s390_cost->dsgr;
	}
      else if (GET_MODE (x) == SImode)	       /* 32 bit division */
	*total = s390_cost->dlr;
      else if (GET_MODE (x) == SFmode)
	*total = s390_cost->debr;
      else if (GET_MODE (x) == DFmode)
	*total = s390_cost->ddbr;
      else if (GET_MODE (x) == TFmode)
	*total = s390_cost->dxbr;
      break;

    case SQRT:
      if (GET_MODE (x) == SFmode)
	*total = s390_cost->sqebr;
      else if (GET_MODE (x) == DFmode)
	*total = s390_cost->sqdbr;
      else /* TFmode */
	*total = s390_cost->sqxbr;
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (outer_code == MULT || outer_code == DIV || outer_code == MOD
	  || outer_code == PLUS || outer_code == MINUS
	  || outer_code == COMPARE)
	*total = 0;
      break;

    case COMPARE:
      *total = COSTS_N_INSNS (1);
      if (GET_CODE (XEXP (x, 0)) == AND
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 0);
	  rtx op1 = XEXP (XEXP (x, 0), 1);
	  rtx op2 = XEXP (x, 1);

	  if (memory_operand (op0, GET_MODE (op0))
	      && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
	    return true;
	  if (register_operand (op0, GET_MODE (op0))
	      && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
	    return true;
	}
      break;

    default:
      return false;
    }

  return false;
}
2524 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2526 struct s390_address ad;
2527 if (!s390_decompose_address (addr, &ad))
2530 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2533 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2534 otherwise return 0. */
2537 tls_symbolic_operand (rtx op)
2539 if (GET_CODE (op) != SYMBOL_REF)
2541 return SYMBOL_REF_TLS_MODEL (op);
/* Split DImode access register reference REG (on 64-bit) into its constituent
   low and high parts, and store them into LO and HI.  Note that gen_lowpart/
   gen_highpart cannot be used as they assume all registers are word-sized,
   while our access registers have only half that size.  */

void
s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
{
  gcc_assert (TARGET_64BIT);
  gcc_assert (ACCESS_REG_P (reg));
  gcc_assert (GET_MODE (reg) == DImode);
  gcc_assert (!(REGNO (reg) & 1));

  *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
  *hi = gen_rtx_REG (SImode, REGNO (reg));
}
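
/* Stand-alone sketch, not from this file: the split above relies on
   big-endian subword order -- the high part of a register pair lives in
   the lower-numbered (even) register, the low part in the odd one.  The
   same convention applied to a plain 64-bit value:  */

static void
split_di_big_endian (unsigned long long val,
		     unsigned int *hi, unsigned int *lo)
{
  *hi = (unsigned int) (val >> 32);	/* even, lower-numbered register */
  *lo = (unsigned int) val;		/* odd, higher-numbered register */
}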
/* Return true if OP contains a symbol reference.  */

bool
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return true;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return true;
	}
      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return true;
    }

  return false;
}
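
/* Stand-alone sketch, not from this file: the walker above is the
   standard rtx format walk -- an operand slot of format 'e' holds one
   sub-expression, 'E' holds a vector of them.  The same recursion over
   a toy tree type (all names here are hypothetical):  */

struct toy_node
{
  int is_symbol;
  int num_children;
  struct toy_node **children;
};

static int
toy_mentions_symbol (const struct toy_node *n)
{
  int i;

  if (n->is_symbol)
    return 1;
  for (i = n->num_children - 1; i >= 0; i--)
    if (toy_mentions_symbol (n->children[i]))
      return 1;
  return 0;
}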
/* Return true if OP contains a reference to a thread-local symbol.  */

bool
tls_symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF)
    return tls_symbolic_operand (op);

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return true;
	}
      else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
	return true;
    }

  return false;
}
/* Return true if OP is a legitimate general operand when
   generating PIC code.  It is given that flag_pic is on
   and that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

int
legitimate_pic_operand_p (rtx op)
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* Reject everything else; must be handled
     via emit_symbolic_move.  */
  return 0;
}
/* Returns true if the constant value OP is a legitimate general operand.
   It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

int
legitimate_constant_p (rtx op)
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* Accept immediate LARL operands.  */
  if (TARGET_CPU_ZARCH && larl_operand (op, VOIDmode))
    return 1;

  /* Thread-local symbols are never legal constants.  This is
     so that emit_call knows that computing such addresses
     might require a function call.  */
  if (TLS_SYMBOLIC_CONST (op))
    return 0;

  /* In the PIC case, symbolic constants must *not* be
     forced into the literal pool.  We accept them here,
     so that they will be handled by emit_symbolic_move.  */
  if (flag_pic)
    return 1;

  /* All remaining non-PIC symbolic constants are
     forced into the literal pool.  */
  return 0;
}
/* Determine if it's legal to put X into the constant pool.  This
   is not possible if X contains the address of a symbol that is
   not constant (TLS) or not known at final link time (PIC).  */

static bool
s390_cannot_force_const_mem (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
      /* Accept all non-symbolic constants.  */
      return false;

    case LABEL_REF:
      /* Labels are OK iff we are non-PIC.  */
      return flag_pic != 0;

    case SYMBOL_REF:
      /* 'Naked' TLS symbol references are never OK,
	 non-TLS symbols are OK iff we are non-PIC.  */
      if (tls_symbolic_operand (x))
	return true;
      else
	return flag_pic != 0;

    case CONST:
      return s390_cannot_force_const_mem (XEXP (x, 0));
    case PLUS:
    case MINUS:
      return s390_cannot_force_const_mem (XEXP (x, 0))
	     || s390_cannot_force_const_mem (XEXP (x, 1));

    case UNSPEC:
      switch (XINT (x, 1))
	{
	/* Only lt-relative or GOT-relative UNSPECs are OK.  */
	case UNSPEC_LTREL_OFFSET:
	case UNSPEC_LTREL_BASE:
	case UNSPEC_GOTENT:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLTOFF:
	case UNSPEC_TLSGD:
	case UNSPEC_TLSLDM:
	case UNSPEC_NTPOFF:
	case UNSPEC_DTPOFF:
	case UNSPEC_GOTNTPOFF:
	case UNSPEC_INDNTPOFF:
	  return false;

	/* If the literal pool shares the code section, we put
	   execute template placeholders into the pool as well.  */
	case UNSPEC_INSN:
	  return TARGET_CPU_ZARCH;

	default:
	  return true;
	}
      break;

    default:
      gcc_unreachable ();
    }
}
/* Returns true if the constant value OP is a legitimate general
   operand during and after reload.  The difference to
   legitimate_constant_p is that this function will not accept
   a constant that would need to be forced to the literal pool
   before it can be used as operand.  */

bool
legitimate_reload_constant_p (rtx op)
{
  /* Accept la(y) operands.  */
  if (GET_CODE (op) == CONST_INT
      && DISP_IN_RANGE (INTVAL (op)))
    return true;

  /* Accept l(g)hi/l(g)fi operands.  */
  if (GET_CODE (op) == CONST_INT
      && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
    return true;

  /* Accept lliXX operands.  */
  if (TARGET_ZARCH
      && GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
      && s390_single_part (op, word_mode, HImode, 0) >= 0)
    return true;

  /* Accept llihf/llilf operands.  */
  if (TARGET_EXTIMM
      && GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
      && s390_single_part (op, word_mode, SImode, 0) >= 0)
    return true;

  /* Accept larl operands.  */
  if (TARGET_CPU_ZARCH
      && larl_operand (op, VOIDmode))
    return true;

  /* Accept lzXX operands.  */
  if (GET_CODE (op) == CONST_DOUBLE
      && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, 'G', "G"))
    return true;

  /* Accept double-word operands that can be split.  */
  if (GET_CODE (op) == CONST_INT
      && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
    {
      enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
      rtx hi = operand_subword (op, 0, 0, dword_mode);
      rtx lo = operand_subword (op, 1, 0, dword_mode);
      return legitimate_reload_constant_p (hi)
	     && legitimate_reload_constant_p (lo);
    }

  /* Everything else cannot be handled without reload.  */
  return false;
}
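
/* Stand-alone sketch, not from this file: the lliXX test above asks
   whether all nonzero bits of the constant fall into a single 16-bit
   halfword, since llihh/llihl/llilh/llill each load exactly one
   halfword and zero the rest.  A minimal 64-bit model; the helper name
   is hypothetical, and the real check (s390_single_part) also handles
   other part sizes and a configurable default value.  */

static int
single_halfword_part (unsigned long long val)
{
  int i, part = -1;

  for (i = 0; i < 4; i++)
    if ((val >> (16 * i)) & 0xffffULL)
      {
	if (part >= 0)
	  return -1;	/* Bits span more than one halfword.  */
	part = i;
      }
  return part;		/* -1 if VAL is zero or spans parts.  */
}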
/* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
   return the class of reg to actually use.  */

enum reg_class
s390_preferred_reload_class (rtx op, enum reg_class rclass)
{
  switch (GET_CODE (op))
    {
      /* Constants we cannot reload must be forced into the
	 literal pool.  */
      case CONST_DOUBLE:
      case CONST_INT:
	if (legitimate_reload_constant_p (op))
	  return rclass;
	else
	  return NO_REGS;

      /* If a symbolic constant or a PLUS is reloaded,
	 it is most likely being used as an address, so
	 prefer ADDR_REGS.  If RCLASS is not a superset
	 of ADDR_REGS, e.g. FP_REGS, reject this reload.  */
      case PLUS:
      case LABEL_REF:
      case SYMBOL_REF:
      case CONST:
	if (reg_class_subset_p (ADDR_REGS, rclass))
	  return ADDR_REGS;
	else
	  return NO_REGS;

      default:
	break;
    }

  return rclass;
}
/* Return true if ADDR is SYMBOL_REF + addend with addend being a
   multiple of ALIGNMENT and the SYMBOL_REF being naturally
   aligned.  */

bool
s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
{
  HOST_WIDE_INT addend;
  rtx symref;

  if (!s390_symref_operand_p (addr, &symref, &addend))
    return false;

  return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
	  && !(addend & (alignment - 1)));
}
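
/* Stand-alone sketch, not from this file: for a power-of-two ALIGNMENT
   the addend test above reduces to checking that the low bits are
   clear, which is exactly what (addend & (alignment - 1)) computes.  */

static int
addend_is_aligned (long long addend, long long alignment)
{
  /* Assumes ALIGNMENT is a power of two.  */
  return (addend & (alignment - 1)) == 0;
}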
/* ADDR is moved into REG using larl.  If ADDR isn't a valid larl
   operand SCRATCH is used to reload the even part of the address and
   the odd part is added afterwards.  */

void
s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
{
  HOST_WIDE_INT addend;
  rtx symref;

  if (!s390_symref_operand_p (addr, &symref, &addend))
    gcc_unreachable ();

  if (!(addend & 1))
    /* Easy case.  The addend is even so larl will do fine.  */
    emit_move_insn (reg, addr);
  else
    {
      /* We can leave the scratch register untouched if the target
	 register is a valid base register.  */
      if (REGNO (reg) < FIRST_PSEUDO_REGISTER
	  && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
	scratch = reg;

      gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
      gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);

      if (addend != 1)
	emit_move_insn (scratch,
			gen_rtx_CONST (Pmode,
				       gen_rtx_PLUS (Pmode, symref,
						     GEN_INT (addend - 1))));
      else
	emit_move_insn (scratch, symref);

      /* Increment the address using la in order to avoid clobbering cc.  */
      emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
    }
}
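
/* Stand-alone sketch, not from this file: larl encodes only even
   (halfword) offsets, so SYM + ADDEND with an odd ADDEND is split into
   a larl of SYM + (ADDEND - 1) followed by an la of 1, exactly as the
   helper above emits.  The arithmetic, modelled on plain integers:  */

static unsigned long long
larl_odd_addend (unsigned long long sym, long long addend)
{
  unsigned long long even_part = sym + (addend - 1);	/* via larl */
  return even_part + 1;					/* via la   */
}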
/* Generate what is necessary to move between REG and MEM using
   SCRATCH.  The direction is given by TOMEM.  */

void
s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
{
  /* Reload might have pulled a constant out of the literal pool.
     Force it back in.  */
  if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
      || GET_CODE (mem) == CONST)
    mem = force_const_mem (GET_MODE (reg), mem);

  gcc_assert (MEM_P (mem));

  /* For a load from memory we can leave the scratch register
     untouched if the target register is a valid base register.  */
  if (!tomem
      && REGNO (reg) < FIRST_PSEUDO_REGISTER
      && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
      && GET_MODE (reg) == GET_MODE (scratch))
    scratch = reg;

  /* Load address into scratch register.  Since we can't have a
     secondary reload for a secondary reload we have to cover the case
     where larl would need a secondary reload here as well.  */
  s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);

  /* Now we can use a standard load/store to do the move.  */
  if (tomem)
    emit_move_insn (replace_equiv_address (mem, scratch), reg);
  else
    emit_move_insn (reg, replace_equiv_address (mem, scratch));
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static enum reg_class
s390_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
		       enum machine_mode mode, secondary_reload_info *sri)
{
  /* Intermediate register needed.  */
  if (reg_classes_intersect_p (CC_REGS, rclass))
    return GENERAL_REGS;

  if (TARGET_Z10)
    {
      /* On z10 several optimizer steps may generate larl operands with
	 an odd addend.  */
      if (in_p
	  && s390_symref_operand_p (x, NULL, NULL)
	  && mode == Pmode
	  && !s390_check_symref_alignment (x, 2))
	sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
		      : CODE_FOR_reloadsi_larl_odd_addend_z10);

      /* On z10 we need a scratch register when moving QI, TI or floating
	 point mode values from or to a memory location with a SYMBOL_REF
	 or if the symref addend of a SI or DI move is not aligned to the
	 width of the access.  */
      if (MEM_P (x)
	  && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
	  && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
	      || (!TARGET_64BIT && mode == DImode)
	      || ((mode == HImode || mode == SImode || mode == DImode)
		  && (!s390_check_symref_alignment (XEXP (x, 0),
						    GET_MODE_SIZE (mode))))))
	{
#define __SECONDARY_RELOAD_CASE(M,m)					\
	  case M##mode:							\
	    if (TARGET_64BIT)						\
	      sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 :	\
				  CODE_FOR_reload##m##di_tomem_z10;	\
	    else							\
	      sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 :	\
				  CODE_FOR_reload##m##si_tomem_z10;	\
	    break;

	  switch (GET_MODE (x))
	    {
	      __SECONDARY_RELOAD_CASE (QI, qi);
	      __SECONDARY_RELOAD_CASE (HI, hi);
	      __SECONDARY_RELOAD_CASE (SI, si);
	      __SECONDARY_RELOAD_CASE (DI, di);
	      __SECONDARY_RELOAD_CASE (TI, ti);
	      __SECONDARY_RELOAD_CASE (SF, sf);
	      __SECONDARY_RELOAD_CASE (DF, df);
	      __SECONDARY_RELOAD_CASE (TF, tf);
	      __SECONDARY_RELOAD_CASE (SD, sd);
	      __SECONDARY_RELOAD_CASE (DD, dd);
	      __SECONDARY_RELOAD_CASE (TD, td);

	    default:
	      gcc_unreachable ();
	    }
#undef __SECONDARY_RELOAD_CASE
	}
    }
  /* We need a scratch register when loading a PLUS expression which
     is not a legitimate operand of the LOAD ADDRESS instruction.  */
  if (in_p && s390_plus_operand (x, mode))
    sri->icode = (TARGET_64BIT ?
		  CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);

  /* When performing a multiword move from or to memory, we have to make
     sure the second chunk in memory is addressable without causing a
     displacement overflow.  If that would be the case, we calculate the
     address in a scratch register.  */
  if (MEM_P (x)
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
			 + GET_MODE_SIZE (mode) - 1))
    {
      /* For GENERAL_REGS a displacement overflow is no problem if it occurs
	 in an s_operand address, since we may fall back to lm/stm.  So we
	 only have to care about overflows in the b+i+d case.  */
      if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
	   && s390_class_max_nregs (GENERAL_REGS, mode) > 1
	   && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
	  /* For FP_REGS no lm/stm is available, so this check is triggered
	     for displacement overflows in b+i+d and b+d like addresses.  */
	  || (reg_classes_intersect_p (FP_REGS, rclass)
	      && s390_class_max_nregs (FP_REGS, mode) > 1))
	{
	  if (in_p)
	    sri->icode = (TARGET_64BIT ?
			  CODE_FOR_reloaddi_nonoffmem_in :
			  CODE_FOR_reloadsi_nonoffmem_in);
	  else
	    sri->icode = (TARGET_64BIT ?
			  CODE_FOR_reloaddi_nonoffmem_out :
			  CODE_FOR_reloadsi_nonoffmem_out);
	}
    }

  /* A scratch address register is needed when a symbolic constant is
     copied to r0 compiling with -fPIC.  In other cases the target
     register might be used as temporary (see legitimize_pic_address).  */
  if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
    sri->icode = (TARGET_64BIT ?
		  CODE_FOR_reloaddi_PIC_addr :
		  CODE_FOR_reloadsi_PIC_addr);

  /* Either scratch or no register needed.  */
  return NO_REGS;
}
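
/* Stand-alone sketch, not from this file: the multiword check above
   requires that the displacement of the *last* byte of the access is
   still encodable.  The ranges below model the s390 12-bit unsigned
   short displacement and the 20-bit signed long displacement; treating
   this as a mirror of what DISP_IN_RANGE tests is an assumption of the
   sketch.  */

static int
disp_in_range_model (long long d, int have_long_displacement)
{
  return have_long_displacement
	 ? (d >= -524288 && d <= 524287)	/* 20-bit signed */
	 : (d >= 0 && d <= 4095);		/* 12-bit unsigned */
}

static int
multiword_access_fits (long long disp, long long size,
		       int have_long_displacement)
{
  return disp_in_range_model (disp + size - 1, have_long_displacement);
}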
/* Generate code to load SRC, which is PLUS that is not a
   legitimate operand for the LA instruction, into TARGET.
   SCRATCH may be used as scratch register.  */

void
s390_expand_plus_operand (rtx target, rtx src,
			  rtx scratch)
{
  rtx sum1, sum2;
  struct s390_address ad;

  /* src must be a PLUS; get its two operands.  */
  gcc_assert (GET_CODE (src) == PLUS);
  gcc_assert (GET_MODE (src) == Pmode);

  /* Check if either of the two operands is already scheduled
     for replacement by reload.  This can happen e.g. when
     float registers occur in an address.  */
  sum1 = find_replacement (&XEXP (src, 0));
  sum2 = find_replacement (&XEXP (src, 1));
  src = gen_rtx_PLUS (Pmode, sum1, sum2);

  /* If the address is already strictly valid, there's nothing to do.  */
  if (!s390_decompose_address (src, &ad)
      || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
      || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
    {
      /* Otherwise, one of the operands cannot be an address register;
	 we reload its value into the scratch register.  */
      if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
	{
	  emit_move_insn (scratch, sum1);
	  sum1 = scratch;
	}
      if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
	{
	  emit_move_insn (scratch, sum2);
	  sum2 = scratch;
	}

      /* According to the way these invalid addresses are generated
	 in reload.c, it should never happen (at least on s390) that
	 *neither* of the PLUS components, after find_replacements
	 was applied, is an address register.  */
      if (sum1 == scratch && sum2 == scratch)
	{
	  debug_rtx (src);
	  gcc_unreachable ();
	}

      src = gen_rtx_PLUS (Pmode, sum1, sum2);
    }

  /* Emit the LOAD ADDRESS pattern.  Note that reload of PLUS
     is only ever performed on addresses, so we can mark the
     sum as legitimate for LA in any case.  */
  s390_load_address (target, src);
}
/* Return true if ADDR is a valid memory address.
   STRICT specifies whether strict register checking applies.  */

static bool
s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
{
  struct s390_address ad;

  if (TARGET_Z10
      && larl_operand (addr, VOIDmode)
      && (mode == VOIDmode
	  || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
    return true;

  if (!s390_decompose_address (addr, &ad))
    return false;

  if (strict)
    {
      if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
	return false;

      if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
	return false;
    }
  else
    {
      if (ad.base
	  && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
	       || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
	return false;

      if (ad.indx
	  && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
	       || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
	return false;
    }
  return true;
}
/* Return true if OP is a valid operand for the LA instruction.
   In 31-bit, we need to prove that the result is used as an
   address, as LA performs only a 31-bit addition.  */

bool
legitimate_la_operand_p (rtx op)
{
  struct s390_address addr;

  if (!s390_decompose_address (op, &addr))
    return false;

  return (TARGET_64BIT || addr.pointer);
}
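
/* Stand-alone sketch, not from this file: in 31-bit mode LA delivers
   only a 31-bit result -- the top bit of the computed address is forced
   to zero -- which is why a plain 32-bit addition cannot blindly be
   replaced by LA.  Modelled on plain integers:  */

static unsigned int
la_31bit_result (unsigned int base, unsigned int index, unsigned int disp)
{
  return (base + index + disp) & 0x7fffffffu;
}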
/* Return true if it is valid *and* preferable to use LA to
   compute the sum of OP1 and OP2.  */

bool
preferred_la_operand_p (rtx op1, rtx op2)
{
  struct s390_address addr;

  if (op2 != const0_rtx)
    op1 = gen_rtx_PLUS (Pmode, op1, op2);

  if (!s390_decompose_address (op1, &addr))
    return false;
  if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
    return false;
  if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
    return false;

  if (!TARGET_64BIT && !addr.pointer)
    return false;

  if (addr.pointer)
    return true;

  if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
      || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
    return true;

  return false;
}
/* Emit a forced load-address operation to load SRC into DST.
   This will use the LOAD ADDRESS instruction even in situations
   where legitimate_la_operand_p (SRC) returns false.  */

void
s390_load_address (rtx dst, rtx src)
{
  if (TARGET_64BIT)
    emit_move_insn (dst, src);
  else
    emit_insn (gen_force_la_31 (dst, src));
}
/* Return a legitimate reference for ORIG (an address) using the
   register REG.  If REG is 0, a new pseudo is generated.

   There are two types of references that must be handled:

   1. Global data references must load the address from the GOT, via
      the PIC reg.  An insn is emitted to do this load, and the reg is
      returned.

   2. Static data references, constant pool addresses, and code labels
      compute the address as an offset from the GOT, whose base is in
      the PIC reg.  Static data objects have SYMBOL_FLAG_LOCAL set to
      differentiate them from global data objects.  The returned
      address is the PIC reg + an unspec constant.

   TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
   reg also appears in the address.  */

rtx
legitimize_pic_address (rtx orig, rtx reg)
{
  rtx addr = orig;
  rtx new_rtx = orig;
  rtx base;

  gcc_assert (!TLS_SYMBOLIC_CONST (addr));
  if (GET_CODE (addr) == LABEL_REF
      || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
    {
      /* This is a local symbol.  */
      if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
	{
	  /* Access local symbols PC-relative via LARL.
	     This is the same as in the non-PIC case, so it is
	     handled automatically ...  */
	}
      else
	{
	  /* Access local symbols relative to the GOT.  */

	  rtx temp = reg ? reg : gen_reg_rtx (Pmode);

	  if (reload_in_progress || reload_completed)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	  addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
	  addr = gen_rtx_CONST (Pmode, addr);
	  addr = force_const_mem (Pmode, addr);
	  emit_move_insn (temp, addr);

	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
	  if (reg != 0)
	    {
	      s390_load_address (reg, new_rtx);
	      new_rtx = reg;
	    }
	}
    }
  else if (GET_CODE (addr) == SYMBOL_REF)
    {
      if (reg == 0)
	reg = gen_reg_rtx (Pmode);

      if (flag_pic == 1)
	{
	  /* Assume GOT offset < 4k.  This is handled the same way
	     in both 31- and 64-bit code (@GOT).  */

	  if (reload_in_progress || reload_completed)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
      else if (TARGET_CPU_ZARCH)
	{
	  /* If the GOT offset might be >= 4k, we determine the position
	     of the GOT entry via a PC-relative LARL (@GOTENT).  */

	  rtx temp = reg ? reg : gen_reg_rtx (Pmode);

	  gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
		      || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);

	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  emit_move_insn (temp, new_rtx);

	  new_rtx = gen_const_mem (Pmode, temp);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
      else
	{
	  /* If the GOT offset might be >= 4k, we have to load it
	     from the literal pool (@GOT).  */

	  rtx temp = reg ? reg : gen_reg_rtx (Pmode);

	  gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
		      || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);

	  if (reload_in_progress || reload_completed)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

	  addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
	  addr = gen_rtx_CONST (Pmode, addr);
	  addr = force_const_mem (Pmode, addr);
	  emit_move_insn (temp, addr);

	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
    }
  else
    {
      if (GET_CODE (addr) == CONST)
	{
	  addr = XEXP (addr, 0);
	  if (GET_CODE (addr) == UNSPEC)
	    {
	      gcc_assert (XVECLEN (addr, 0) == 1);
	      switch (XINT (addr, 1))
		{
		/* If someone moved a GOT-relative UNSPEC
		   out of the literal pool, force them back in.  */
		case UNSPEC_GOTOFF:
		case UNSPEC_PLTOFF:
		  new_rtx = force_const_mem (Pmode, orig);
		  break;

		/* @GOT is OK as is if small.  */
		case UNSPEC_GOT:
		  if (flag_pic == 2)
		    new_rtx = force_const_mem (Pmode, orig);
		  break;

		/* @GOTENT is OK as is.  */
		case UNSPEC_GOTENT:
		  break;

		/* @PLT is OK as is on 64-bit, must be converted to
		   GOT-relative @PLTOFF on 31-bit.  */
		case UNSPEC_PLT:
		  if (!TARGET_CPU_ZARCH)
		    {
		      rtx temp = reg ? reg : gen_reg_rtx (Pmode);

		      if (reload_in_progress || reload_completed)
			df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

		      addr = XVECEXP (addr, 0, 0);
		      addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
					     UNSPEC_PLTOFF);
		      addr = gen_rtx_CONST (Pmode, addr);
		      addr = force_const_mem (Pmode, addr);
		      emit_move_insn (temp, addr);

		      new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
		      if (reg != 0)
			{
			  s390_load_address (reg, new_rtx);
			  new_rtx = reg;
			}
		    }
		  break;

		/* Everything else cannot happen.  */
		default:
		  gcc_unreachable ();
		}
	    }
	  else
	    gcc_assert (GET_CODE (addr) == PLUS);
	}
      if (GET_CODE (addr) == PLUS)
	{
	  rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);

	  gcc_assert (!TLS_SYMBOLIC_CONST (op0));
	  gcc_assert (!TLS_SYMBOLIC_CONST (op1));

	  /* Check first to see if this is a constant offset
	     from a local symbol reference.  */
	  if ((GET_CODE (op0) == LABEL_REF
	       || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
	      && GET_CODE (op1) == CONST_INT)
	    {
	      if (TARGET_CPU_ZARCH
		  && larl_operand (op0, VOIDmode)
		  && INTVAL (op1) < (HOST_WIDE_INT) 1 << 31
		  && INTVAL (op1) >= -((HOST_WIDE_INT) 1 << 31))
		{
		  if (INTVAL (op1) & 1)
		    {
		      /* LARL can't handle odd offsets, so emit a
			 pair of LARL and LA.  */
		      rtx temp = reg ? reg : gen_reg_rtx (Pmode);

		      if (!DISP_IN_RANGE (INTVAL (op1)))
			{
			  HOST_WIDE_INT even = INTVAL (op1) - 1;
			  op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
			  op0 = gen_rtx_CONST (Pmode, op0);
			  op1 = const1_rtx;
			}

		      emit_move_insn (temp, op0);
		      new_rtx = gen_rtx_PLUS (Pmode, temp, op1);

		      if (reg != 0)
			{
			  s390_load_address (reg, new_rtx);
			  new_rtx = reg;
			}
		    }
		  else
		    {
		      /* If the offset is even, we can just use LARL.
			 This will happen automatically.  */
		    }
		}
	      else
		{
		  /* Access local symbols relative to the GOT.  */

		  rtx temp = reg ? reg : gen_reg_rtx (Pmode);

		  if (reload_in_progress || reload_completed)
		    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

		  addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
					 UNSPEC_GOTOFF);
		  addr = gen_rtx_PLUS (Pmode, addr, op1);
		  addr = gen_rtx_CONST (Pmode, addr);
		  addr = force_const_mem (Pmode, addr);
		  emit_move_insn (temp, addr);

		  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
		  if (reg != 0)
		    {
		      s390_load_address (reg, new_rtx);
		      new_rtx = reg;
		    }
		}
	    }

	  /* Now, check whether it is a GOT relative symbol plus offset
	     that was pulled out of the literal pool.  Force it back in.  */
	  else if (GET_CODE (op0) == UNSPEC
		   && GET_CODE (op1) == CONST_INT
		   && XINT (op0, 1) == UNSPEC_GOTOFF)
	    {
	      gcc_assert (XVECLEN (op0, 0) == 1);

	      new_rtx = force_const_mem (Pmode, orig);
	    }
	  /* Otherwise, compute the sum.  */
	  else
	    {
	      base = legitimize_pic_address (XEXP (addr, 0), reg);
	      new_rtx = legitimize_pic_address (XEXP (addr, 1),
						base == reg ? NULL_RTX : reg);
	      if (GET_CODE (new_rtx) == CONST_INT)
		new_rtx = plus_constant (base, INTVAL (new_rtx));
	      else
		{
		  if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
		    {
		      base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
		      new_rtx = XEXP (new_rtx, 1);
		    }
		  new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);