/* Subroutines used for code generation on IBM S/390 and zSeries
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
- Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
+ 2007 Free Software Foundation, Inc.
Contributed by Hartmut Penner (hpenner@de.ibm.com) and
Ulrich Weigand (uweigand@de.ibm.com).
#include "langhooks.h"
#include "optabs.h"
#include "tree-gimple.h"
+#include "df.h"
/* Define the specific costs for a given cpu. */
const int msgr; /* cost of an MSGR instruction. */
const int msr; /* cost of an MSR instruction. */
const int mult_df; /* cost of multiplication in DFmode. */
+ const int mxbr; /* cost of multiplication in TFmode. */
/* square root */
+ const int sqxbr; /* cost of square root in TFmode. */
const int sqdbr; /* cost of square root in DFmode. */
const int sqebr; /* cost of square root in SFmode. */
/* multiply and add */
const int madbr; /* cost of multiply and add in DFmode. */
const int maebr; /* cost of multiply and add in SFmode. */
/* division */
+ const int dxbr; /* cost of division in TFmode. */
const int ddbr;
- const int ddr;
const int debr;
- const int der;
const int dlgr;
const int dlr;
const int dr;
COSTS_N_INSNS (10), /* MSGR */
COSTS_N_INSNS (4), /* MSR */
COSTS_N_INSNS (7), /* multiplication in DFmode */
+ COSTS_N_INSNS (13), /* MXBR */
+ COSTS_N_INSNS (136), /* SQXBR */
COSTS_N_INSNS (44), /* SQDBR */
COSTS_N_INSNS (35), /* SQEBR */
COSTS_N_INSNS (18), /* MADBR */
COSTS_N_INSNS (13), /* MAEBR */
+ COSTS_N_INSNS (134), /* DXBR */
COSTS_N_INSNS (30), /* DDBR */
- COSTS_N_INSNS (30), /* DDR */
COSTS_N_INSNS (27), /* DEBR */
- COSTS_N_INSNS (26), /* DER */
COSTS_N_INSNS (220), /* DLGR */
COSTS_N_INSNS (34), /* DLR */
COSTS_N_INSNS (34), /* DR */
COSTS_N_INSNS (4), /* MSGR */
COSTS_N_INSNS (4), /* MSR */
COSTS_N_INSNS (1), /* multiplication in DFmode */
+ COSTS_N_INSNS (28), /* MXBR */
+ COSTS_N_INSNS (130), /* SQXBR */
COSTS_N_INSNS (66), /* SQDBR */
COSTS_N_INSNS (38), /* SQEBR */
COSTS_N_INSNS (1), /* MADBR */
COSTS_N_INSNS (1), /* MAEBR */
+ COSTS_N_INSNS (60), /* DXBR */
COSTS_N_INSNS (40), /* DDBR */
- COSTS_N_INSNS (44), /* DDR */
- COSTS_N_INSNS (26), /* DDBR */
- COSTS_N_INSNS (28), /* DER */
+ COSTS_N_INSNS (26), /* DEBR */
COSTS_N_INSNS (176), /* DLGR */
COSTS_N_INSNS (31), /* DLR */
COSTS_N_INSNS (31), /* DR */
COSTS_N_INSNS (4), /* MSGR */
COSTS_N_INSNS (4), /* MSR */
COSTS_N_INSNS (1), /* multiplication in DFmode */
+ COSTS_N_INSNS (28), /* MXBR */
+ COSTS_N_INSNS (130), /* SQXBR */
COSTS_N_INSNS (66), /* SQDBR */
COSTS_N_INSNS (38), /* SQEBR */
COSTS_N_INSNS (1), /* MADBR */
COSTS_N_INSNS (1), /* MAEBR */
+ COSTS_N_INSNS (60), /* DXBR */
COSTS_N_INSNS (40), /* DDBR */
- COSTS_N_INSNS (37), /* DDR */
- COSTS_N_INSNS (26), /* DDBR */
- COSTS_N_INSNS (28), /* DER */
+ COSTS_N_INSNS (26), /* DEBR */
COSTS_N_INSNS (30), /* DLGR */
COSTS_N_INSNS (23), /* DLR */
COSTS_N_INSNS (23), /* DR */
rtx indx;
rtx disp;
bool pointer;
+ bool literal_pool; /* True if the address refers to the literal pool. */
};
/* Which cpu are we tuning for. */
HOST_WIDE_INT f4_offset;
HOST_WIDE_INT f8_offset;
HOST_WIDE_INT backchain_offset;
-
+
+ /* Numbers of the first and last gprs for which slots in the
+ register save area are reserved. */
+ int first_save_gpr_slot;
+ int last_save_gpr_slot;
+
/* Number of first and last gpr to be saved, restored. */
int first_save_gpr;
int first_restore_gpr;
/* True if we may need to perform branch splitting. */
bool split_branches_pending_p;
+ /* True during final stage of literal pool processing. */
+ bool decomposed_literal_pool_addresses_ok_p;
+
/* Some local-dynamic TLS symbol name. */
const char *some_ld_name;
#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
-#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr - \
- cfun_frame_layout.first_save_gpr + 1) * UNITS_PER_WORD)
+#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
+ cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_WORD)
#define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
(1 << (BITNUM)))
#define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
#define CONST_OK_FOR_On(x) \
CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
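+/* Nonzero if a value of mode MODE either fits into a single register or
+ starts at an even register number; multiword values must occupy
+ even/odd register pairs. */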
+#define REGNO_PAIR_OK(REGNO, MODE) \
+ (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
+
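+/* Implement TARGET_LIBGCC_CMP_RETURN_MODE. */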
+static enum machine_mode
+s390_libgcc_cmp_return_mode (void)
+{
+ return TARGET_64BIT ? DImode : SImode;
+}
+
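+/* Implement TARGET_LIBGCC_SHIFT_COUNT_MODE. */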
+static enum machine_mode
+s390_libgcc_shift_count_mode (void)
+{
+ return TARGET_64BIT ? DImode : SImode;
+}
+
+/* Return true if the back end supports mode MODE. */
+static bool
+s390_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return true;
+ else
+ return default_scalar_mode_supported_p (mode);
+}
+
/* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
void
*op1 = constm1_rtx;
}
-
- /* Remove redundant UNSPEC_CMPINT conversions if possible. */
+ /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
if (GET_CODE (*op0) == UNSPEC
- && XINT (*op0, 1) == UNSPEC_CMPINT
+ && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
&& XVECLEN (*op0, 0) == 1
&& GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
&& GET_CODE (XVECEXP (*op0, 0, 0)) == REG
}
}
+ /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
+ if (GET_CODE (*op0) == UNSPEC
+ && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
+ && XVECLEN (*op0, 0) == 1
+ && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
+ && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
+ && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
+ && *op1 == const0_rtx)
+ {
+ enum rtx_code new_code = UNKNOWN;
+ switch (*code)
+ {
+ case EQ: new_code = EQ; break;
+ case NE: new_code = NE; break;
+ default: break;
+ }
+
+ if (new_code != UNKNOWN)
+ {
+ *op0 = XVECEXP (*op0, 0, 0);
+ *code = new_code;
+ }
+ }
+
/* Simplify cascaded EQ, NE with const0_rtx. */
if ((*code == NE || *code == EQ)
&& (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
return ret;
}
+/* Emit a SImode compare and swap instruction setting MEM to NEW if MEM
+ matches CMP; OLD receives the value MEM held before the operation.
+ Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
+ conditional branch testing the result. */
+
+static rtx
+s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new)
+{
+ rtx ret;
+
+ emit_insn (gen_sync_compare_and_swap_ccsi (old, mem, cmp, new));
+ ret = gen_rtx_fmt_ee (code, VOIDmode, s390_compare_emitted, const0_rtx);
+
+ s390_compare_emitted = NULL_RTX;
+
+ return ret;
+}
+
/* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
unconditional jump, else a conditional jump under condition COND. */
| PF_LONG_DISPLACEMENT},
{"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
| PF_LONG_DISPLACEMENT | PF_EXTIMM},
+ {"z9-ec", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
+ | PF_LONG_DISPLACEMENT | PF_EXTIMM | PF_DFP},
};
size_t i;
}
/* Sanity checks. */
- if (TARGET_ZARCH && !(s390_arch_flags & PF_ZARCH))
+ if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
error ("z/Architecture mode not supported on %s", s390_arch_string);
if (TARGET_64BIT && !TARGET_ZARCH)
error ("64-bit ABI not supported in ESA/390 mode");
+ if (TARGET_HARD_DFP && (!TARGET_CPU_DFP || !TARGET_ZARCH))
+ {
+ if (target_flags_explicit & MASK_SOFT_DFP)
+ {
+ if (!TARGET_CPU_DFP)
+ error ("Hardware decimal floating point instructions"
+ " not available on %s", s390_arch_string);
+ if (!TARGET_ZARCH)
+ error ("Hardware decimal floating point instructions"
+ " not available in ESA/390 mode");
+ }
+ else
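+ /* DFP support was not explicitly requested on the command line;
+ quietly fall back to software decimal floating point. */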
+ target_flags |= MASK_SOFT_DFP;
+ }
+
+ if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
+ {
+ if ((target_flags_explicit & MASK_SOFT_DFP) && TARGET_HARD_DFP)
+ error ("-mhard-dfp can't be used in conjunction with -msoft-float");
+
+ target_flags |= MASK_SOFT_DFP;
+ }
+
/* Set processor cost function. */
if (s390_tune == PROCESSOR_2094_Z9_109)
s390_cost = &z9_109_cost;
if (s390_stack_size)
{
- if (!s390_stack_guard)
- error ("-mstack-size implies use of -mstack-guard");
- else if (s390_stack_guard >= s390_stack_size)
+ if (s390_stack_guard >= s390_stack_size)
error ("stack size must be greater than the stack guard value");
else if (s390_stack_size > 1 << 16)
error ("stack size must not be greater than 64k");
}
else if (s390_stack_guard)
error ("-mstack-guard implies use of -mstack-size");
+
+#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
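+ /* Default to 128-bit long double unless the user explicitly chose a
+ long double size on the command line. */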
+ if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
+ target_flags |= MASK_LONG_DOUBLE_128;
+#endif
}
/* Map for smallest class containing reg regno. */
bool pointer = false;
bool base_ptr = false;
bool indx_ptr = false;
+ bool literal_pool = false;
+
+ /* We may need to substitute the literal pool base register into the address
+ below. However, at this point we do not know which register is going to
+ be used as base, so we substitute the arg pointer register. This is going
+ to be treated as holding a pointer below -- it shouldn't be used for any
+ other purpose. */
+ rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
/* Decompose address into base + index + displacement. */
{
/* Either base or index must be free to hold the base register. */
if (!base)
- base = gen_rtx_REG (Pmode, BASE_REGNUM);
+ base = fake_pool_base, literal_pool = true;
else if (!indx)
- indx = gen_rtx_REG (Pmode, BASE_REGNUM);
+ indx = fake_pool_base, literal_pool = true;
else
return false;
else
return false;
- base = gen_rtx_REG (Pmode, BASE_REGNUM);
+ base = XVECEXP (base, 0, 1);
break;
case UNSPEC_LTREL_BASE:
- base = gen_rtx_REG (Pmode, BASE_REGNUM);
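+ /* A one-element LTREL_BASE has not yet been annotated with its base
+ register; the annotated two-element form carries the register as its
+ second operand. */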
+ if (XVECLEN (base, 0) == 1)
+ base = fake_pool_base, literal_pool = true;
+ else
+ base = XVECEXP (base, 0, 1);
break;
default:
return false;
}
- if (GET_CODE (base) != REG || GET_MODE (base) != Pmode)
+ if (!REG_P (base)
+ || (GET_MODE (base) != SImode
+ && GET_MODE (base) != Pmode))
return false;
- if (REGNO (base) == BASE_REGNUM
- || REGNO (base) == STACK_POINTER_REGNUM
+ if (REGNO (base) == STACK_POINTER_REGNUM
|| REGNO (base) == FRAME_POINTER_REGNUM
|| ((reload_completed || reload_in_progress)
&& frame_pointer_needed
|| (flag_pic
&& REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
pointer = base_ptr = true;
+
+ if ((reload_completed || reload_in_progress)
+ && base == cfun->machine->base_reg)
+ pointer = base_ptr = literal_pool = true;
}
/* Validate index register. */
else
return false;
- indx = gen_rtx_REG (Pmode, BASE_REGNUM);
+ indx = XVECEXP (indx, 0, 1);
break;
case UNSPEC_LTREL_BASE:
- indx = gen_rtx_REG (Pmode, BASE_REGNUM);
+ if (XVECLEN (indx, 0) == 1)
+ indx = fake_pool_base, literal_pool = true;
+ else
+ indx = XVECEXP (indx, 0, 1);
break;
default:
return false;
}
- if (GET_CODE (indx) != REG || GET_MODE (indx) != Pmode)
+ if (!REG_P (indx)
+ || (GET_MODE (indx) != SImode
+ && GET_MODE (indx) != Pmode))
return false;
- if (REGNO (indx) == BASE_REGNUM
- || REGNO (indx) == STACK_POINTER_REGNUM
+ if (REGNO (indx) == STACK_POINTER_REGNUM
|| REGNO (indx) == FRAME_POINTER_REGNUM
|| ((reload_completed || reload_in_progress)
&& frame_pointer_needed
|| (flag_pic
&& REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
pointer = indx_ptr = true;
+
+ if ((reload_completed || reload_in_progress)
+ && indx == cfun->machine->base_reg)
+ pointer = indx_ptr = literal_pool = true;
}
/* Prefer to use pointer as base, not index. */
if (GET_CODE (disp) == UNSPEC
&& (XINT (disp, 1) == UNSPEC_GOT
|| XINT (disp, 1) == UNSPEC_GOTNTPOFF)
- && offset == 0
&& flag_pic == 1)
{
;
}
/* Accept chunkified literal pool symbol references. */
- else if (GET_CODE (disp) == MINUS
+ else if (cfun && cfun->machine
+ && cfun->machine->decomposed_literal_pool_addresses_ok_p
+ && GET_CODE (disp) == MINUS
&& GET_CODE (XEXP (disp, 0)) == LABEL_REF
&& GET_CODE (XEXP (disp, 1)) == LABEL_REF)
{
out->indx = indx;
out->disp = orig_disp;
out->pointer = pointer;
+ out->literal_pool = literal_pool;
}
return true;
/* Decompose a RTL expression OP for a shift count into its components,
and return the base register in BASE and the offset in OFFSET.
- If BITS is non-zero, the expression is used in a context where only
- that number to low-order bits is significant. We then allow OP to
- contain and outer AND that does not affect significant bits. If BITS
- is zero, we allow OP to contain any outer AND with a constant.
-
Return true if OP is a valid shift count, false if not. */
bool
-s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset, int bits)
+s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
{
HOST_WIDE_INT off = 0;
- /* Drop outer ANDs. */
- if (GET_CODE (op) == AND && GET_CODE (XEXP (op, 1)) == CONST_INT)
- {
- HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << bits) - 1;
- if ((INTVAL (XEXP (op, 1)) & mask) != mask)
- return false;
-
- op = XEXP (op, 0);
- }
-
/* We can have an integer constant, an address register,
or a sum of the two. */
if (GET_CODE (op) == CONST_INT)
return true;
}
-/* Return 1 if OP is a valid operand for a C constraint, 0 else. */
+
+/* Evaluates constraint strings described by the regular expression
+ ([AB](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for the
+ constraint given in STR, or 0 otherwise. */
int
-s390_extra_constraint_str (rtx op, int c, const char * str)
+s390_mem_constraint (const char *str, rtx op)
{
struct s390_address addr;
-
- gcc_assert (c == str[0]);
+ char c = str[0];
/* Check for offsettable variants of memory constraints. */
if (c == 'A')
return 0;
if ((reload_completed || reload_in_progress)
- ? !offsettable_memref_p (op)
- : !offsettable_nonstrict_memref_p (op))
+ ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
return 0;
c = str[1];
return 0;
if (!s390_decompose_address (XEXP (op, 0), &addr))
return 0;
- if (addr.base && REG_P (addr.base) && REGNO (addr.base) == BASE_REGNUM)
- return 0;
- if (addr.indx && REG_P (addr.indx) && REGNO (addr.indx) == BASE_REGNUM)
+ if (addr.literal_pool)
return 0;
c = str[1];
case 'Y':
/* Simply check for the basic form of a shift count. Reload will
take care of making sure we have a proper base register. */
- if (!s390_decompose_shift_count (op, NULL, NULL, 0))
+ if (!s390_decompose_shift_count (op, NULL, NULL))
return 0;
break;
return 1;
}
-/* Return true if VALUE matches the constraint STR. */
+
+
+/* Evaluates constraint strings starting with letter O. Input
+ parameter C is the letter following the "O", i.e. the second letter
+ of the constraint string. Returns 1 if VALUE meets the respective
+ constraint and 0 otherwise. */
int
-s390_const_double_ok_for_constraint_p (rtx value,
- int c,
- const char * str)
+s390_O_constraint_str (const char c, HOST_WIDE_INT value)
{
- gcc_assert (c == str[0]);
+ if (!TARGET_EXTIMM)
+ return 0;
- switch (str[0])
+ switch (c)
{
- case 'G':
- /* The floating point zero constant. */
- return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
- && value == CONST0_RTX (GET_MODE (value)));
-
+ case 's':
+ return trunc_int_for_mode (value, SImode) == value;
+
+ case 'p':
+ return value == 0
+ || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
+
+ case 'n':
+ return value == -1
+ || s390_single_part (GEN_INT (value), DImode, SImode, -1) == 1;
+
default:
- return 0;
+ gcc_unreachable ();
}
}
-/* Return true if VALUE matches the constraint STR. */
+
+/* Evaluates constraint strings starting with letter N. Parameter STR
+ contains the letters that follow the "N" in the constraint string.
+ Returns true if VALUE matches the constraint. */
int
-s390_const_ok_for_constraint_p (HOST_WIDE_INT value,
- int c,
- const char * str)
+s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
{
enum machine_mode mode, part_mode;
int def;
int part, part_goal;
- gcc_assert (c == str[0]);
-
- switch (str[0])
- {
- case 'I':
- return (unsigned int)value < 256;
-
- case 'J':
- return (unsigned int)value < 4096;
-
- case 'K':
- return value >= -32768 && value < 32768;
-
- case 'L':
- return (TARGET_LONG_DISPLACEMENT ?
- (value >= -524288 && value <= 524287)
- : (value >= 0 && value <= 4095));
- case 'M':
- return value == 2147483647;
-
- case 'N':
- if (str[1] == 'x')
- part_goal = -1;
- else
- part_goal = str[1] - '0';
-
- switch (str[2])
- {
- case 'Q': part_mode = QImode; break;
- case 'H': part_mode = HImode; break;
- case 'S': part_mode = SImode; break;
- default: return 0;
- }
-
- switch (str[3])
- {
- case 'H': mode = HImode; break;
- case 'S': mode = SImode; break;
- case 'D': mode = DImode; break;
- default: return 0;
- }
- switch (str[4])
- {
- case '0': def = 0; break;
- case 'F': def = -1; break;
- default: return 0;
- }
-
- if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
- return 0;
-
- part = s390_single_part (GEN_INT (value), mode, part_mode, def);
- if (part < 0)
- return 0;
- if (part_goal != -1 && part_goal != part)
- return 0;
+ if (str[0] == 'x')
+ part_goal = -1;
+ else
+ part_goal = str[0] - '0';
+ switch (str[1])
+ {
+ case 'Q':
+ part_mode = QImode;
break;
-
- case 'O':
- if (!TARGET_EXTIMM)
- return 0;
-
- switch (str[1])
- {
- case 's':
- return trunc_int_for_mode (value, SImode) == value;
-
- case 'p':
- return value == 0
- || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
-
- case 'n':
- return value == -1
- || s390_single_part (GEN_INT (value), DImode, SImode, -1) == 1;
-
- default:
- gcc_unreachable ();
- }
+ case 'H':
+ part_mode = HImode;
break;
+ case 'S':
+ part_mode = SImode;
+ break;
+ default:
+ return 0;
+ }
- case 'P':
- return legitimate_reload_constant_p (GEN_INT (value));
+ switch (str[2])
+ {
+ case 'H':
+ mode = HImode;
+ break;
+ case 'S':
+ mode = SImode;
+ break;
+ case 'D':
+ mode = DImode;
+ break;
+ default:
+ return 0;
+ }
+ switch (str[3])
+ {
+ case '0':
+ def = 0;
+ break;
+ case 'F':
+ def = -1;
+ break;
default:
return 0;
}
+ if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
+ return 0;
+
+ part = s390_single_part (GEN_INT (value), mode, part_mode, def);
+ if (part < 0)
+ return 0;
+ if (part_goal != -1 && part_goal != part)
+ return 0;
+
return 1;
}
+
+/* Returns true if the input parameter VALUE is a floating-point zero. */
+
+int
+s390_float_const_zero_p (rtx value)
+{
+ return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
+ && value == CONST0_RTX (GET_MODE (value)));
+}
+
+
/* Compute a (partial) cost for rtx X. Return true if the complete
cost has been computed, and false if subexpressions should be
scanned. In either case, *TOTAL contains the cost result.
/* Check for multiply and add. */
if ((GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
&& GET_CODE (XEXP (x, 0)) == MULT
- && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT && TARGET_FUSED_MADD)
+ && TARGET_HARD_FLOAT && TARGET_FUSED_MADD)
{
/* This is the multiply and add case. */
if (GET_MODE (x) == DFmode)
case DFmode:
*total = s390_cost->mult_df;
break;
+ case TFmode:
+ *total = s390_cost->mxbr;
+ break;
default:
return false;
}
*total = s390_cost->dlr;
else if (GET_MODE (x) == SFmode)
{
- if (TARGET_IEEE_FLOAT)
- *total = s390_cost->debr;
- else /* TARGET_IBM_FLOAT */
- *total = s390_cost->der;
+ *total = s390_cost->debr;
}
else if (GET_MODE (x) == DFmode)
{
- if (TARGET_IEEE_FLOAT)
- *total = s390_cost->ddbr;
- else /* TARGET_IBM_FLOAT */
- *total = s390_cost->ddr;
+ *total = s390_cost->ddbr;
+ }
+ else if (GET_MODE (x) == TFmode)
+ {
+ *total = s390_cost->dxbr;
}
return false;
case SQRT:
if (GET_MODE (x) == SFmode)
*total = s390_cost->sqebr;
- else /* DFmode */
+ else if (GET_MODE (x) == DFmode)
*total = s390_cost->sqdbr;
+ else /* TFmode */
+ *total = s390_cost->sqxbr;
return false;
case SIGN_EXTEND:
return class;
}
-/* Return the register class of a scratch register needed to
- load IN into a register of class CLASS in MODE.
+/* Inform reload about cases where moving X with a mode MODE to a register in
+ CLASS requires an extra scratch or immediate register. Return the class
+ needed for the immediate register. */
- We need a temporary when loading a PLUS expression which
- is not a legitimate operand of the LOAD ADDRESS instruction. */
-
-enum reg_class
-s390_secondary_input_reload_class (enum reg_class class,
- enum machine_mode mode, rtx in)
+static enum reg_class
+s390_secondary_reload (bool in_p, rtx x, enum reg_class class,
+ enum machine_mode mode, secondary_reload_info *sri)
{
- if (s390_plus_operand (in, mode))
- return ADDR_REGS;
-
+ /* Intermediate register needed. */
if (reg_classes_intersect_p (CC_REGS, class))
return GENERAL_REGS;
- return NO_REGS;
-}
-
-/* Return the register class of a scratch register needed to
- store a register of class CLASS in MODE into OUT:
-
- We need a temporary when storing a double-word to a
- non-offsettable memory address. */
-
-enum reg_class
-s390_secondary_output_reload_class (enum reg_class class,
- enum machine_mode mode, rtx out)
-{
- if ((TARGET_64BIT ? mode == TImode
- : (mode == DImode || mode == DFmode))
- && reg_classes_intersect_p (GENERAL_REGS, class)
- && GET_CODE (out) == MEM
- && GET_CODE (XEXP (out, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (out, 0), 0)) == PLUS
- && GET_CODE (XEXP (XEXP (out, 0), 1)) == CONST_INT
- && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (out, 0), 1))
+ /* We need a scratch register when loading a PLUS expression which
+ is not a legitimate operand of the LOAD ADDRESS instruction. */
+ if (in_p && s390_plus_operand (x, mode))
+ sri->icode = (TARGET_64BIT ?
+ CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
+
+ /* When performing a multiword move from or to memory we have to make
+ sure the second chunk in memory is addressable without causing a
+ displacement overflow. If it would overflow, we calculate the address
+ in a scratch register. */
+ if (MEM_P (x)
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
+ GET_MODE_SIZE (mode) - 1))
- return ADDR_REGS;
-
- if (reg_classes_intersect_p (CC_REGS, class))
- return GENERAL_REGS;
+ {
+ /* For GENERAL_REGS a displacement overflow is no problem if it occurs
+ in an s_operand address, since we may fall back to lm/stm. So we only
+ have to care about overflows in the b+i+d case. */
+ if ((reg_classes_intersect_p (GENERAL_REGS, class)
+ && s390_class_max_nregs (GENERAL_REGS, mode) > 1
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
+ /* For FP_REGS no lm/stm is available so this check is triggered
+ for displacement overflows in b+i+d and b+d like addresses. */
+ || (reg_classes_intersect_p (FP_REGS, class)
+ && s390_class_max_nregs (FP_REGS, mode) > 1))
+ {
+ if (in_p)
+ sri->icode = (TARGET_64BIT ?
+ CODE_FOR_reloaddi_nonoffmem_in :
+ CODE_FOR_reloadsi_nonoffmem_in);
+ else
+ sri->icode = (TARGET_64BIT ?
+ CODE_FOR_reloaddi_nonoffmem_out :
+ CODE_FOR_reloadsi_nonoffmem_out);
+ }
+ }
+ /* Either scratch or no register needed. */
return NO_REGS;
}
/* If the address is already strictly valid, there's nothing to do. */
if (!s390_decompose_address (src, &ad)
- || (ad.base && !REG_OK_FOR_BASE_STRICT_P (ad.base))
- || (ad.indx && !REG_OK_FOR_INDEX_STRICT_P (ad.indx)))
+ || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
+ || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
{
/* Otherwise, one of the operands cannot be an address register;
we reload its value into the scratch register. */
if (strict)
{
- if (ad.base && !REG_OK_FOR_BASE_STRICT_P (ad.base))
+ if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
return false;
- if (ad.indx && !REG_OK_FOR_INDEX_STRICT_P (ad.indx))
+
+ if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
return false;
}
else
{
- if (ad.base && !REG_OK_FOR_BASE_NONSTRICT_P (ad.base))
- return false;
- if (ad.indx && !REG_OK_FOR_INDEX_NONSTRICT_P (ad.indx))
+ if (ad.base
+ && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
return false;
+
+ if (ad.indx
+ && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
+ return false;
}
-
return true;
}
if (!s390_decompose_address (op1, &addr))
return false;
- if (addr.base && !REG_OK_FOR_BASE_STRICT_P (addr.base))
+ if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
return false;
- if (addr.indx && !REG_OK_FOR_INDEX_STRICT_P (addr.indx))
+ if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
return false;
if (!TARGET_64BIT && !addr.pointer)
rtx new = orig;
rtx base;
+ gcc_assert (!TLS_SYMBOLIC_CONST (addr));
+
if (GET_CODE (addr) == LABEL_REF
|| (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
{
rtx temp = reg? reg : gen_reg_rtx (Pmode);
if (reload_in_progress || reload_completed)
- regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
addr = gen_rtx_CONST (Pmode, addr);
in both 31- and 64-bit code (@GOT). */
if (reload_in_progress || reload_completed)
- regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
new = gen_rtx_CONST (Pmode, new);
rtx temp = gen_reg_rtx (Pmode);
if (reload_in_progress || reload_completed)
- regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
addr = gen_rtx_CONST (Pmode, addr);
rtx temp = reg? reg : gen_reg_rtx (Pmode);
if (reload_in_progress || reload_completed)
- regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
addr = XVECEXP (addr, 0, 0);
addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
if (GET_CODE (addr) == PLUS)
{
rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
+
+ gcc_assert (!TLS_SYMBOLIC_CONST (op0));
+ gcc_assert (!TLS_SYMBOLIC_CONST (op1));
+
/* Check first to see if this is a constant offset
from a local symbol reference. */
if ((GET_CODE (op0) == LABEL_REF
|| (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
&& GET_CODE (op1) == CONST_INT)
{
- if (TARGET_CPU_ZARCH && larl_operand (op0, VOIDmode))
+ if (TARGET_CPU_ZARCH
+ && larl_operand (op0, VOIDmode)
+ && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
+ && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
{
if (INTVAL (op1) & 1)
{
if (!DISP_IN_RANGE (INTVAL (op1)))
{
- int even = INTVAL (op1) - 1;
+ HOST_WIDE_INT even = INTVAL (op1) - 1;
op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
op0 = gen_rtx_CONST (Pmode, op0);
op1 = const1_rtx;
rtx temp = reg? reg : gen_reg_rtx (Pmode);
if (reload_in_progress || reload_completed)
- regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
UNSPEC_GOTOFF);
in both 31- and 64-bit code. */
if (reload_in_progress || reload_completed)
- regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
new = gen_rtx_CONST (Pmode, new);
from the literal pool. */
if (reload_in_progress || reload_completed)
- regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
new = gen_rtx_CONST (Pmode, new);
void
emit_symbolic_move (rtx *operands)
{
- rtx temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
+ rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (Pmode, operands[1]);
if (legitimate_address_p (mode, x, FALSE))
return x;
}
+ else if (GET_CODE (x) == PLUS
+ && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
+ || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
+ {
+ return x;
+ }
else if (flag_pic)
{
if (SYMBOLIC_CONST (x)
if (temp != count)
emit_move_insn (count, temp);
- temp = expand_binop (mode, ashr_optab, count, GEN_INT (8), blocks, 1, 0);
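+ /* The byte count is unsigned, so use a logical shift. */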
+ temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1, 0);
if (temp != blocks)
emit_move_insn (blocks, temp);
void
s390_expand_setmem (rtx dst, rtx len, rtx val)
{
- gcc_assert (GET_CODE (len) != CONST_INT || INTVAL (len) > 0);
+ if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
+ return;
+
gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
- if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 257)
+ if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
{
if (val == const0_rtx && INTVAL (len) <= 256)
emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
if (temp != count)
emit_move_insn (count, temp);
- temp = expand_binop (mode, ashr_optab, count, GEN_INT (8), blocks, 1, 0);
+ temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1, 0);
if (temp != blocks)
emit_move_insn (blocks, temp);
if (temp != count)
emit_move_insn (count, temp);
- temp = expand_binop (mode, ashr_optab, count, GEN_INT (8), blocks, 1, 0);
+ temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1, 0);
if (temp != blocks)
emit_move_insn (blocks, temp);
int bitsize = INTVAL (op1);
int bitpos = INTVAL (op2);
- /* We need byte alignement. */
+ /* We need byte alignment. */
if (bitsize % BITS_PER_UNIT)
return false;
return false;
}
+/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
+ register holding VAL of mode MODE, masked to that mode and shifted left by
+ COUNT bits. */
+
+static inline rtx
+s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
+{
+ val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
+ NULL_RTX, 1, OPTAB_DIRECT);
+ return expand_simple_binop (SImode, ASHIFT, val, count,
+ NULL_RTX, 1, OPTAB_DIRECT);
+}
+
+/* Structure to hold the initial parameters for a compare_and_swap operation
+ in HImode and QImode. */
+
+struct alignment_context
+{
+ rtx memsi; /* SI aligned memory location. */
+ rtx shift; /* Bit offset with regard to lsb. */
+ rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
+ rtx modemaski; /* ~modemask */
+ bool aligned; /* True if memory is aligned, false otherwise. */
+};
+
+/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
+ structure AC for transparent simplification if the memory alignment is
+ known to be at least 32 bits. MEM is the memory location for the actual
+ operation and MODE its mode. */
+
+static void
+init_alignment_context (struct alignment_context *ac, rtx mem,
+ enum machine_mode mode)
+{
+ ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
+ ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
+
+ if (ac->aligned)
+ ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
+ else
+ {
+ /* Alignment is unknown. */
+ rtx byteoffset, addr, align;
+
+ /* Force the address into a register. */
+ addr = force_reg (Pmode, XEXP (mem, 0));
+
+ /* Align it to SImode. */
+ align = expand_simple_binop (Pmode, AND, addr,
+ GEN_INT (-GET_MODE_SIZE (SImode)),
+ NULL_RTX, 1, OPTAB_DIRECT);
+ /* Generate MEM. */
+ ac->memsi = gen_rtx_MEM (SImode, align);
+ MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
+ set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
+ set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
+
+ /* Calculate the shift count. */
+ byteoffset = expand_simple_binop (Pmode, AND, addr,
+ GEN_INT (GET_MODE_SIZE (SImode) - 1),
+ NULL_RTX, 1, OPTAB_DIRECT);
+ /* As we already have some offset, evaluate the remaining distance. */
+ ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
+ NULL_RTX, 1, OPTAB_DIRECT);
+
+ }
+ /* Shift is the byte count, but we need the bit count. */
+ ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
+ NULL_RTX, 1, OPTAB_DIRECT);
+ /* Calculate masks. */
+ ac->modemask = expand_simple_binop (SImode, ASHIFT,
+ GEN_INT (GET_MODE_MASK (mode)), ac->shift,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
+}
+
+/* Expand an atomic compare and swap operation for HImode and QImode. MEM is
+ the memory location, CMP the old value to compare MEM with and NEW the value
+ to set if CMP == MEM.
+ CMP is never in memory for compare_and_swap_cc because
+ expand_bool_compare_and_swap puts it into a register for later compare. */
+
+void
+s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new)
+{
+ struct alignment_context ac;
+ rtx cmpv, newv, val, resv, cc;
+ rtx res = gen_reg_rtx (SImode);
+ rtx csloop = gen_label_rtx ();
+ rtx csend = gen_label_rtx ();
+
+ gcc_assert (register_operand (target, VOIDmode));
+ gcc_assert (MEM_P (mem));
+
+ init_alignment_context (&ac, mem, mode);
+
+ /* Shift the values to the correct bit positions. */
+ if (!(ac.aligned && MEM_P (cmp)))
+ cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
+ if (!(ac.aligned && MEM_P (new)))
+ new = s390_expand_mask_and_shift (new, mode, ac.shift);
+
+ /* Load full word. Subsequent loads are performed by CS. */
+ val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
+ NULL_RTX, 1, OPTAB_DIRECT);
+
+ /* Start CS loop. */
+ emit_label (csloop);
+ /* val = "<mem>00..0<mem>"
+ cmp = "00..0<cmp>00..0"
+ new = "00..0<new>00..0" */
+
+ /* Patch cmp and new with val at correct position. */
+ if (ac.aligned && MEM_P (cmp))
+ {
+ cmpv = force_reg (SImode, val);
+ store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
+ }
+ else
+ cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
+ NULL_RTX, 1, OPTAB_DIRECT));
+ if (ac.aligned && MEM_P (new))
+ {
+ newv = force_reg (SImode, val);
+ store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new);
+ }
+ else
+ newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new, val,
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ /* Jump to end if we're done (likely?). */
+ s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
+ cmpv, newv));
+
+ /* Check for changes outside mode. */
+ resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ cc = s390_emit_compare (NE, resv, val);
+ emit_move_insn (val, resv);
+ /* If so, loop again. */
+ s390_emit_jump (csloop, cc);
+
+ emit_label (csend);
+
+ /* Return the correct part of the bitfield. */
+ convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
+ NULL_RTX, 1, OPTAB_DIRECT), 1);
+}
+
+/* Expand an atomic operation CODE of mode MODE. MEM is the memory location
+ and VAL the value to play with. If AFTER is true then store the value
+ MEM holds after the operation, if AFTER is false then store the value MEM
+ holds before the operation. If TARGET is zero then discard that value, else
+ store it to TARGET. */
+
+void
+s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
+ rtx target, rtx mem, rtx val, bool after)
+{
+ struct alignment_context ac;
+ rtx cmp;
+ rtx new = gen_reg_rtx (SImode);
+ rtx orig = gen_reg_rtx (SImode);
+ rtx csloop = gen_label_rtx ();
+
+ gcc_assert (!target || register_operand (target, VOIDmode));
+ gcc_assert (MEM_P (mem));
+
+ init_alignment_context (&ac, mem, mode);
+
+ /* Shift val to the correct bit positions.
+ Preserve "icm", but prevent "ex icm". */
+ if (!(ac.aligned && code == SET && MEM_P (val)))
+ val = s390_expand_mask_and_shift (val, mode, ac.shift);
+
+ /* Further preparation insns. */
+ if (code == PLUS || code == MINUS)
+ emit_move_insn (orig, val);
+ else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
+ val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
+ NULL_RTX, 1, OPTAB_DIRECT);
+
+ /* Load full word. Subsequent loads are performed by CS. */
+ cmp = force_reg (SImode, ac.memsi);
+
+ /* Start CS loop. */
+ emit_label (csloop);
+ emit_move_insn (new, cmp);
+
+ /* Patch new with val at correct position. */
+ switch (code)
+ {
+ case PLUS:
+ case MINUS:
+ val = expand_simple_binop (SImode, code, new, orig,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ val = expand_simple_binop (SImode, AND, val, ac.modemask,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ /* FALLTHRU */
+ case SET:
+ if (ac.aligned && MEM_P (val))
+ store_bit_field (new, GET_MODE_BITSIZE (mode), 0, SImode, val);
+ else
+ {
+ new = expand_simple_binop (SImode, AND, new, ac.modemaski,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ new = expand_simple_binop (SImode, IOR, new, val,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ }
+ break;
+ case AND:
+ case IOR:
+ case XOR:
+ new = expand_simple_binop (SImode, code, new, val,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ break;
+ case MULT: /* NAND */
+ new = expand_simple_binop (SImode, XOR, new, ac.modemask,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ new = expand_simple_binop (SImode, AND, new, val,
+ NULL_RTX, 1, OPTAB_DIRECT);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
+ ac.memsi, cmp, new));
+
+ /* Return the correct part of the bitfield. */
+ if (target)
+ convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
+ after ? new : cmp, ac.shift,
+ NULL_RTX, 1, OPTAB_DIRECT), 1);
+}
+
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
We need to emit DTP-relative relocations. */
fputs ("@DTPOFF", file);
}
+#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
+/* Implement TARGET_MANGLE_TYPE. */
+
+static const char *
+s390_mangle_type (tree type)
+{
+ if (TYPE_MAIN_VARIANT (type) == long_double_type_node
+ && TARGET_LONG_DOUBLE_128)
+ return "g";
+
+ /* For all other types, use normal C++ mangling. */
+ return NULL;
+}
+#endif
+
/* In the name of slightly smaller debug output, and to cater to
general assembler lossage, recognize various UNSPEC sequences
and turn them back into a direct symbol reference. */
rtx base;
/* Extract base register and offset. */
- if (!s390_decompose_shift_count (op, &base, &offset, 0))
+ if (!s390_decompose_shift_count (op, &base, &offset))
gcc_unreachable ();
/* Sanity check. */
struct s390_address ad;
if (!s390_decompose_address (addr, &ad)
- || (ad.base && !REG_OK_FOR_BASE_STRICT_P (ad.base))
- || (ad.indx && !REG_OK_FOR_INDEX_STRICT_P (ad.indx)))
+ || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
+ || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
output_operand_lossage ("cannot decompose address");
if (ad.disp)
gcc_assert (GET_CODE (x) == MEM);
ret = s390_decompose_address (XEXP (x, 0), &ad);
gcc_assert (ret);
- gcc_assert (!ad.base || REG_OK_FOR_BASE_STRICT_P (ad.base));
+ gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
gcc_assert (!ad.indx);
if (ad.disp)
gcc_assert (GET_CODE (x) == MEM);
ret = s390_decompose_address (XEXP (x, 0), &ad);
gcc_assert (ret);
- gcc_assert (!ad.base || REG_OK_FOR_BASE_STRICT_P (ad.base));
+ gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
gcc_assert (!ad.indx);
if (ad.base)
gcc_assert (GET_CODE (x) == MEM);
ret = s390_decompose_address (XEXP (x, 0), &ad);
gcc_assert (ret);
- gcc_assert (!ad.base || REG_OK_FOR_BASE_STRICT_P (ad.base));
+ gcc_assert (!ad.base || REGNO_OK_FOR_BASE_P (REGNO (ad.base)));
gcc_assert (!ad.indx);
if (ad.disp)
/* We keep a list of constants which we have to add to internal
constant tables in the middle of large functions. */
-#define NR_C_MODES 7
+#define NR_C_MODES 11
enum machine_mode constant_modes[NR_C_MODES] =
{
- TImode,
- DFmode, DImode,
- SFmode, SImode,
+ TFmode, TImode, TDmode,
+ DFmode, DImode, DDmode,
+ SFmode, SImode, SDmode,
HImode,
QImode
};
switch (GET_MODE_CLASS (mode))
{
case MODE_FLOAT:
+ case MODE_DECIMAL_FLOAT:
gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
{
int i;
for (i = 0; i < 6; i++)
- if (!regs_ever_live[i])
+ if (!df_regs_ever_live_p (i))
return i;
return 0;
}
deal with this automatically. */
if (current_function_calls_eh_return || cfun->machine->has_landing_pad_p)
for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
- regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
+ if (current_function_calls_eh_return
+ || (cfun->machine->has_landing_pad_p
+ && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
+ regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
/* For nonlocal gotos all call-saved registers have to be saved.
This flag is also set for the unwinding code in libgcc.
cfun_frame_layout.high_fprs = 0;
if (TARGET_64BIT)
for (i = 24; i < 32; i++)
- if (regs_ever_live[i] && !global_regs[i])
+ if (df_regs_ever_live_p (i) && !global_regs[i])
{
cfun_set_fpr_bit (i - 16);
cfun_frame_layout.high_fprs++;
if (flag_pic)
clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
- |= regs_ever_live[PIC_OFFSET_TABLE_REGNUM];
+ |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
clobbered_regs[BASE_REGNUM]
|= (cfun->machine->base_reg
|| current_function_stdarg);
for (i = 6; i < 16; i++)
- if (clobbered_regs[i])
+ if (df_regs_ever_live_p (i) || clobbered_regs[i])
break;
for (j = 15; j > i; j--)
- if (clobbered_regs[j])
+ if (df_regs_ever_live_p (j) || clobbered_regs[j])
break;
if (i == 16)
{
/* Nothing to save/restore. */
+ cfun_frame_layout.first_save_gpr_slot = -1;
+ cfun_frame_layout.last_save_gpr_slot = -1;
cfun_frame_layout.first_save_gpr = -1;
cfun_frame_layout.first_restore_gpr = -1;
cfun_frame_layout.last_save_gpr = -1;
}
else
{
- /* Save / Restore from gpr i to j. */
- cfun_frame_layout.first_save_gpr = i;
- cfun_frame_layout.first_restore_gpr = i;
- cfun_frame_layout.last_save_gpr = j;
- cfun_frame_layout.last_restore_gpr = j;
+ /* Save slots for gprs from i to j. */
+ cfun_frame_layout.first_save_gpr_slot = i;
+ cfun_frame_layout.last_save_gpr_slot = j;
+
+ for (i = cfun_frame_layout.first_save_gpr_slot;
+ i < cfun_frame_layout.last_save_gpr_slot + 1;
+ i++)
+ if (clobbered_regs[i])
+ break;
+
+ for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
+ if (clobbered_regs[j])
+ break;
+
+ if (i == cfun_frame_layout.last_save_gpr_slot + 1)
+ {
+ /* Nothing to save/restore. */
+ cfun_frame_layout.first_save_gpr = -1;
+ cfun_frame_layout.first_restore_gpr = -1;
+ cfun_frame_layout.last_save_gpr = -1;
+ cfun_frame_layout.last_restore_gpr = -1;
+ }
+ else
+ {
+ /* Save / Restore from gpr i to j. */
+ cfun_frame_layout.first_save_gpr = i;
+ cfun_frame_layout.first_restore_gpr = i;
+ cfun_frame_layout.last_save_gpr = j;
+ cfun_frame_layout.last_restore_gpr = j;
+ }
}
if (current_function_stdarg)
if (cfun_frame_layout.first_save_gpr == -1
|| cfun_frame_layout.first_save_gpr > 2 + min_gpr)
- cfun_frame_layout.first_save_gpr = 2 + min_gpr;
+ {
+ cfun_frame_layout.first_save_gpr = 2 + min_gpr;
+ cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
+ }
if (cfun_frame_layout.last_save_gpr == -1
|| cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
- cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
+ {
+ cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
+ cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
+ }
}
/* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
if (!TARGET_64BIT)
for (i = 2; i < 4; i++)
- if (regs_ever_live[i + 16] && !global_regs[i + 16])
+ if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
cfun_set_fpr_bit (i);
}
cfun_frame_layout.f0_offset = 16 * UNITS_PER_WORD;
cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
- cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr
+ cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
* UNITS_PER_WORD);
}
else if (TARGET_BACKCHAIN) /* kernel stack layout */
- UNITS_PER_WORD);
cfun_frame_layout.gprs_offset
= (cfun_frame_layout.backchain_offset
- - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr + 1)
+ - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
* UNITS_PER_WORD);
if (TARGET_64BIT)
/* Try to predict whether we'll need the base register. */
base_used = cfun->machine->split_branches_pending_p
|| current_function_uses_const_pool
- || (!DISP_IN_RANGE (-frame_size)
- && !CONST_OK_FOR_K (-frame_size));
+ || (!DISP_IN_RANGE (frame_size)
+ && !CONST_OK_FOR_K (frame_size));
/* Decide which register to use as literal pool base. In small
leaf functions, try to use an unused call-clobbered register
as base register to avoid save/restore overhead. */
if (!base_used)
cfun->machine->base_reg = NULL_RTX;
- else if (current_function_is_leaf && !regs_ever_live[5])
+ else if (current_function_is_leaf && !df_regs_ever_live_p (5))
cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
else
cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
s390_register_info (clobbered_regs);
- regs_ever_live[BASE_REGNUM] = clobbered_regs[BASE_REGNUM];
- regs_ever_live[RETURN_REGNUM] = clobbered_regs[RETURN_REGNUM];
- regs_ever_live[STACK_POINTER_REGNUM] = clobbered_regs[STACK_POINTER_REGNUM];
+ df_set_regs_ever_live (BASE_REGNUM,
+ clobbered_regs[BASE_REGNUM] ? true : false);
+ df_set_regs_ever_live (RETURN_REGNUM,
+ clobbered_regs[RETURN_REGNUM] ? true : false);
+ df_set_regs_ever_live (STACK_POINTER_REGNUM,
+ clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
if (cfun->machine->base_reg)
- regs_ever_live[REGNO (cfun->machine->base_reg)] = 1;
+ df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
+}
+
+/* Return true if it is legal to put a value of mode MODE into
+ register REGNO. */
+
+bool
+s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
+{
+ switch (REGNO_REG_CLASS (regno))
+ {
+ case FP_REGS:
+ if (REGNO_PAIR_OK (regno, mode))
+ {
+ if (mode == SImode || mode == DImode)
+ return true;
+
+ if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
+ return true;
+ }
+ break;
+ case ADDR_REGS:
+ if (FRAME_REGNO_P (regno) && mode == Pmode)
+ return true;
+
+ /* fallthrough */
+ case GENERAL_REGS:
+ if (REGNO_PAIR_OK (regno, mode))
+ {
+ if (TARGET_64BIT
+ || (mode != TFmode && mode != TCmode && mode != TDmode))
+ return true;
+ }
+ break;
+ case CC_REGS:
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ return true;
+ break;
+ case ACCESS_REGS:
+ if (REGNO_PAIR_OK (regno, mode))
+ {
+ if (mode == SImode || mode == Pmode)
+ return true;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ return false;
}
/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
return true;
}
+/* Return the maximum number of registers needed to represent a value
+ of mode MODE in a register of class CLASS. */
+
+int
+s390_class_max_nregs (enum reg_class class, enum machine_mode mode)
+{
+ switch (class)
+ {
+ case FP_REGS:
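+ /* Each FPR holds 8 bytes; a complex value keeps its real and
+ imaginary parts in separate sets of FPRs. */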
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
+ else
+ return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
+ case ACCESS_REGS:
+ return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
+ default:
+ break;
+ }
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+}
+
/* Return true if register FROM can be eliminated via register TO. */
bool
case RETURN_ADDRESS_POINTER_REGNUM:
s390_init_frame_layout ();
- index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr;
+ index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
gcc_assert (index >= 0);
offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
offset += index * UNITS_PER_WORD;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
- annotate_constant_pool_refs (&PATTERN (insn));
+ {
+ annotate_constant_pool_refs (&PATTERN (insn));
+ df_insn_rescan (insn);
+ }
pop_topmost_sequence ();
if (cfun_frame_layout.first_save_gpr != -1)
{
insn = save_gprs (stack_pointer_rtx,
- cfun_frame_layout.gprs_offset,
+ cfun_frame_layout.gprs_offset
+ + UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
+ - cfun_frame_layout.first_save_gpr_slot),
cfun_frame_layout.first_save_gpr,
cfun_frame_layout.last_save_gpr);
emit_insn (insn);
if (s390_stack_size)
{
- HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
- & ~(s390_stack_guard - 1));
- rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
- GEN_INT (stack_check_mask));
+ HOST_WIDE_INT stack_guard;
- if (TARGET_64BIT)
- gen_cmpdi (t, const0_rtx);
+ if (s390_stack_guard)
+ stack_guard = s390_stack_guard;
else
- gen_cmpsi (t, const0_rtx);
+ {
+ /* If no stack guard value is provided, the smallest power of 2
+ that is at least the current frame size is chosen. */
+ stack_guard = 1;
+ while (stack_guard < cfun_frame_layout.frame_size)
+ stack_guard <<= 1;
+ }
- emit_insn (gen_conditional_trap (gen_rtx_EQ (CCmode,
- gen_rtx_REG (CCmode,
- CC_REGNUM),
- const0_rtx),
- const0_rtx));
+ if (cfun_frame_layout.frame_size >= s390_stack_size)
+ {
+ warning (0, "frame size of function %qs is "
+ HOST_WIDE_INT_PRINT_DEC
+ " bytes exceeding user provided stack limit of "
+ HOST_WIDE_INT_PRINT_DEC " bytes. "
+ "An unconditional trap is added.",
+ current_function_name(), cfun_frame_layout.frame_size,
+ s390_stack_size);
+ emit_insn (gen_trap ());
+ }
+ else
+ {
+ HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
+ & ~(stack_guard - 1));
+ rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
+ GEN_INT (stack_check_mask));
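+ /* gen_cmpdi/gen_cmpsi emit no code themselves; the cmp expanders
+ only record their operands in s390_compare_op0/1, from where the
+ conditional trap emitted below picks them up. */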
+ if (TARGET_64BIT)
+ gen_cmpdi (t, const0_rtx);
+ else
+ gen_cmpsi (t, const0_rtx);
+
+ emit_insn (gen_conditional_trap (gen_rtx_EQ (CCmode,
+ gen_rtx_REG (CCmode,
+ CC_REGNUM),
+ const0_rtx),
+ const0_rtx));
+ }
}
if (s390_warn_framesize > 0
/* Set up got pointer, if needed. */
- if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
+ if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
{
rtx insns = s390_load_got ();
for (insn = insns; insn; insn = NEXT_INSN (insn))
- {
- annotate_constant_pool_refs (&PATTERN (insn));
-
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, NULL_RTX,
- REG_NOTES (insn));
- }
+ annotate_constant_pool_refs (&PATTERN (insn));
emit_insn (insns);
}
{
addr = plus_constant (frame_pointer,
offset + cfun_frame_layout.gprs_offset
- + (i - cfun_frame_layout.first_save_gpr)
+ + (i - cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD);
addr = gen_rtx_MEM (Pmode, addr);
set_mem_alias_set (addr, get_frame_alias_set ());
addr = plus_constant (frame_pointer,
offset + cfun_frame_layout.gprs_offset
+ (RETURN_REGNUM
- - cfun_frame_layout.first_save_gpr)
+ - cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD);
addr = gen_rtx_MEM (Pmode, addr);
set_mem_alias_set (addr, get_frame_alias_set ());
insn = restore_gprs (frame_pointer,
offset + cfun_frame_layout.gprs_offset
+ (cfun_frame_layout.first_restore_gpr
- - cfun_frame_layout.first_save_gpr)
+ - cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD,
cfun_frame_layout.first_restore_gpr,
cfun_frame_layout.last_restore_gpr);
/* No type info available for some library calls ... */
if (!type)
- return mode == SFmode || mode == DFmode;
+ return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
/* The ABI says that record types with a single member are treated
just like that member would be. */
/* No type info available for some library calls ... */
if (!type)
return GET_MODE_CLASS (mode) == MODE_INT
- || (TARGET_SOFT_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT);
+ || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
/* We accept small integral (and similar) types. */
if (INTEGRAL_TYPE_P (type)
mode = promote_mode (type, TYPE_MODE (type), &unsignedp, 1);
}
- gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
- || GET_MODE_CLASS (mode) == MODE_FLOAT);
+ gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
gcc_assert (GET_MODE_SIZE (mode) <= 8);
- if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
return gen_rtx_REG (mode, 16);
else
return gen_rtx_REG (mode, 2);
f_sav = TREE_CHAIN (f_ovf);
valist = build_va_arg_indirect_ref (valist);
- gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
- fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
- sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
/* Count number of gp and fp argument registers used. */
if (cfun->va_list_gpr_size)
{
- t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
- build_int_cst (NULL_TREE, n_gpr));
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr,
+ build_int_cst (NULL_TREE, n_gpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
if (cfun->va_list_fpr_size)
{
- t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
- build_int_cst (NULL_TREE, n_fpr));
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr,
+ build_int_cst (NULL_TREE, n_fpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
(int)n_gpr, (int)n_fpr, off);
- t = build (PLUS_EXPR, TREE_TYPE (ovf), t, build_int_cst (NULL_TREE, off));
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
- t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
|| (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
{
t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
- t = build (PLUS_EXPR, TREE_TYPE (sav), t,
- build_int_cst (NULL_TREE, -RETURN_REGNUM * UNITS_PER_WORD));
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
+ size_int (-RETURN_REGNUM * UNITS_PER_WORD));
- t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
f_sav = TREE_CHAIN (f_ovf);
valist = build_va_arg_indirect_ref (valist);
- gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
- fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
- sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
size = int_size_in_bytes (type);
t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
gimplify_and_add (t, pre_p);
- t = build2 (PLUS_EXPR, ptr_type_node, sav,
- fold_convert (ptr_type_node, size_int (sav_ofs)));
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
+ size_int (sav_ofs));
u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
- t = build2 (PLUS_EXPR, ptr_type_node, t, fold_convert (ptr_type_node, u));
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
- t = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
gimplify_and_add (t, pre_p);
t = build1 (GOTO_EXPR, void_type_node, lab_over);
t = ovf;
if (size < UNITS_PER_WORD)
- t = build2 (PLUS_EXPR, ptr_type_node, t,
- fold_convert (ptr_type_node, size_int (UNITS_PER_WORD - size)));
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
+ size_int (UNITS_PER_WORD - size));
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
- u = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ u = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
gimplify_and_add (u, pre_p);
- t = build2 (PLUS_EXPR, ptr_type_node, t,
- fold_convert (ptr_type_node, size_int (size)));
- t = build2 (MODIFY_EXPR, ptr_type_node, ovf, t);
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
+ size_int (size));
+ t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, ovf, t);
gimplify_and_add (t, pre_p);
t = build1 (LABEL_EXPR, void_type_node, lab_over);
tree ftype;
ftype = build_function_type (ptr_type_node, void_list_node);
- lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
- S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
- NULL, NULL_TREE);
+ add_builtin_function ("__builtin_thread_pointer", ftype,
+ S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
+ NULL, NULL_TREE);
ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
- lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
- S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
- NULL, NULL_TREE);
+ add_builtin_function ("__builtin_set_thread_pointer", ftype,
+ S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
+ NULL, NULL_TREE);
}
/* Expand an expression EXP that calls a built-in function,
unsigned int const *code_for_builtin =
TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
- tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
- tree arglist = TREE_OPERAND (exp, 1);
enum insn_code icode;
rtx op[MAX_ARGS], pat;
int arity;
bool nonvoid;
+ tree arg;
+ call_expr_arg_iterator iter;
if (fcode >= S390_BUILTIN_max)
internal_error ("bad builtin fcode");
nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
- for (arglist = TREE_OPERAND (exp, 1), arity = 0;
- arglist;
- arglist = TREE_CHAIN (arglist), arity++)
+ arity = 0;
+ FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
{
const struct insn_operand_data *insn_op;
- tree arg = TREE_VALUE (arglist);
if (arg == error_mark_node)
return NULL_RTX;
if (arity > MAX_ARGS)
if (!(*insn_op->predicate) (op[arity], insn_op->mode))
op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
+ arity++;
}
if (nonvoid)
plus_constant (addr, (TARGET_64BIT ? 24 : 12)))), fnaddr);
}
-/* Return rtx for 64-bit constant formed from the 32-bit subwords
- LOW and HIGH, independent of the host word size. */
-
-rtx
-s390_gen_rtx_const_DI (int high, int low)
-{
-#if HOST_BITS_PER_WIDE_INT >= 64
- HOST_WIDE_INT val;
- val = (HOST_WIDE_INT)high;
- val <<= 32;
- val |= (HOST_WIDE_INT)low;
-
- return GEN_INT (val);
-#else
-#if HOST_BITS_PER_WIDE_INT >= 32
- return immed_double_const ((HOST_WIDE_INT)low, (HOST_WIDE_INT)high, DImode);
-#else
- gcc_unreachable ();
-#endif
-#endif
-}
-
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. */
return (mode == SImode || (TARGET_64BIT && mode == DImode));
}
-/* Checks whether the given ARGUMENT_LIST would use a caller
+/* Checks whether the given CALL_EXPR would use a caller
saved register. This is used to decide whether sibling call
optimization could be performed on the respective function
call. */
static bool
-s390_call_saved_register_used (tree argument_list)
+s390_call_saved_register_used (tree call_expr)
{
CUMULATIVE_ARGS cum;
tree parameter;
enum machine_mode mode;
tree type;
rtx parm_rtx;
- int reg;
+ int reg, i;
INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
- while (argument_list)
+ for (i = 0; i < call_expr_nargs (call_expr); i++)
{
- parameter = TREE_VALUE (argument_list);
- argument_list = TREE_CHAIN (argument_list);
-
+ parameter = CALL_EXPR_ARG (call_expr, i);
gcc_assert (parameter);
/* For an undeclared variable passed as parameter we will get
/* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
which would have to be restored before the sibcall. */
- if (!TARGET_64BIT && flag_pic && decl && TREE_PUBLIC (decl))
+ if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
return false;
/* Register 6 on s390 is available as an argument register but unfortunately
"caller saved". This makes functions needing this register for arguments
not suitable for sibcalls. */
- if (TREE_OPERAND (exp, 1)
- && s390_call_saved_register_used (TREE_OPERAND (exp, 1)))
- return false;
-
- return true;
+ return !s390_call_saved_register_used (exp);
}
/* Return the fixed registers used for condition codes. */
machine_dependent_reorg might confuse insn length counts. */
split_all_insns_noflow ();
+ /* From here on decomposed literal pool addresses must be accepted. */
+ cfun->machine->decomposed_literal_pool_addresses_ok_p = true;
/* Install the main literal pool and the associated base
register load insns.
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif
+#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
+#undef TARGET_MANGLE_TYPE
+#define TARGET_MANGLE_TYPE s390_mangle_type
+#endif
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
+
+#undef TARGET_SECONDARY_RELOAD
+#define TARGET_SECONDARY_RELOAD s390_secondary_reload
+
+#undef TARGET_LIBGCC_CMP_RETURN_MODE
+#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
+
+#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
+#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-s390.h"