/* Subroutines used for code generation on the DEC Alpha.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
- 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+ 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
enum alpha_fp_trap_mode alpha_fptm;
-/* Save information from a "cmpxx" operation until the branch or scc is
- emitted. */
-
-struct alpha_compare alpha_compare;
-
/* Nonzero if inside of a function, because the Alpha asm can't
handle .files inside of functions. */
{ "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
{ "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
{ "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
- { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
- { 0, 0, 0 }
+ { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
};
+ int const ct_size = ARRAY_SIZE (cpu_table);
int i;
/* Unicos/Mk doesn't have shared libraries. */
if (alpha_cpu_string)
{
- for (i = 0; cpu_table [i].name; i++)
+ for (i = 0; i < ct_size; i++)
if (! strcmp (alpha_cpu_string, cpu_table [i].name))
{
alpha_tune = alpha_cpu = cpu_table [i].processor;
target_flags |= cpu_table [i].flags;
break;
}
- if (! cpu_table [i].name)
+ if (i == ct_size)
error ("bad value %qs for -mcpu switch", alpha_cpu_string);
}
if (alpha_tune_string)
{
- for (i = 0; cpu_table [i].name; i++)
+ for (i = 0; i < ct_size; i++)
if (! strcmp (alpha_tune_string, cpu_table [i].name))
{
alpha_tune = cpu_table [i].processor;
break;
}
- if (! cpu_table [i].name)
+ if (i == ct_size)
error ("bad value %qs for -mcpu switch", alpha_tune_string);
}
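For reference, a minimal standalone sketch of the sentinel-to-ARRAY_SIZE conversion the two hunks above apply; the table, names, and values here are hypothetical, not the alpha.c ones:

#include <string.h>

#define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0]))

struct entry { const char *name; int processor; };

static const struct entry table[] = {
  { "ev6",  6 },
  { "ev67", 67 }	/* No { 0, 0 } sentinel entry is needed.  */
};

static int
lookup (const char *key)
{
  size_t i;

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (! strcmp (key, table[i].name))
      return table[i].processor;

  return -1;	/* i == ARRAY_SIZE (table) means "not found".  */
}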
rtx tmp = op;
if (GET_CODE (tmp) == SUBREG)
tmp = SUBREG_REG (tmp);
- if (GET_CODE (tmp) == REG
+ if (REG_P (tmp)
&& REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
{
op = reg_equiv_memory_loc[REGNO (tmp)];
tmp = NEXT_INSN (tmp);
if (!tmp)
return NULL_RTX;
- if (GET_CODE (tmp) == JUMP_INSN
+ if (JUMP_P (tmp)
&& GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
return PATTERN (tmp);
return NULL_RTX;
enum tls_model model;
if (GET_CODE (symbol) != SYMBOL_REF)
- return 0;
+ return TLS_MODEL_NONE;
model = SYMBOL_REF_TLS_MODEL (symbol);
/* Local-exec with a 64-bit size is the same code as initial-exec. */
any of those forms can be surrounded with an AND that clears the
low-order three bits; this is an "unaligned" access. */
-bool
-alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
+static bool
+alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
/* If this is an ldq_u type address, discard the outer AND. */
if (mode == DImode
&& GET_CODE (x) == AND
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) == -8)
x = XEXP (x, 0);
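To illustrate the test above, an ldq_u-style "unaligned" address has this RTL shape (a sketch; the register and offset are illustrative):

;; The AND clears the low-order three bits of the byte address.
(and:DI (plus:DI (reg:DI 1) (const_int 5))
        (const_int -8))

After the AND is stripped, validation continues on the inner PLUS.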
{
if (! strict
&& NONSTRICT_REG_OK_FP_BASE_P (x)
- && GET_CODE (ofs) == CONST_INT)
+ && CONST_INT_P (ofs))
return true;
if ((strict
? STRICT_REG_OK_FOR_BASE_P (x)
}
}
- /* If we're managing explicit relocations, LO_SUM is valid, as
- are small data symbols. */
- else if (TARGET_EXPLICIT_RELOCS)
+ /* If we're managing explicit relocations, LO_SUM is valid, as are small
+ data symbols. Avoid explicit relocations of modes larger than word
+ mode, since e.g. $LC0+8($1) can fold around a +/- 32k offset. */
+ else if (TARGET_EXPLICIT_RELOCS
+ && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
if (small_symbolic_operand (x, Pmode))
return true;
/* Try machine-dependent ways of modifying an illegitimate address
to be legitimate. If we find one, return the new, valid address. */
-rtx
-alpha_legitimize_address (rtx x, rtx scratch,
- enum machine_mode mode ATTRIBUTE_UNUSED)
+static rtx
+alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
HOST_WIDE_INT addend;
valid offset, compute the high part of the constant and add it to
the register. Then our address is (plus temp low-part-const). */
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1))
&& ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
{
addend = INTVAL (XEXP (x, 1));
if (can_create_pseudo_p ()
&& GET_CODE (x) == CONST
&& GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
{
addend = INTVAL (XEXP (XEXP (x, 0), 1));
x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
our address. */
if (can_create_pseudo_p ()
&& GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
+ && REG_P (XEXP (x, 0))
&& GET_CODE (XEXP (x, 1)) == CONST
&& GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
- && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
{
addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
goto split_addend;
}
- /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
- if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
+ /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
+ Avoid modes larger than word mode, since e.g. $LC0+8($1) can fold
+ around a +/- 32k offset. */
+ if (TARGET_EXPLICIT_RELOCS
+ && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ && symbolic_operand (x, Pmode))
{
rtx r0, r16, eqv, tga, tp, insn, dest, seq;
}
}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. Return X or the new, valid address. */
+
+static rtx
+alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
+{
+ rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
+ return new_x ? new_x : x;
+}
+
/* Primarily this is required for TLS symbols, but given that our move
patterns *ought* to be able to handle any symbol at any time, we
should never be spilling symbolic operands to the constant pool, ever. */
/* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && REG_P (XEXP (XEXP (x, 0), 0))
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && CONST_INT_P (XEXP (x, 1)))
{
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, type);
+ opnum, (enum reload_type) type);
return x;
}
splitting the addend across an ldah and the mem insn. This
cuts the number of extra insns needed from 3 to 1. */
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
+ && REG_P (XEXP (x, 0))
&& REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
&& REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, type);
+ opnum, (enum reload_type) type);
return x;
}
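To make the comment above concrete, a hedged sketch of the split once the base register is reloaded; the register names and the high/low decomposition are illustrative:

	# disp = high * 65536 + low, with low sign-extended to 16 bits.
	ldah	$22, high($sp)	# $22 = $sp + (high << 16)
	ldq	$1, low($22)	# one extra insn instead of three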
else if (GET_CODE (XEXP (x, 0)) == MULT
&& const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
{
- *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
- + rtx_cost (XEXP (x, 1), outer_code, speed) + COSTS_N_INSNS (1));
+ *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
+ (enum rtx_code) outer_code, speed)
+ + rtx_cost (XEXP (x, 1),
+ (enum rtx_code) outer_code, speed)
+ + COSTS_N_INSNS (1));
return true;
}
return false;
return false;
case ASHIFT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) <= 3)
{
*total = COSTS_N_INSNS (1);
return false;
case FLOAT_EXTEND:
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
*total = 0;
else
*total = cost_data->fp_add;
rtx base;
HOST_WIDE_INT disp, offset;
- gcc_assert (GET_CODE (ref) == MEM);
+ gcc_assert (MEM_P (ref));
if (reload_in_progress
&& ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
rtx base;
HOST_WIDE_INT offset = 0;
- gcc_assert (GET_CODE (ref) == MEM);
+ gcc_assert (MEM_P (ref));
if (reload_in_progress
&& ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
return rclass;
/* These sorts of constants we can easily drop to memory. */
- if (GET_CODE (x) == CONST_INT
+ if (CONST_INT_P (x)
|| GET_CODE (x) == CONST_DOUBLE
|| GET_CODE (x) == CONST_VECTOR)
{
{
rtx x = *xp, orig = (rtx) data;
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
return 0;
MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
return -1;
}
-/* Given INSN, which is an INSN list or the PATTERN of a single insn
- generated to perform a memory operation, look for any MEMs in either
+/* Given SEQ, which is an INSN list, look for any MEMs in either
a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
volatile flags from REF into each of the MEMs found. If REF is not
a MEM, don't do anything. */
void
-alpha_set_memflags (rtx insn, rtx ref)
+alpha_set_memflags (rtx seq, rtx ref)
{
- rtx *base_ptr;
+ rtx insn;
- if (GET_CODE (ref) != MEM)
+ if (!MEM_P (ref))
return;
/* This is only called from alpha.md, after having had something
&& !MEM_READONLY_P (ref))
return;
- if (INSN_P (insn))
- base_ptr = &PATTERN (insn);
- else
- base_ptr = &insn;
- for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
+ for (insn = seq; insn; insn = NEXT_INSN (insn))
+ if (INSN_P (insn))
+ for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
+ else
+ gcc_unreachable ();
}
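A hedged usage sketch of the new interface; expand_unaligned_copy and its body are hypothetical, but start_sequence/get_insns/end_sequence are the standard GCC sequence API:

static rtx
expand_unaligned_copy (rtx orig_mem)
{
  rtx seq;

  start_sequence ();
  /* ... emit the replacement insns for ORIG_MEM here ...  */
  seq = get_insns ();
  end_sequence ();

  /* Copy the volatile and in-struct flags from ORIG_MEM onto every
     MEM in the new sequence.  */
  alpha_set_memflags (seq, orig_mem);
  return seq;
}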
\f
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
/* If we can't make any pseudos, TARGET is an SImode hard register, and we
can't load this constant in one insn, do this in DImode. */
if (!can_create_pseudo_p () && mode == SImode
- && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
{
result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
if (result)
x = simplify_subreg (DImode, x, GET_MODE (x), 0);
- if (GET_CODE (x) == CONST_INT)
+ if (CONST_INT_P (x))
{
i0 = INTVAL (x);
i1 = -(i0 < 0);
switch (GET_CODE (x))
{
- case CONST:
case LABEL_REF:
case HIGH:
return true;
+ case CONST:
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ x = XEXP (XEXP (x, 0), 0);
+ else
+ return true;
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return true;
+
+ /* FALLTHRU */
+
case SYMBOL_REF:
/* TLS symbols are never valid. */
return SYMBOL_REF_TLS_MODEL (x) == 0;
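Illustrative RTL for the new CONST case (a sketch, not from the patch): an offsetted symbol such as

  (const (plus (symbol_ref "x") (const_int 8)))

is peeled down to its SYMBOL_REF, which then falls through to the TLS check, so "x" plus an offset is still rejected when "x" is a TLS symbol.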
bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
+ rtx tmp;
+
/* If the output is not a register, the input must be. */
- if (GET_CODE (operands[0]) == MEM
+ if (MEM_P (operands[0])
&& ! reg_or_0_operand (operands[1], mode))
operands[1] = force_reg (mode, operands[1]);
/* Allow legitimize_address to perform some simplifications. */
if (mode == Pmode && symbolic_operand (operands[1], mode))
{
- rtx tmp;
-
- tmp = alpha_legitimize_address (operands[1], operands[0], mode);
+ tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
if (tmp)
{
if (tmp == operands[0])
return false;
/* Split large integers. */
- if (GET_CODE (operands[1]) == CONST_INT
+ if (CONST_INT_P (operands[1])
|| GET_CODE (operands[1]) == CONST_DOUBLE
|| GET_CODE (operands[1]) == CONST_VECTOR)
{
}
/* Otherwise we've nothing left but to drop the thing to memory. */
- operands[1] = force_const_mem (mode, operands[1]);
+ tmp = force_const_mem (mode, operands[1]);
+
+ if (tmp == NULL_RTX)
+ return false;
+
if (reload_in_progress)
{
- emit_move_insn (operands[0], XEXP (operands[1], 0));
- operands[1] = replace_equiv_address (operands[1], operands[0]);
+ emit_move_insn (operands[0], XEXP (tmp, 0));
+ operands[1] = replace_equiv_address (tmp, operands[0]);
}
else
- operands[1] = validize_mem (operands[1]);
+ operands[1] = validize_mem (tmp);
return false;
}
get_aligned_mem (operands[1], &aligned_mem, &bitnum);
subtarget = operands[0];
- if (GET_CODE (subtarget) == REG)
+ if (REG_P (subtarget))
subtarget = gen_lowpart (DImode, subtarget), copyout = false;
else
subtarget = gen_reg_rtx (DImode), copyout = true;
temp2 = gen_reg_rtx (DImode);
subtarget = operands[0];
- if (GET_CODE (subtarget) == REG)
+ if (REG_P (subtarget))
subtarget = gen_lowpart (DImode, subtarget), copyout = false;
else
subtarget = gen_reg_rtx (DImode), copyout = true;
/* Generate and emit the compare and conditional branch described by OPERANDS. */
-rtx
-alpha_emit_conditional_branch (enum rtx_code code)
+void
+alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
{
enum rtx_code cmp_code, branch_code;
- enum machine_mode cmp_mode, branch_mode = VOIDmode;
- rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
+ enum machine_mode branch_mode = VOIDmode;
+ enum rtx_code code = GET_CODE (operands[0]);
+ rtx op0 = operands[1], op1 = operands[2];
rtx tem;
- if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
+ if (cmp_mode == TFmode)
{
op0 = alpha_emit_xfloating_compare (&code, op0, op1);
op1 = const0_rtx;
- alpha_compare.fp_p = 0;
+ cmp_mode = DImode;
}
/* The general case: fold the comparison code to the types of compares
case GE: case GT: case GEU: case GTU:
/* For FP, we swap them, for INT, we reverse them. */
- if (alpha_compare.fp_p)
+ if (cmp_mode == DFmode)
{
cmp_code = swap_condition (code);
branch_code = NE;
gcc_unreachable ();
}
- if (alpha_compare.fp_p)
+ if (cmp_mode == DFmode)
{
- cmp_mode = DFmode;
- if (flag_unsafe_math_optimizations)
+ if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
{
/* When we are not as concerned about non-finite values, and we
are comparing against zero, we can branch directly. */
}
else
{
- cmp_mode = DImode;
-
/* The following optimizations are only for signed compares. */
if (code != LEU && code != LTU && code != GEU && code != GTU)
{
/* ??? Don't do this when comparing against symbols, otherwise
we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
be declared false out of hand (at least for non-weak). */
- else if (GET_CODE (op1) == CONST_INT
+ else if (CONST_INT_P (op1)
&& (code == EQ || code == NE)
&& !(symbolic_operand (op0, VOIDmode)
- || (GET_CODE (op0) == REG && REG_POINTER (op0))))
+ || (REG_P (op0) && REG_POINTER (op0))))
{
rtx n_op1 = GEN_INT (-INTVAL (op1));
emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
}
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
-
- /* Return the branch comparison. */
- return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
+ /* Emit the branch instruction. */
+ tem = gen_rtx_SET (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_fmt_ee (branch_code,
+ branch_mode, tem,
+ CONST0_RTX (cmp_mode)),
+ gen_rtx_LABEL_REF (VOIDmode,
+ operands[3]),
+ pc_rtx));
+ emit_jump_insn (tem);
}
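For clarity, a sketch of the canonical jump shape built above; TEM holds the result of the initial compare, and the operand numbering follows the new interface (operands[0] = comparison, operands[1,2] = its arguments, operands[3] = label):

(set (pc)
     (if_then_else (<branch_code> (reg tem) (const_int 0))
                   (label_ref operands[3])
                   (pc)))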
/* Certain simplifications can be done to make invalid setcc operations
valid. Emit the final setcc and return true, or false if we can't work. */
-rtx
-alpha_emit_setcc (enum rtx_code code)
+bool
+alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
{
enum rtx_code cmp_code;
- rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
- int fp_p = alpha_compare.fp_p;
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx op0 = operands[2], op1 = operands[3];
rtx tmp;
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
-
- if (fp_p && GET_MODE (op0) == TFmode)
+ if (cmp_mode == TFmode)
{
op0 = alpha_emit_xfloating_compare (&code, op0, op1);
op1 = const0_rtx;
- fp_p = 0;
+ cmp_mode = DImode;
}
- if (fp_p && !TARGET_FIX)
- return NULL_RTX;
+ if (cmp_mode == DFmode && !TARGET_FIX)
+ return false;
/* The general case: fold the comparison code to the types of compares
that we have, choosing the branch as necessary. */
case EQ: case LE: case LT: case LEU: case LTU:
case UNORDERED:
/* We have these compares. */
- if (fp_p)
+ if (cmp_mode == DFmode)
cmp_code = code, code = NE;
break;
case NE:
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
break;
/* FALLTHRU */
case GE: case GT: case GEU: case GTU:
/* These normally need swapping, but for integer zero we have
special patterns that recognize swapped operands. */
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
break;
code = swap_condition (code);
- if (fp_p)
+ if (cmp_mode == DFmode)
cmp_code = code, code = NE;
tmp = op0, op0 = op1, op1 = tmp;
break;
gcc_unreachable ();
}
- if (!fp_p)
+ if (cmp_mode == DImode)
{
if (!register_operand (op0, DImode))
op0 = force_reg (DImode, op0);
/* Emit an initial compare instruction, if necessary. */
if (cmp_code != UNKNOWN)
{
- enum machine_mode mode = fp_p ? DFmode : DImode;
-
- tmp = gen_reg_rtx (mode);
+ tmp = gen_reg_rtx (cmp_mode);
emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
+ gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
- op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
+ op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
op1 = const0_rtx;
}
- /* Return the setcc comparison. */
- return gen_rtx_fmt_ee (code, DImode, op0, op1);
+ /* Emit the setcc instruction. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_fmt_ee (code, DImode, op0, op1)));
+ return true;
}
{
enum rtx_code code = GET_CODE (cmp);
enum rtx_code cmov_code = NE;
- rtx op0 = alpha_compare.op0;
- rtx op1 = alpha_compare.op1;
- int fp_p = alpha_compare.fp_p;
+ rtx op0 = XEXP (cmp, 0);
+ rtx op1 = XEXP (cmp, 1);
enum machine_mode cmp_mode
= (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
- enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
enum machine_mode cmov_mode = VOIDmode;
int local_fast_math = flag_unsafe_math_optimizations;
rtx tem;
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
+ gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
- if (fp_p != FLOAT_MODE_P (mode))
+ if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
{
enum rtx_code cmp_code;
case GE: case GT: case GEU: case GTU:
/* These normally need swapping, but for integer zero we have
special patterns that recognize swapped operands. */
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
cmp_code = code, code = NE;
else
{
gcc_unreachable ();
}
- tem = gen_reg_rtx (cmp_op_mode);
+ tem = gen_reg_rtx (cmp_mode);
emit_insn (gen_rtx_SET (VOIDmode, tem,
- gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
+ gen_rtx_fmt_ee (cmp_code, cmp_mode,
op0, op1)));
- cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
- op0 = gen_lowpart (cmp_op_mode, tem);
- op1 = CONST0_RTX (cmp_op_mode);
- fp_p = !fp_p;
+ cmp_mode = cmp_mode == DImode ? DFmode : DImode;
+ op0 = gen_lowpart (cmp_mode, tem);
+ op1 = CONST0_RTX (cmp_mode);
local_fast_math = 1;
}
/* We may be able to use a conditional move directly.
This avoids emitting spurious compares. */
if (signed_comparison_operator (cmp, VOIDmode)
- && (!fp_p || local_fast_math)
+ && (cmp_mode == DImode || local_fast_math)
&& (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
gcc_unreachable ();
}
- if (!fp_p)
+ if (cmp_mode == DImode)
{
if (!reg_or_0_operand (op0, DImode))
op0 = force_reg (DImode, op0);
/* ??? We mark the branch mode to be CCmode to prevent the compare
and cmov from being combined, since the compare insn follows IEEE
rules that the cmov does not. */
- if (fp_p && !local_fast_math)
+ if (cmp_mode == DFmode && !local_fast_math)
cmov_mode = CCmode;
- tem = gen_reg_rtx (cmp_op_mode);
- emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
- return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
+ tem = gen_reg_rtx (cmp_mode);
+ emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
+ return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
}
/* Simplify a conditional move of two constants into a setcc with
/* Look up the function X_floating library function name for the
given operation. */
-struct xfloating_op GTY(())
+struct GTY(()) xfloating_op
{
const enum rtx_code code;
const char *const GTY((skip)) osf_func;
break;
case VOIDmode:
- gcc_assert (GET_CODE (operands[i]) == CONST_INT);
+ gcc_assert (CONST_INT_P (operands[i]));
/* FALLTHRU */
case DImode:
reg = gen_rtx_REG (DImode, regno);
emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
break;
case 8:
- emit_insn (gen_insql_le (insl, src, addr));
+ emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
break;
}
}
/* Look for additional alignment information from recorded register info. */
tmp = XEXP (orig_src, 0);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
else if (GET_CODE (tmp) == PLUS
- && GET_CODE (XEXP (tmp, 0)) == REG
- && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ && REG_P (XEXP (tmp, 0))
+ && CONST_INT_P (XEXP (tmp, 1)))
{
unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
}
tmp = XEXP (orig_dst, 0);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
else if (GET_CODE (tmp) == PLUS
- && GET_CODE (XEXP (tmp, 0)) == REG
- && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ && REG_P (XEXP (tmp, 0))
+ && CONST_INT_P (XEXP (tmp, 1)))
{
unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
/* Look for stricter alignment. */
tmp = XEXP (orig_dst, 0);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
else if (GET_CODE (tmp) == PLUS
- && GET_CODE (XEXP (tmp, 0)) == REG
- && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ && REG_P (XEXP (tmp, 0))
+ && CONST_INT_P (XEXP (tmp, 1)))
{
HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
- REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
+ add_reg_note (x, REG_BR_PROB, very_unlikely);
}
/* A subroutine of the atomic operation splitters. Emit a load-locked
\f
/* Machine-specific function data. */
-struct machine_function GTY(())
+struct GTY(()) machine_function
{
/* For unicosmk. */
/* List of call information words for calls from this function. */
x = XVECEXP (x, 0, 0);
lituse = "lituse_tlsldm";
}
- else if (GET_CODE (x) == CONST_INT)
+ else if (CONST_INT_P (x))
lituse = "lituse_jsr";
else
{
break;
case 'r':
/* If this operand is the constant zero, write it as "$31". */
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fprintf (file, "%s", reg_names[REGNO (x)]);
else if (x == CONST0_RTX (GET_MODE (x)))
fprintf (file, "$31");
case 'R':
/* Similar, but for floating-point. */
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fprintf (file, "%s", reg_names[REGNO (x)]);
else if (x == CONST0_RTX (GET_MODE (x)))
fprintf (file, "$f31");
case 'N':
/* Write the 1's complement of a constant. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%N value");
fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
case 'P':
/* Write 1 << C, for a constant C. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%P value");
fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
case 'h':
/* Write the high-order 16 bits of a constant, sign-extended. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%h value");
fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
case 'L':
/* Write the low-order 16 bits of a constant, sign-extended. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%L value");
fprintf (file, HOST_WIDE_INT_PRINT_DEC,
fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
}
- else if (GET_CODE (x) == CONST_INT)
+ else if (CONST_INT_P (x))
{
HOST_WIDE_INT mask = 0, value = INTVAL (x);
case 'M':
/* 'b', 'w', 'l', or 'q' as the value of the constant. */
- if (GET_CODE (x) != CONST_INT
+ if (!CONST_INT_P (x)
|| (INTVAL (x) != 8 && INTVAL (x) != 16
&& INTVAL (x) != 32 && INTVAL (x) != 64))
output_operand_lossage ("invalid %%M value");
case 'U':
/* Similar, except do it from the mask. */
- if (GET_CODE (x) == CONST_INT)
+ if (CONST_INT_P (x))
{
HOST_WIDE_INT value = INTVAL (x);
/* Write the constant value divided by 8 for little-endian mode or
(56 - value) / 8 for big-endian mode. */
- if (GET_CODE (x) != CONST_INT
+ if (!CONST_INT_P (x)
|| (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
? 56
: 64)
case 'S':
/* Same, except compute (64 - c) / 8.  */
- if (GET_CODE (x) != CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
- && (INTVAL (x) & 7) != 8)
+ if (!CONST_INT_P (x)
+ || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
+ || (INTVAL (x) & 7) != 0)
output_operand_lossage ("invalid %%s value");
case 'A':
/* Write "_u" for unaligned access. */
- if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
+ if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
fprintf (file, "_u");
break;
case 0:
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fprintf (file, "%s", reg_names[REGNO (x)]);
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
output_address (XEXP (x, 0));
else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
{
addr = XEXP (addr, 0);
if (GET_CODE (addr) == PLUS
- && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (addr, 1)))
{
offset = INTVAL (XEXP (addr, 1));
addr = XEXP (addr, 0);
#ifdef ENABLE_EXECUTE_STACK
emit_library_call (init_one_libfunc ("__enable_execute_stack"),
- 0, VOIDmode, 1, tramp, Pmode);
+ LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
#endif
if (jmpofs >= 0)
return ptr_type_node;
record = (*lang_hooks.types.make_type) (RECORD_TYPE);
- type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
+ type_decl = build_decl (BUILTINS_LOCATION,
+ TYPE_DECL, get_identifier ("__va_list_tag"), record);
TREE_CHAIN (record) = type_decl;
TYPE_NAME (record) = type_decl;
/* C++? SET_IS_AGGR_TYPE (record, 1); */
/* Dummy field to prevent alignment warnings. */
- space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
+ space = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, NULL_TREE, integer_type_node);
DECL_FIELD_CONTEXT (space) = record;
DECL_ARTIFICIAL (space) = 1;
DECL_IGNORED_P (space) = 1;
- ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
+ ofs = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__offset"),
integer_type_node);
DECL_FIELD_CONTEXT (ofs) = record;
TREE_CHAIN (ofs) = space;
- base = build_decl (FIELD_DECL, get_identifier ("__base"),
+ base = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__base"),
ptr_type_node);
DECL_FIELD_CONTEXT (base) = record;
TREE_CHAIN (base) = ofs;
ALPHA_BUILTIN_max
};
-static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
+static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
CODE_FOR_builtin_cmpbge,
CODE_FOR_builtin_extbl,
CODE_FOR_builtin_extwl,
insn_op = &insn_data[icode].operand[arity + nonvoid];
- op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
+ op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
if (!(*insn_op->predicate) (op[arity], insn_op->mode))
op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
tree op0 = fold_convert (vtype, op[0]);
tree op1 = fold_convert (vtype, op[1]);
tree val = fold_build2 (code, vtype, op0, op1);
- return fold_convert (long_integer_type_node, val);
+ return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
}
static tree
/* When outputting a thunk, we don't have valid register life info,
but assemble_start_function wants to output .frame and .mask
directives. */
- if (crtl->is_thunk)
+ if (cfun->is_thunk)
{
*imaskP = 0;
*fmaskP = 0;
#if TARGET_ABI_OPEN_VMS
-const struct attribute_spec vms_attribute_table[] =
+static const struct attribute_spec vms_attribute_table[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
{ "overlaid", 0, 0, true, false, false, NULL },
return 1;
/* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
- if (crtl->is_thunk)
+ if (cfun->is_thunk)
return 1;
/* The nonlocal receiver pattern assumes that the gp is valid for
mem = gen_rtx_MEM (DImode, addr);
}
- REG_NOTES (insn)
- = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, mem, frame_reg),
- REG_NOTES (insn));
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, mem, frame_reg));
}
}
possibly intuit through the loop above. So we invent this
note it looks at instead. */
RTX_FRAME_RELATED_P (seq) = 1;
- REG_NOTES (seq)
- = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- GEN_INT (TARGET_ABI_UNICOSMK
- ? -frame_size + 64
- : -frame_size))),
- REG_NOTES (seq));
+ add_reg_note (seq, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (TARGET_ABI_UNICOSMK
+ ? -frame_size + 64
+ : -frame_size))));
}
if (!TARGET_ABI_UNICOSMK)
if (TARGET_ABI_OPEN_VMS)
{
+ /* Register frame procedures save the fp. */
if (alpha_procedure_type == PT_REGISTER)
- /* Register frame procedures save the fp.
- ?? Ought to have a dwarf2 save for this. */
- emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
- hard_frame_pointer_rtx);
+ {
+ rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
+ hard_frame_pointer_rtx);
+ add_reg_note (insn, REG_CFA_REGISTER, NULL);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
TREE_ASM_WRITTEN (name_tree) = 1;
}
+#if TARGET_ABI_OPEN_VMS
+ if (vms_debug_main
+ && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
+ {
+ targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
+ ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
+ switch_to_section (text_section);
+ vms_debug_main = NULL;
+ }
+#endif
+
alpha_fnname = fnname;
sa_size = alpha_sa_size ();
Otherwise, do it here. */
if (TARGET_ABI_OSF
&& ! alpha_function_needs_gp
- && ! crtl->is_thunk)
+ && ! cfun->is_thunk)
{
putc ('$', file);
assemble_name (file, fnname);
fputs ("\t.prologue 0\n", file);
else if (!flag_inhibit_size_directive)
fprintf (file, "\t.prologue %d\n",
- alpha_function_needs_gp || crtl->is_thunk);
+ alpha_function_needs_gp || cfun->is_thunk);
}
/* Write function epilogue. */
-/* ??? At some point we will want to support full unwind, and so will
- need to mark the epilogue as well. At the moment, we just confuse
- dwarf2out. */
-#undef FRP
-#define FRP(exp) exp
-
void
alpha_expand_epilogue (void)
{
HOST_WIDE_INT reg_offset;
int fp_is_frame_pointer, fp_offset;
rtx sa_reg, sa_reg_exp = NULL;
- rtx sp_adj1, sp_adj2, mem;
+ rtx sp_adj1, sp_adj2, mem, reg, insn;
rtx eh_ofs;
+ rtx cfa_restores = NULL_RTX;
int i;
sa_size = alpha_sa_size ();
if ((TARGET_ABI_OPEN_VMS
&& vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
|| (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
- FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
+ emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
/* Cope with very large offsets to the register save area. */
if (reg_offset + sa_size > 0x8000)
sa_reg = gen_rtx_REG (DImode, 22);
sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
- FRP (emit_move_insn (sa_reg, sa_reg_exp));
+ emit_move_insn (sa_reg, sa_reg_exp);
}
/* Restore registers in order, excepting a true frame pointer. */
mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
if (! eh_ofs)
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
+ reg = gen_rtx_REG (DImode, REG_RA);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset += 8;
imask &= ~(1UL << REG_RA);
{
mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
+ reg = gen_rtx_REG (DImode, i);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
+ cfa_restores);
}
reg_offset += 8;
}
{
mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
+ reg = gen_rtx_REG (DFmode, i+32);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset += 8;
}
}
mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
+ reg = gen_rtx_REG (DImode, i);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset -= 8;
}
mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
+ reg = gen_rtx_REG (DFmode, i+32);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset -= 8;
}
/* Restore the return address from the DSIB. */
-
- mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
+ mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
+ reg = gen_rtx_REG (DImode, REG_RA);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
}
if (frame_size || eh_ofs)
else if (TARGET_ABI_UNICOSMK)
{
sp_adj1 = gen_rtx_REG (DImode, 23);
- FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
+ emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
sp_adj2 = const0_rtx;
}
else if (frame_size < 0x40007fffL)
else
{
sp_adj1 = gen_rtx_REG (DImode, 23);
- FRP (emit_move_insn (sp_adj1, sp_adj2));
+ emit_move_insn (sp_adj1, sp_adj2);
}
sp_adj2 = GEN_INT (low);
}
else
{
rtx tmp = gen_rtx_REG (DImode, 23);
- FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
- 3, false));
+ sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
if (!sp_adj2)
{
/* We can't drop new things to memory this late, afaik,
so build it up by pieces. */
- FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
- -(frame_size < 0)));
+ sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
+ -(frame_size < 0));
gcc_assert (sp_adj2);
}
}
mem = gen_rtx_MEM (DImode,
plus_constant (hard_frame_pointer_rtx, -16));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
+ emit_move_insn (hard_frame_pointer_rtx, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
else if (fp_is_frame_pointer)
{
emit_insn (gen_blockage ());
mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
+ emit_move_insn (hard_frame_pointer_rtx, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
else if (TARGET_ABI_OPEN_VMS)
{
emit_insn (gen_blockage ());
- FRP (emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, vms_save_fp_regno)));
+ emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno));
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
/* Restore the stack pointer. */
emit_insn (gen_blockage ());
if (sp_adj2 == const0_rtx)
- FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
+ insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
else
- FRP (emit_move_insn (stack_pointer_rtx,
- gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
+ insn = emit_move_insn (stack_pointer_rtx,
+ gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
+ REG_NOTES (insn) = cfa_restores;
+ add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
}
else
{
+ gcc_assert (cfa_restores == NULL);
+
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
{
emit_insn (gen_blockage ());
- FRP (emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, vms_save_fp_regno)));
+ insn = emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno));
+ add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
}
else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
{
/* Decrement the frame pointer if the function does not have a
frame. */
-
emit_insn (gen_blockage ());
- FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
- hard_frame_pointer_rtx, constm1_rtx)));
+ emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx, constm1_rtx));
}
}
}
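A distilled sketch of the CFA-note pattern that replaces the removed FRP wrappers in the epilogue above; mem and new_sp stand for whatever address and value apply, while alloc_reg_note/add_reg_note are the note API used throughout this patch:

  rtx cfa_restores = NULL_RTX, reg, insn;

  /* Queue one REG_CFA_RESTORE note per restored register.  */
  reg = gen_rtx_REG (DImode, 9);
  emit_move_insn (reg, mem);
  cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);

  /* Attach every queued note to the single frame-related insn that
     finally adjusts the stack pointer, and redefine the CFA there.  */
  insn = emit_move_insn (stack_pointer_rtx, new_sp);
  REG_NOTES (insn) = cfa_restores;
  add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
  RTX_FRAME_RELATED_P (insn) = 1;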
insn = get_last_insn ();
if (!INSN_P (insn))
insn = prev_active_insn (insn);
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
+#if TARGET_ABI_OSF
+ if (cfun->is_thunk)
+ free_after_compilation (cfun);
+#endif
+
#if TARGET_ABI_OPEN_VMS
alpha_write_linkage (file, fnname, decl);
#endif
HOST_WIDE_INT hi, lo;
rtx this_rtx, insn, funexp;
+ gcc_assert (cfun->is_thunk);
+
/* We always require a valid GP. */
emit_insn (gen_prologue_ldgp ());
emit_note (NOTE_INSN_PROLOGUE_END);
final_start_function (insn, file, 1);
final (insn, file, 1);
final_end_function ();
- free_after_compilation (cfun);
}
#endif /* TARGET_ABI_OSF */
\f
for (i = get_insns (); i ; i = NEXT_INSN (i))
{
- if (GET_CODE (i) == NOTE)
+ if (NOTE_P (i))
{
switch (NOTE_KIND (i))
{
{
if (alpha_tp == ALPHA_TP_FUNC)
{
- if (GET_CODE (i) == JUMP_INSN
+ if (JUMP_P (i)
&& GET_CODE (PATTERN (i)) == RETURN)
goto close_shadow;
}
}
if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
- && GET_CODE (i) == INSN
+ && NONJUMP_INSN_P (i)
&& GET_CODE (PATTERN (i)) != USE
&& GET_CODE (PATTERN (i)) != CLOBBER
&& get_attr_trap (i) == TRAP_YES)
len += 4;
/* Haifa doesn't do well scheduling branches. */
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
goto next_and_done;
next:
/* Haifa doesn't do well scheduling branches. */
/* ??? If this is predicted not-taken, slotting continues, except
that no more IBR, FBR, or JSR insns may be slotted. */
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
goto next_and_done;
next:
ofs = prev_in_use = 0;
i = get_insns ();
- if (GET_CODE (i) == NOTE)
+ if (NOTE_P (i))
i = next_nonnote_insn (i);
ldgp = alpha_function_needs_gp ? 8 : 0;
next = (*next_group) (i, &in_use, &len);
/* When we see a label, resync alignment etc. */
- if (GET_CODE (i) == CODE_LABEL)
+ if (LABEL_P (i))
{
unsigned int new_align = 1 << label_to_alignment (i);
rtx prev, where;
where = prev = prev_nonnote_insn (i);
- if (!where || GET_CODE (where) != CODE_LABEL)
+ if (!where || !LABEL_P (where))
where = i;
/* Can't realign between a call and its gp reload. */
if (! (TARGET_EXPLICIT_RELOCS
- && prev && GET_CODE (prev) == CALL_INSN))
+ && prev && CALL_P (prev)))
{
emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
align = 1 << new_log_align;
where = prev_nonnote_insn (i);
if (where)
{
- if (GET_CODE (where) == CODE_LABEL)
+ if (LABEL_P (where))
{
rtx where2 = prev_nonnote_insn (where);
- if (where2 && GET_CODE (where2) == JUMP_INSN)
+ if (where2 && JUMP_P (where2))
where = where2;
}
- else if (GET_CODE (where) == INSN)
+ else if (NONJUMP_INSN_P (where))
where = i;
}
else
i = next;
}
}
+
+/* Insert an unop between a noreturn function call and a GP load. */
+
+static void
+alpha_pad_noreturn (void)
+{
+ rtx insn, next;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (!CALL_P (insn)
+ || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
+ continue;
+
+ next = next_active_insn (insn);
+
+ if (next)
+ {
+ rtx pat = PATTERN (next);
+
+ if (GET_CODE (pat) == SET
+ && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
+ && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
+ emit_insn_after (gen_unop (), insn);
+ }
+ }
+}
\f
/* Machine dependent reorg pass. */
static void
alpha_reorg (void)
{
+ /* Workaround for a linker error that triggers when an
+ exception handler immediately follows a noreturn function.
+
+ The instruction stream from an object file:
+
+ 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
+ 58: 00 00 ba 27 ldah gp,0(ra)
+ 5c: 00 00 bd 23 lda gp,0(gp)
+ 60: 00 00 7d a7 ldq t12,0(gp)
+ 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
+
+ was converted in the final link pass to:
+
+ fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
+ fdb28: 00 00 fe 2f unop
+ fdb2c: 00 00 fe 2f unop
+ fdb30: 30 82 7d a7 ldq t12,-32208(gp)
+ fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
+
+ GP load instructions were wrongly cleared by the linker relaxation
+ pass. This workaround prevents removal of GP loads by inserting
+ an unop instruction between a noreturn function call and
+ exception handler prologue. */
+
+ if (current_function_has_exception_handlers ())
+ alpha_pad_noreturn ();
+
if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
alpha_handle_trap_shadows ();
enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
-struct alpha_links GTY(())
+struct GTY(()) alpha_links
{
int num;
rtx linkage;
enum reloc_kind rkind;
};
-struct alpha_funcs GTY(())
+struct GTY(()) alpha_funcs
{
int num;
splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
int len;
x = DECL_RTL (cfun->decl);
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
x = XEXP (x, 0);
gcc_assert (GET_CODE (x) == SYMBOL_REF);
fnname = XSTR (x, 0);
emit_insn (gen_blockage ());
/* Set the new frame pointer. */
-
FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
stack_pointer_rtx, GEN_INT (64))));
-
}
else
{
/* Increment the frame pointer register to indicate that we do not
have a frame. */
-
- FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
- hard_frame_pointer_rtx, const1_rtx)));
+ emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx, const1_rtx));
}
}
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
+
#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
-#undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
-#undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f