/* Subroutines used for code generation on the DEC Alpha.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
- 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
#include "df.h"
+#include "libfuncs.h"
/* Specify which cpu to schedule for. */
enum processor_type alpha_tune;
enum alpha_fp_trap_mode alpha_fptm;
-/* Save information from a "cmpxx" operation until the branch or scc is
- emitted. */
-
-struct alpha_compare alpha_compare;
-
/* Nonzero if inside of a function, because the Alpha asm can't
handle .files inside of functions. */
{ "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
{ "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
{ "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
- { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
- { 0, 0, 0 }
+ { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
};
+ int const ct_size = ARRAY_SIZE (cpu_table);
int i;
/* Unicos/Mk doesn't have shared libraries. */
if (alpha_cpu_string)
{
- for (i = 0; cpu_table [i].name; i++)
+ for (i = 0; i < ct_size; i++)
if (! strcmp (alpha_cpu_string, cpu_table [i].name))
{
alpha_tune = alpha_cpu = cpu_table [i].processor;
target_flags |= cpu_table [i].flags;
break;
}
- if (! cpu_table [i].name)
+ if (i == ct_size)
error ("bad value %qs for -mcpu switch", alpha_cpu_string);
}
if (alpha_tune_string)
{
- for (i = 0; cpu_table [i].name; i++)
+ for (i = 0; i < ct_size; i++)
if (! strcmp (alpha_tune_string, cpu_table [i].name))
{
alpha_tune = cpu_table [i].processor;
break;
}
- if (! cpu_table [i].name)
+ if (i == ct_size)
error ("bad value %qs for -mcpu switch", alpha_tune_string);
}
rtx tmp = op;
if (GET_CODE (tmp) == SUBREG)
tmp = SUBREG_REG (tmp);
- if (GET_CODE (tmp) == REG
+ if (REG_P (tmp)
&& REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
{
op = reg_equiv_memory_loc[REGNO (tmp)];
tmp = NEXT_INSN (tmp);
if (!tmp)
return NULL_RTX;
- if (GET_CODE (tmp) == JUMP_INSN
+ if (JUMP_P (tmp)
&& GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
return PATTERN (tmp);
return NULL_RTX;
enum tls_model model;
if (GET_CODE (symbol) != SYMBOL_REF)
- return 0;
+ return TLS_MODEL_NONE;
model = SYMBOL_REF_TLS_MODEL (symbol);
/* Local-exec with a 64-bit size is the same code as initial-exec. */
   any of those forms can be surrounded with an AND that clears the
low-order three bits; this is an "unaligned" access. */
-bool
-alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
+static bool
+alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
/* If this is an ldq_u type address, discard the outer AND. */
if (mode == DImode
&& GET_CODE (x) == AND
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) == -8)
x = XEXP (x, 0);
{
if (! strict
&& NONSTRICT_REG_OK_FP_BASE_P (x)
- && GET_CODE (ofs) == CONST_INT)
+ && CONST_INT_P (ofs))
return true;
if ((strict
? STRICT_REG_OK_FOR_BASE_P (x)
}
}
- /* If we're managing explicit relocations, LO_SUM is valid, as
- are small data symbols. */
- else if (TARGET_EXPLICIT_RELOCS)
+ /* If we're managing explicit relocations, LO_SUM is valid, as are small
+ data symbols. Avoid explicit relocations of modes larger than word
+	   mode, since e.g. $LC0+8($1) can fold around a +/- 32k offset.  */
+ else if (TARGET_EXPLICIT_RELOCS
+ && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
if (small_symbolic_operand (x, Pmode))
return true;
/* Try machine-dependent ways of modifying an illegitimate address
to be legitimate. If we find one, return the new, valid address. */
-rtx
-alpha_legitimize_address (rtx x, rtx scratch,
- enum machine_mode mode ATTRIBUTE_UNUSED)
+static rtx
+alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
HOST_WIDE_INT addend;
valid offset, compute the high part of the constant and add it to
the register. Then our address is (plus temp low-part-const). */
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1))
&& ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
{
addend = INTVAL (XEXP (x, 1));
if (can_create_pseudo_p ()
&& GET_CODE (x) == CONST
&& GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
{
addend = INTVAL (XEXP (XEXP (x, 0), 1));
x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
our address. */
if (can_create_pseudo_p ()
&& GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
+ && REG_P (XEXP (x, 0))
&& GET_CODE (XEXP (x, 1)) == CONST
&& GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
- && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
{
addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
goto split_addend;
}
- /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
- if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
+ /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
+     Avoid modes larger than word mode, since e.g. $LC0+8($1) can fold
+ around +/- 32k offset. */
+ if (TARGET_EXPLICIT_RELOCS
+ && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ && symbolic_operand (x, Pmode))
{
rtx r0, r16, eqv, tga, tp, insn, dest, seq;
emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
insn = gen_call_value_osf_tlsgd (r0, tga, seq);
insn = emit_call_insn (insn);
- CONST_OR_PURE_CALL_P (insn) = 1;
+ RTL_CONST_CALL_P (insn) = 1;
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
insn = get_insns ();
emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
insn = gen_call_value_osf_tlsldm (r0, tga, seq);
insn = emit_call_insn (insn);
- CONST_OR_PURE_CALL_P (insn) = 1;
+ RTL_CONST_CALL_P (insn) = 1;
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
insn = get_insns ();
}
}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. Return X or the new, valid address. */
+
+static rtx
+alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
+{
+ rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
+ return new_x ? new_x : x;
+}
+
/* Primarily this is required for TLS symbols, but given that our move
patterns *ought* to be able to handle any symbol at any time, we
should never be spilling symbolic operands to the constant pool, ever. */
/* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && REG_P (XEXP (XEXP (x, 0), 0))
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && CONST_INT_P (XEXP (x, 1)))
{
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, type);
+ opnum, (enum reload_type) type);
return x;
}
splitting the addend across an ldah and the mem insn. This
     cuts the number of extra insns needed from 3 to 1.  */
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
+ && REG_P (XEXP (x, 0))
&& REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
&& REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, type);
+ opnum, (enum reload_type) type);
return x;
}
scanned. In either case, *TOTAL contains the cost result. */
static bool
-alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
+alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
+ bool speed)
{
enum machine_mode mode = GET_MODE (x);
bool float_mode_p = FLOAT_MODE_P (mode);
const struct alpha_rtx_cost_data *cost_data;
- if (optimize_size)
+ if (!speed)
cost_data = &alpha_rtx_cost_size;
else
cost_data = &alpha_rtx_cost_data[alpha_tune];
*total = COSTS_N_INSNS (15);
else
/* Otherwise we do a load from the GOT. */
- *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
+ *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
return true;
case HIGH:
else if (GET_CODE (XEXP (x, 0)) == MULT
&& const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
{
- *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
- + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
+ *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
+ (enum rtx_code) outer_code, speed)
+ + rtx_cost (XEXP (x, 1),
+ (enum rtx_code) outer_code, speed)
+ + COSTS_N_INSNS (1));
return true;
}
return false;
return false;
case ASHIFT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) <= 3)
{
*total = COSTS_N_INSNS (1);
return false;
case MEM:
- *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
+ *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
return true;
case NEG:
return false;
case FLOAT_EXTEND:
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
*total = 0;
else
*total = cost_data->fp_add;
rtx base;
HOST_WIDE_INT disp, offset;
- gcc_assert (GET_CODE (ref) == MEM);
+ gcc_assert (MEM_P (ref));
if (reload_in_progress
&& ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
rtx base;
HOST_WIDE_INT offset = 0;
- gcc_assert (GET_CODE (ref) == MEM);
+ gcc_assert (MEM_P (ref));
if (reload_in_progress
&& ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
/* On the Alpha, all (non-symbolic) constants except zero go into
a floating-point register via memory. Note that we cannot
- return anything that is not a subset of CLASS, and that some
+ return anything that is not a subset of RCLASS, and that some
symbolic constants cannot be dropped to memory. */
enum reg_class
-alpha_preferred_reload_class(rtx x, enum reg_class class)
+alpha_preferred_reload_class(rtx x, enum reg_class rclass)
{
/* Zero is present in any register class. */
if (x == CONST0_RTX (GET_MODE (x)))
- return class;
+ return rclass;
/* These sorts of constants we can easily drop to memory. */
- if (GET_CODE (x) == CONST_INT
+ if (CONST_INT_P (x)
|| GET_CODE (x) == CONST_DOUBLE
|| GET_CODE (x) == CONST_VECTOR)
{
- if (class == FLOAT_REGS)
+ if (rclass == FLOAT_REGS)
return NO_REGS;
- if (class == ALL_REGS)
+ if (rclass == ALL_REGS)
return GENERAL_REGS;
- return class;
+ return rclass;
}
/* All other kinds of constants should not (and in the case of HIGH
cannot) be dropped to memory -- instead we use a GENERAL_REGS
secondary reload. */
if (CONSTANT_P (x))
- return (class == ALL_REGS ? GENERAL_REGS : class);
+ return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
- return class;
+ return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
- CLASS requires an extra scratch or immediate register. Return the class
+ RCLASS requires an extra scratch or immediate register. Return the class
needed for the immediate register. */
static enum reg_class
-alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
+alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
enum machine_mode mode, secondary_reload_info *sri)
{
/* Loading and storing HImode or QImode values to and from memory
/* We also cannot do integral arithmetic into FP regs, as might result
from register elimination into a DImode fp register. */
- if (class == FLOAT_REGS)
+ if (rclass == FLOAT_REGS)
{
if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
return GENERAL_REGS;
{
rtx x = *xp, orig = (rtx) data;
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
return 0;
MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
return -1;
}
-/* Given INSN, which is an INSN list or the PATTERN of a single insn
- generated to perform a memory operation, look for any MEMs in either
+/* Given SEQ, which is an INSN list, look for any MEMs in either
a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
volatile flags from REF into each of the MEMs found. If REF is not
a MEM, don't do anything. */
void
-alpha_set_memflags (rtx insn, rtx ref)
+alpha_set_memflags (rtx seq, rtx ref)
{
- rtx *base_ptr;
+ rtx insn;
- if (GET_CODE (ref) != MEM)
+ if (!MEM_P (ref))
return;
/* This is only called from alpha.md, after having had something
&& !MEM_READONLY_P (ref))
return;
- if (INSN_P (insn))
- base_ptr = &PATTERN (insn);
- else
- base_ptr = &insn;
- for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
+ for (insn = seq; insn; insn = NEXT_INSN (insn))
+ if (INSN_P (insn))
+ for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
+ else
+ gcc_unreachable ();
}
\f
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
HOST_WIDE_INT c, int n, bool no_output)
{
- HOST_WIDE_INT new;
+ HOST_WIDE_INT new_const;
int i, bits;
/* Use a pseudo if highly optimizing and still generating RTL. */
rtx subtarget
/* First, see if minus some low bits, we've an easy load of
high bits. */
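  /* Editorial worked example: for c = 0x12348765, the expression below
     sign-extends the low 16 bits, giving new_const = -0x789b (low bits
     0x8765), so c - new_const = 0x12350000 has a clear low half and can
     be built with LDAH-style high-part instructions alone.  */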
- new = ((c & 0xffff) ^ 0x8000) - 0x8000;
- if (new != 0)
+ new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
+ if (new_const != 0)
{
- temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
+ temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
if (temp)
{
if (no_output)
return temp;
- return expand_binop (mode, add_optab, temp, GEN_INT (new),
+ return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
target, 0, OPTAB_WIDEN);
}
}
if (bits > 0)
for (; bits > 0; bits--)
{
- new = c >> bits;
- temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ new_const = c >> bits;
+ temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
if (!temp && c < 0)
{
- new = (unsigned HOST_WIDE_INT)c >> bits;
- temp = alpha_emit_set_const (subtarget, mode, new,
+ new_const = (unsigned HOST_WIDE_INT)c >> bits;
+ temp = alpha_emit_set_const (subtarget, mode, new_const,
i, no_output);
}
if (temp)
if (bits > 0)
for (; bits > 0; bits--)
{
- new = c << bits;
- temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ new_const = c << bits;
+ temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
if (!temp)
{
- new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
- temp = alpha_emit_set_const (subtarget, mode, new,
+ new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
+ temp = alpha_emit_set_const (subtarget, mode, new_const,
i, no_output);
}
if (temp)
if (bits > 0)
for (; bits > 0; bits--)
{
- new = c << bits;
- temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ new_const = c << bits;
+ temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
if (!temp)
{
- new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
- temp = alpha_emit_set_const (subtarget, mode, new,
+ new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
+ temp = alpha_emit_set_const (subtarget, mode, new_const,
i, no_output);
}
if (temp)
constant except that all bytes that are 0 are changed to be 0xff. If we
can, then we can do a ZAPNOT to obtain the desired constant. */
- new = c;
+ new_const = c;
for (i = 0; i < 64; i += 8)
- if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
- new |= (HOST_WIDE_INT) 0xff << i;
+ if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
+ new_const |= (HOST_WIDE_INT) 0xff << i;
/* We are only called for SImode and DImode. If this is SImode, ensure that
we are sign extended to a full word. */
if (mode == SImode)
- new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
- if (new != c)
+ if (new_const != c)
{
- temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
+ temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
if (temp)
{
if (no_output)
return temp;
- return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
+ return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
target, 0, OPTAB_WIDEN);
}
}
/* If we can't make any pseudos, TARGET is an SImode hard register, we
can't load this constant in one insn, do this in DImode. */
if (!can_create_pseudo_p () && mode == SImode
- && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
{
result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
if (result)
x = simplify_subreg (DImode, x, GET_MODE (x), 0);
- if (GET_CODE (x) == CONST_INT)
+ if (CONST_INT_P (x))
{
i0 = INTVAL (x);
i1 = -(i0 < 0);
switch (GET_CODE (x))
{
- case CONST:
case LABEL_REF:
case HIGH:
return true;
+ case CONST:
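+      /* Look through (const (plus (symbol_ref ...) (const_int ...))) so
+         that the TLS check below also covers offsetted symbols; anything
+         else under CONST is accepted as before.  */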
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ x = XEXP (XEXP (x, 0), 0);
+ else
+ return true;
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return true;
+
+ /* FALLTHRU */
+
case SYMBOL_REF:
/* TLS symbols are never valid. */
return SYMBOL_REF_TLS_MODEL (x) == 0;
bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
+ rtx tmp;
+
/* If the output is not a register, the input must be. */
- if (GET_CODE (operands[0]) == MEM
+ if (MEM_P (operands[0])
&& ! reg_or_0_operand (operands[1], mode))
operands[1] = force_reg (mode, operands[1]);
/* Allow legitimize_address to perform some simplifications. */
if (mode == Pmode && symbolic_operand (operands[1], mode))
{
- rtx tmp;
-
- tmp = alpha_legitimize_address (operands[1], operands[0], mode);
+ tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
if (tmp)
{
if (tmp == operands[0])
return false;
/* Split large integers. */
- if (GET_CODE (operands[1]) == CONST_INT
+ if (CONST_INT_P (operands[1])
|| GET_CODE (operands[1]) == CONST_DOUBLE
|| GET_CODE (operands[1]) == CONST_VECTOR)
{
}
/* Otherwise we've nothing left but to drop the thing to memory. */
- operands[1] = force_const_mem (mode, operands[1]);
+ tmp = force_const_mem (mode, operands[1]);
+
+ if (tmp == NULL_RTX)
+ return false;
+
if (reload_in_progress)
{
- emit_move_insn (operands[0], XEXP (operands[1], 0));
- operands[1] = replace_equiv_address (operands[1], operands[0]);
+ emit_move_insn (operands[0], XEXP (tmp, 0));
+ operands[1] = replace_equiv_address (tmp, operands[0]);
}
else
- operands[1] = validize_mem (operands[1]);
+ operands[1] = validize_mem (tmp);
return false;
}
get_aligned_mem (operands[1], &aligned_mem, &bitnum);
subtarget = operands[0];
- if (GET_CODE (subtarget) == REG)
+ if (REG_P (subtarget))
subtarget = gen_lowpart (DImode, subtarget), copyout = false;
else
subtarget = gen_reg_rtx (DImode), copyout = true;
temp2 = gen_reg_rtx (DImode);
subtarget = operands[0];
- if (GET_CODE (subtarget) == REG)
+ if (REG_P (subtarget))
subtarget = gen_lowpart (DImode, subtarget), copyout = false;
else
subtarget = gen_reg_rtx (DImode), copyout = true;
/* Emit the comparison and branch for a conditional branch.  */
-rtx
-alpha_emit_conditional_branch (enum rtx_code code)
+void
+alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
{
enum rtx_code cmp_code, branch_code;
- enum machine_mode cmp_mode, branch_mode = VOIDmode;
- rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
+ enum machine_mode branch_mode = VOIDmode;
+ enum rtx_code code = GET_CODE (operands[0]);
+ rtx op0 = operands[1], op1 = operands[2];
rtx tem;
- if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
+ if (cmp_mode == TFmode)
{
op0 = alpha_emit_xfloating_compare (&code, op0, op1);
op1 = const0_rtx;
- alpha_compare.fp_p = 0;
+ cmp_mode = DImode;
}
/* The general case: fold the comparison code to the types of compares
case GE: case GT: case GEU: case GTU:
/* For FP, we swap them, for INT, we reverse them. */
- if (alpha_compare.fp_p)
+ if (cmp_mode == DFmode)
{
cmp_code = swap_condition (code);
branch_code = NE;
gcc_unreachable ();
}
- if (alpha_compare.fp_p)
+ if (cmp_mode == DFmode)
{
- cmp_mode = DFmode;
- if (flag_unsafe_math_optimizations)
+ if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
{
/* When we are not as concerned about non-finite values, and we
are comparing against zero, we can branch directly. */
}
else
{
- cmp_mode = DImode;
-
/* The following optimizations are only for signed compares. */
if (code != LEU && code != LTU && code != GEU && code != GTU)
{
/* ??? Don't do this when comparing against symbols, otherwise
we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
be declared false out of hand (at least for non-weak). */
- else if (GET_CODE (op1) == CONST_INT
+ else if (CONST_INT_P (op1)
&& (code == EQ || code == NE)
&& !(symbolic_operand (op0, VOIDmode)
- || (GET_CODE (op0) == REG && REG_POINTER (op0))))
+ || (REG_P (op0) && REG_POINTER (op0))))
{
rtx n_op1 = GEN_INT (-INTVAL (op1));
emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
}
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
-
- /* Return the branch comparison. */
- return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
+ /* Emit the branch instruction. */
+ tem = gen_rtx_SET (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_fmt_ee (branch_code,
+ branch_mode, tem,
+ CONST0_RTX (cmp_mode)),
+ gen_rtx_LABEL_REF (VOIDmode,
+ operands[3]),
+ pc_rtx));
+ emit_jump_insn (tem);
}
/* Certain simplifications can be done to make invalid setcc operations
   valid.  Emit the final comparison and return true, or return false
   if we can't work.  */
-rtx
-alpha_emit_setcc (enum rtx_code code)
+bool
+alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
{
enum rtx_code cmp_code;
- rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
- int fp_p = alpha_compare.fp_p;
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx op0 = operands[2], op1 = operands[3];
rtx tmp;
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
-
- if (fp_p && GET_MODE (op0) == TFmode)
+ if (cmp_mode == TFmode)
{
op0 = alpha_emit_xfloating_compare (&code, op0, op1);
op1 = const0_rtx;
- fp_p = 0;
+ cmp_mode = DImode;
}
- if (fp_p && !TARGET_FIX)
- return NULL_RTX;
+ if (cmp_mode == DFmode && !TARGET_FIX)
+    return false;
/* The general case: fold the comparison code to the types of compares
that we have, choosing the branch as necessary. */
case EQ: case LE: case LT: case LEU: case LTU:
case UNORDERED:
/* We have these compares. */
- if (fp_p)
+ if (cmp_mode == DFmode)
cmp_code = code, code = NE;
break;
case NE:
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
break;
/* FALLTHRU */
case GE: case GT: case GEU: case GTU:
/* These normally need swapping, but for integer zero we have
special patterns that recognize swapped operands. */
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
break;
code = swap_condition (code);
- if (fp_p)
+ if (cmp_mode == DFmode)
cmp_code = code, code = NE;
tmp = op0, op0 = op1, op1 = tmp;
break;
gcc_unreachable ();
}
- if (!fp_p)
+ if (cmp_mode == DImode)
{
if (!register_operand (op0, DImode))
op0 = force_reg (DImode, op0);
/* Emit an initial compare instruction, if necessary. */
if (cmp_code != UNKNOWN)
{
- enum machine_mode mode = fp_p ? DFmode : DImode;
-
- tmp = gen_reg_rtx (mode);
+ tmp = gen_reg_rtx (cmp_mode);
emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
+ gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
- op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
+ op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
op1 = const0_rtx;
}
- /* Return the setcc comparison. */
- return gen_rtx_fmt_ee (code, DImode, op0, op1);
+ /* Emit the setcc instruction. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_fmt_ee (code, DImode, op0, op1)));
+ return true;
}
{
enum rtx_code code = GET_CODE (cmp);
enum rtx_code cmov_code = NE;
- rtx op0 = alpha_compare.op0;
- rtx op1 = alpha_compare.op1;
- int fp_p = alpha_compare.fp_p;
+ rtx op0 = XEXP (cmp, 0);
+ rtx op1 = XEXP (cmp, 1);
enum machine_mode cmp_mode
= (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
- enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
enum machine_mode cmov_mode = VOIDmode;
int local_fast_math = flag_unsafe_math_optimizations;
rtx tem;
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
+ gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
- if (fp_p != FLOAT_MODE_P (mode))
+ if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
{
enum rtx_code cmp_code;
case GE: case GT: case GEU: case GTU:
/* These normally need swapping, but for integer zero we have
special patterns that recognize swapped operands. */
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
cmp_code = code, code = NE;
else
{
gcc_unreachable ();
}
- tem = gen_reg_rtx (cmp_op_mode);
+ tem = gen_reg_rtx (cmp_mode);
emit_insn (gen_rtx_SET (VOIDmode, tem,
- gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
+ gen_rtx_fmt_ee (cmp_code, cmp_mode,
op0, op1)));
- cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
- op0 = gen_lowpart (cmp_op_mode, tem);
- op1 = CONST0_RTX (cmp_op_mode);
- fp_p = !fp_p;
+ cmp_mode = cmp_mode == DImode ? DFmode : DImode;
+ op0 = gen_lowpart (cmp_mode, tem);
+ op1 = CONST0_RTX (cmp_mode);
local_fast_math = 1;
}
/* We may be able to use a conditional move directly.
This avoids emitting spurious compares. */
if (signed_comparison_operator (cmp, VOIDmode)
- && (!fp_p || local_fast_math)
+ && (cmp_mode == DImode || local_fast_math)
&& (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
gcc_unreachable ();
}
- if (!fp_p)
+ if (cmp_mode == DImode)
{
if (!reg_or_0_operand (op0, DImode))
op0 = force_reg (DImode, op0);
/* ??? We mark the branch mode to be CCmode to prevent the compare
and cmov from being combined, since the compare insn follows IEEE
rules that the cmov does not. */
- if (fp_p && !local_fast_math)
+ if (cmp_mode == DFmode && !local_fast_math)
cmov_mode = CCmode;
- tem = gen_reg_rtx (cmp_op_mode);
- emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
- return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
+ tem = gen_reg_rtx (cmp_mode);
+ emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
+ return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
}
/* Simplify a conditional move of two constants into a setcc with
/* Look up the X_floating library function name for the
given operation. */
-struct xfloating_op GTY(())
+struct GTY(()) xfloating_op
{
const enum rtx_code code;
const char *const GTY((skip)) osf_func;
break;
case VOIDmode:
- gcc_assert (GET_CODE (operands[i]) == CONST_INT);
+ gcc_assert (CONST_INT_P (operands[i]));
/* FALLTHRU */
case DImode:
reg = gen_rtx_REG (DImode, regno);
tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
const0_rtx, const0_rtx));
CALL_INSN_FUNCTION_USAGE (tmp) = usage;
- CONST_OR_PURE_CALL_P (tmp) = 1;
+ RTL_CONST_CALL_P (tmp) = 1;
tmp = get_insns ();
end_sequence ();
emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
break;
case 8:
- emit_insn (gen_insql_le (insl, src, addr));
+ emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
break;
}
}
/* Look for additional alignment information from recorded register info. */
tmp = XEXP (orig_src, 0);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
else if (GET_CODE (tmp) == PLUS
- && GET_CODE (XEXP (tmp, 0)) == REG
- && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ && REG_P (XEXP (tmp, 0))
+ && CONST_INT_P (XEXP (tmp, 1)))
{
unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
}
tmp = XEXP (orig_dst, 0);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
else if (GET_CODE (tmp) == PLUS
- && GET_CODE (XEXP (tmp, 0)) == REG
- && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ && REG_P (XEXP (tmp, 0))
+ && CONST_INT_P (XEXP (tmp, 1)))
{
unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
/* Look for stricter alignment. */
tmp = XEXP (orig_dst, 0);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
else if (GET_CODE (tmp) == PLUS
- && GET_CODE (XEXP (tmp, 0)) == REG
- && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ && REG_P (XEXP (tmp, 0))
+ && CONST_INT_P (XEXP (tmp, 1)))
{
HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
- REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
+ add_reg_note (x, REG_BR_PROB, very_unlikely);
}
/* A subroutine of the atomic operation splitters. Emit a load-locked
emit_load_locked (mode, before, mem);
if (code == NOT)
- x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
+ {
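+      /* Editorial note: compute the AND first and then invert the result,
+	 so that fetch-and-nand yields ~(before & val) rather than the
+	 older (~before) & val interpretation.  */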
+ x = gen_rtx_AND (mode, before, val);
+ emit_insn (gen_rtx_SET (VOIDmode, val, x));
+
+ x = gen_rtx_NOT (mode, val);
+ }
else
x = gen_rtx_fmt_ee (code, mode, before, val);
if (after)
enum machine_mode mode = GET_MODE (mem);
rtx label, x, cond = gen_lowpart (DImode, scratch);
- emit_insn (gen_memory_barrier ());
-
label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
emit_label (XEXP (label, 0));
x = gen_rtx_EQ (DImode, cond, const0_rtx);
emit_unlikely_jump (x, label);
+
+ emit_insn (gen_memory_barrier ());
}
void
mem = gen_rtx_MEM (DImode, align);
MEM_VOLATILE_P (mem) = 1;
- emit_insn (gen_memory_barrier ());
label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
emit_label (XEXP (label, 0));
x = gen_rtx_EQ (DImode, scratch, const0_rtx);
emit_unlikely_jump (x, label);
+
+ emit_insn (gen_memory_barrier ());
}
\f
/* Adjust the cost of a scheduling dependency. Return the new cost of
\f
/* Machine-specific function data. */
-struct machine_function GTY(())
+struct GTY(()) machine_function
{
/* For unicosmk. */
/* List of call information words for calls from this function. */
/* For TARGET_LD_BUGGY_LDGP. */
struct rtx_def *gp_save_rtx;
+
+ /* For VMS condition handlers. */
+ bool uses_condition_handler;
};
/* How to allocate a 'struct machine_function'. */
ggc_alloc_cleared (sizeof (struct machine_function)));
}
+/* Support for frame based VMS condition handlers. */
+
+/* A VMS condition handler may be established for a function with a call to
+ __builtin_establish_vms_condition_handler, and cancelled with a call to
+ __builtin_revert_vms_condition_handler.
+
+ The VMS Condition Handling Facility knows about the existence of a handler
+   from the procedure descriptor .handler field.  Like the VMS native
+   compilers, we store the user-specified handler's address at a fixed
+   location in the stack frame and point the procedure descriptor at a
+   common wrapper which fetches the real handler's address and issues an
+   indirect call.
+
+ The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
+
+ We force the procedure kind to PT_STACK, and the fixed frame location is
+ fp+8, just before the register save area. We use the handler_data field in
+ the procedure descriptor to state the fp offset at which the installed
+ handler address can be found. */
+
+#define VMS_COND_HANDLER_FP_OFFSET 8
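+
+/* Editorial sketch, for illustration only (not part of this interface):
+   the libgcc wrapper named above behaves roughly as below.  The exact
+   signature and the frame-pointer accessor are assumptions made for
+   exposition; the real wrapper follows the VMS condition handler
+   convention and argument vectors.
+
+     int
+     __gcc_shell_handler (void *sigargs, void *mechargs)
+     {
+       char *fp = establisher_frame (mechargs);   // hypothetical helper
+       int (*user_handler) (void *, void *)
+	 = *(int (**) (void *, void *)) (fp + VMS_COND_HANDLER_FP_OFFSET);
+       return user_handler (sigargs, mechargs);
+     }
+*/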
+
+/* Expand code to store the currently installed user VMS condition handler
+ into TARGET and install HANDLER as the new condition handler. */
+
+void
+alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
+{
+ rtx handler_slot_address
+ = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
+
+ rtx handler_slot
+ = gen_rtx_MEM (DImode, handler_slot_address);
+
+ emit_move_insn (target, handler_slot);
+ emit_move_insn (handler_slot, handler);
+
+ /* Notify the start/prologue/epilogue emitters that the condition handler
+ slot is needed. In addition to reserving the slot space, this will force
+     the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
+ use above is correct. */
+ cfun->machine->uses_condition_handler = true;
+}
+
+/* Expand code to store the current VMS condition handler into TARGET and
+ nullify it. */
+
+void
+alpha_expand_builtin_revert_vms_condition_handler (rtx target)
+{
+  /* We implement this by establishing a null condition handler, with the
+     tiny side effect of setting uses_condition_handler.  This is a little
+     pessimistic if no establish call was ever issued, but that case is not
+     a real problem and is not expected to happen anyway.  */
+
+ alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
+}
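+
+/* Editorial usage sketch of the two builtins implemented above (my_handler
+   is a hypothetical user routine following the VMS condition handler
+   convention):
+
+     void *prev = __builtin_establish_vms_condition_handler (my_handler);
+     ...
+     void *cur = __builtin_revert_vms_condition_handler ();
+*/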
+
/* Functions to save and restore alpha_return_addr_rtx. */
/* Start the ball rolling with RETURN_ADDR_RTX. */
x = XVECEXP (x, 0, 0);
lituse = "lituse_tlsldm";
}
- else if (GET_CODE (x) == CONST_INT)
+ else if (CONST_INT_P (x))
lituse = "lituse_jsr";
else
{
break;
case 'r':
/* If this operand is the constant zero, write it as "$31". */
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fprintf (file, "%s", reg_names[REGNO (x)]);
else if (x == CONST0_RTX (GET_MODE (x)))
fprintf (file, "$31");
case 'R':
/* Similar, but for floating-point. */
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fprintf (file, "%s", reg_names[REGNO (x)]);
else if (x == CONST0_RTX (GET_MODE (x)))
fprintf (file, "$f31");
case 'N':
/* Write the 1's complement of a constant. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%N value");
fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
case 'P':
/* Write 1 << C, for a constant C. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%P value");
fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
case 'h':
/* Write the high-order 16 bits of a constant, sign-extended. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%h value");
fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
case 'L':
/* Write the low-order 16 bits of a constant, sign-extended. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%L value");
fprintf (file, HOST_WIDE_INT_PRINT_DEC,
fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
}
- else if (GET_CODE (x) == CONST_INT)
+ else if (CONST_INT_P (x))
{
HOST_WIDE_INT mask = 0, value = INTVAL (x);
case 'M':
/* 'b', 'w', 'l', or 'q' as the value of the constant. */
- if (GET_CODE (x) != CONST_INT
+ if (!CONST_INT_P (x)
|| (INTVAL (x) != 8 && INTVAL (x) != 16
&& INTVAL (x) != 32 && INTVAL (x) != 64))
output_operand_lossage ("invalid %%M value");
case 'U':
/* Similar, except do it from the mask. */
- if (GET_CODE (x) == CONST_INT)
+ if (CONST_INT_P (x))
{
HOST_WIDE_INT value = INTVAL (x);
/* Write the constant value divided by 8 for little-endian mode or
(56 - value) / 8 for big-endian mode. */
- if (GET_CODE (x) != CONST_INT
+ if (!CONST_INT_P (x)
|| (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
? 56
: 64)
case 'S':
      /* Same, except compute (64 - c) / 8.  */
- if (GET_CODE (x) != CONST_INT
+ if (!CONST_INT_P (x)
&& (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
&& (INTVAL (x) & 7) != 8)
output_operand_lossage ("invalid %%s value");
case 'A':
/* Write "_u" for unaligned access. */
- if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
+ if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
fprintf (file, "_u");
break;
case 0:
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fprintf (file, "%s", reg_names[REGNO (x)]);
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
output_address (XEXP (x, 0));
else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
{
addr = XEXP (addr, 0);
if (GET_CODE (addr) == PLUS
- && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (addr, 1)))
{
offset = INTVAL (XEXP (addr, 1));
addr = XEXP (addr, 0);
cxt = convert_memory_address (mode, cxt);
#endif
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ rtx temp1, traddr;
+ const char *fnname;
+ char *trname;
+
+ /* Construct the name of the trampoline entry point. */
+ fnname = XSTR (fnaddr, 0);
+ trname = (char *) alloca (strlen (fnname) + 5);
+ strcpy (trname, fnname);
+ strcat (trname, "..tr");
+ traddr = gen_rtx_SYMBOL_REF
+ (mode, ggc_alloc_string (trname, strlen (trname) + 1));
+
+      /* The trampoline (or "bounded") procedure descriptor is constructed
+	 from the function's procedure descriptor with certain fields zeroed
+	 in accordance with the VMS calling standard.  This is stored in the
+	 first quadword.  */
+ temp1 = force_reg (DImode, gen_rtx_MEM (DImode, fnaddr));
+ temp1 = expand_and (DImode, temp1,
+ GEN_INT (0xffff0fff0000fff0), NULL_RTX);
+ addr = memory_address (mode, plus_constant (tramp, 0));
+ emit_move_insn (gen_rtx_MEM (DImode, addr), temp1);
+
+ /* Trampoline transfer address is stored in the second quadword
+ of the trampoline. */
+ addr = memory_address (mode, plus_constant (tramp, 8));
+ emit_move_insn (gen_rtx_MEM (mode, addr), traddr);
+ }
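+
+  /* Editorial summary of the resulting VMS trampoline layout: quadword 0
+     holds the masked ("bounded") procedure descriptor, quadword 1 the
+     fnname..tr transfer address; FNADDR and CXT are then stored at the
+     usual offsets just below.  */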
+
/* Store function address and CXT. */
addr = memory_address (mode, plus_constant (tramp, fnofs));
emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
#ifdef ENABLE_EXECUTE_STACK
emit_library_call (init_one_libfunc ("__enable_execute_stack"),
- 0, VOIDmode, 1, tramp, Pmode);
+ LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
#endif
if (jmpofs >= 0)
enum machine_mode mode)
{
unsigned int regnum, dummy;
- enum mode_class class;
+ enum mode_class mclass;
gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
if (valtype)
mode = TYPE_MODE (valtype);
- class = GET_MODE_CLASS (mode);
- switch (class)
+ mclass = GET_MODE_CLASS (mode);
+ switch (mclass)
{
case MODE_INT:
PROMOTE_MODE (mode, dummy, valtype);
return ptr_type_node;
record = (*lang_hooks.types.make_type) (RECORD_TYPE);
- type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
+ type_decl = build_decl (BUILTINS_LOCATION,
+ TYPE_DECL, get_identifier ("__va_list_tag"), record);
TREE_CHAIN (record) = type_decl;
TYPE_NAME (record) = type_decl;
/* C++? SET_IS_AGGR_TYPE (record, 1); */
/* Dummy field to prevent alignment warnings. */
- space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
+ space = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, NULL_TREE, integer_type_node);
DECL_FIELD_CONTEXT (space) = record;
DECL_ARTIFICIAL (space) = 1;
DECL_IGNORED_P (space) = 1;
- ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
+ ofs = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__offset"),
integer_type_node);
DECL_FIELD_CONTEXT (ofs) = record;
TREE_CHAIN (ofs) = space;
- base = build_decl (FIELD_DECL, get_identifier ("__base"),
+ base = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__base"),
ptr_type_node);
DECL_FIELD_CONTEXT (base) = record;
TREE_CHAIN (base) = ofs;
/* Helper function for alpha_stdarg_optimize_hook. Skip over casts
and constant additions. */
-static tree
+static gimple
va_list_skip_additions (tree lhs)
{
- tree rhs, stmt;
-
- if (TREE_CODE (lhs) != SSA_NAME)
- return lhs;
+ gimple stmt;
for (;;)
{
+ enum tree_code code;
+
stmt = SSA_NAME_DEF_STMT (lhs);
- if (TREE_CODE (stmt) == PHI_NODE)
+ if (gimple_code (stmt) == GIMPLE_PHI)
return stmt;
- if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
- || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
- return lhs;
-
- rhs = GIMPLE_STMT_OPERAND (stmt, 1);
- if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
- rhs = TREE_OPERAND (rhs, 0);
+ if (!is_gimple_assign (stmt)
+ || gimple_assign_lhs (stmt) != lhs)
+ return NULL;
- if ((TREE_CODE (rhs) != NOP_EXPR
- && TREE_CODE (rhs) != CONVERT_EXPR
- && ((TREE_CODE (rhs) != PLUS_EXPR
- && TREE_CODE (rhs) != POINTER_PLUS_EXPR)
- || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
- || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
- || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
- return rhs;
+ if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
+ return stmt;
+ code = gimple_assign_rhs_code (stmt);
+ if (!CONVERT_EXPR_CODE_P (code)
+ && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
+ || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
+ || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
+ return stmt;
- lhs = TREE_OPERAND (rhs, 0);
+ lhs = gimple_assign_rhs1 (stmt);
}
}
current statement. */
static bool
-alpha_stdarg_optimize_hook (struct stdarg_info *si, const_tree lhs, const_tree rhs)
+alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
{
- tree base, offset, arg1, arg2;
+ tree base, offset, rhs;
int offset_arg = 1;
+ gimple base_stmt;
+ if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
+ != GIMPLE_SINGLE_RHS)
+ return false;
+
+ rhs = gimple_assign_rhs1 (stmt);
while (handled_component_p (rhs))
rhs = TREE_OPERAND (rhs, 0);
if (TREE_CODE (rhs) != INDIRECT_REF
|| TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
return false;
- lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
- if (lhs == NULL_TREE
- || TREE_CODE (lhs) != POINTER_PLUS_EXPR)
+ stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
+ if (stmt == NULL
+ || !is_gimple_assign (stmt)
+ || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
return false;
- base = TREE_OPERAND (lhs, 0);
+ base = gimple_assign_rhs1 (stmt);
if (TREE_CODE (base) == SSA_NAME)
- base = va_list_skip_additions (base);
+ {
+ base_stmt = va_list_skip_additions (base);
+ if (base_stmt
+ && is_gimple_assign (base_stmt)
+ && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
+ base = gimple_assign_rhs1 (base_stmt);
+ }
if (TREE_CODE (base) != COMPONENT_REF
|| TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
{
- base = TREE_OPERAND (lhs, 0);
+ base = gimple_assign_rhs2 (stmt);
if (TREE_CODE (base) == SSA_NAME)
- base = va_list_skip_additions (base);
+ {
+ base_stmt = va_list_skip_additions (base);
+ if (base_stmt
+ && is_gimple_assign (base_stmt)
+ && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
+ base = gimple_assign_rhs1 (base_stmt);
+ }
if (TREE_CODE (base) != COMPONENT_REF
|| TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
|| !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
return false;
- offset = TREE_OPERAND (lhs, offset_arg);
+ offset = gimple_op (stmt, 1 + offset_arg);
if (TREE_CODE (offset) == SSA_NAME)
- offset = va_list_skip_additions (offset);
-
- if (TREE_CODE (offset) == PHI_NODE)
{
- HOST_WIDE_INT sub;
-
- if (PHI_NUM_ARGS (offset) != 2)
- goto escapes;
+ gimple offset_stmt = va_list_skip_additions (offset);
- arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
- arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
- if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
+ if (offset_stmt
+ && gimple_code (offset_stmt) == GIMPLE_PHI)
{
- tree tem = arg1;
- arg1 = arg2;
- arg2 = tem;
+ HOST_WIDE_INT sub;
+ gimple arg1_stmt, arg2_stmt;
+ tree arg1, arg2;
+ enum tree_code code1, code2;
- if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
+ if (gimple_phi_num_args (offset_stmt) != 2)
goto escapes;
- }
- if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
- goto escapes;
- sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
- if (TREE_CODE (arg2) == MINUS_EXPR)
- sub = -sub;
- if (sub < -48 || sub > -32)
- goto escapes;
+ arg1_stmt
+ = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
+ arg2_stmt
+ = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
+ if (arg1_stmt == NULL
+ || !is_gimple_assign (arg1_stmt)
+ || arg2_stmt == NULL
+ || !is_gimple_assign (arg2_stmt))
+ goto escapes;
+
+ code1 = gimple_assign_rhs_code (arg1_stmt);
+ code2 = gimple_assign_rhs_code (arg2_stmt);
+ if (code1 == COMPONENT_REF
+ && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
+ /* Do nothing. */;
+ else if (code2 == COMPONENT_REF
+ && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
+ {
+ gimple tem = arg1_stmt;
+ code2 = code1;
+ arg1_stmt = arg2_stmt;
+ arg2_stmt = tem;
+ }
+ else
+ goto escapes;
- arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
- if (arg1 != arg2)
- goto escapes;
+ if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
+ goto escapes;
- if (TREE_CODE (arg1) == SSA_NAME)
- arg1 = va_list_skip_additions (arg1);
+ sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
+ if (code2 == MINUS_EXPR)
+ sub = -sub;
+ if (sub < -48 || sub > -32)
+ goto escapes;
- if (TREE_CODE (arg1) != COMPONENT_REF
- || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
- || get_base_address (arg1) != base)
- goto escapes;
+ arg1 = gimple_assign_rhs1 (arg1_stmt);
+ arg2 = gimple_assign_rhs1 (arg2_stmt);
+ if (TREE_CODE (arg2) == SSA_NAME)
+ {
+ arg2_stmt = va_list_skip_additions (arg2);
+ if (arg2_stmt == NULL
+ || !is_gimple_assign (arg2_stmt)
+ || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
+ goto escapes;
+ arg2 = gimple_assign_rhs1 (arg2_stmt);
+ }
+ if (arg1 != arg2)
+ goto escapes;
- /* Need floating point regs. */
- cfun->va_list_fpr_size |= 2;
+ if (TREE_CODE (arg1) != COMPONENT_REF
+ || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
+ || get_base_address (arg1) != base)
+ goto escapes;
+
+ /* Need floating point regs. */
+ cfun->va_list_fpr_size |= 2;
+ return false;
+ }
+ if (offset_stmt
+ && is_gimple_assign (offset_stmt)
+ && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
+ offset = gimple_assign_rhs1 (offset_stmt);
}
- else if (TREE_CODE (offset) != COMPONENT_REF
- || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
- || get_base_address (offset) != base)
+ if (TREE_CODE (offset) != COMPONENT_REF
+ || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
+ || get_base_address (offset) != base)
goto escapes;
else
/* Need general regs. */
{
nextarg = plus_constant (nextarg, offset);
nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
make_tree (ptr_type_node, nextarg));
TREE_SIDE_EFFECTS (t) = 1;
t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
size_int (offset));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
- offset_field, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
}
static tree
-alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
+alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
+ gimple_seq *pre_p)
{
- tree type_size, ptr_type, addend, t, addr, internal_post;
+ tree type_size, ptr_type, addend, t, addr;
+ gimple_seq internal_post;
/* If the type could not be passed in registers, skip the block
reserved for the registers. */
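  /* Editorial note: that block is 6 argument registers of 8 bytes each,
     hence the 6*8 just below.  */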
if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
{
t = build_int_cst (TREE_TYPE (offset), 6*8);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
- build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
- gimplify_and_add (t, pre_p);
+ gimplify_assign (offset,
+ build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
+ pre_p);
}
addend = offset;
fold_convert (sizetype, addend));
internal_post = NULL;
gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
- append_to_statement_list (internal_post, pre_p);
+ gimple_seq_add_seq (pre_p, internal_post);
/* Update the offset field. */
type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
t = size_binop (MULT_EXPR, t, size_int (8));
}
t = fold_convert (TREE_TYPE (offset), t);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
- build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
- gimplify_and_add (t, pre_p);
+ gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
+ pre_p);
return build_va_arg_indirect_ref (addr);
}
static tree
-alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
tree offset_field, base_field, offset, base, t, r;
bool indirect;
r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
/* Stuff the offset temporary back into its field. */
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
- fold_convert (TREE_TYPE (offset_field), offset));
- gimplify_and_add (t, pre_p);
+ gimplify_assign (unshare_expr (offset_field),
+ fold_convert (TREE_TYPE (offset_field), offset), pre_p);
if (indirect)
r = build_va_arg_indirect_ref (r);
ALPHA_BUILTIN_RPCC,
ALPHA_BUILTIN_THREAD_POINTER,
ALPHA_BUILTIN_SET_THREAD_POINTER,
+ ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
+ ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
/* TARGET_MAX */
ALPHA_BUILTIN_MINUB8,
ALPHA_BUILTIN_max
};
-static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
+static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
CODE_FOR_builtin_cmpbge,
CODE_FOR_builtin_extbl,
CODE_FOR_builtin_extwl,
CODE_FOR_builtin_rpcc,
CODE_FOR_load_tp,
CODE_FOR_set_tp,
+ CODE_FOR_builtin_establish_vms_condition_handler,
+ CODE_FOR_builtin_revert_vms_condition_handler,
/* TARGET_MAX */
CODE_FOR_builtin_minub8,
NULL, NULL);
TREE_NOTHROW (decl) = 1;
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ ftype = build_function_type_list (ptr_type_node, ptr_type_node,
+ NULL_TREE);
+ add_builtin_function ("__builtin_establish_vms_condition_handler", ftype,
+ ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
+ BUILT_IN_MD, NULL, NULL_TREE);
+
+ ftype = build_function_type_list (ptr_type_node, void_type_node,
+ NULL_TREE);
+ add_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
+ ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
+ BUILT_IN_MD, NULL, NULL_TREE);
+ }
+
alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
insn_op = &insn_data[icode].operand[arity + nonvoid];
- op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
+ op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
if (!(*insn_op->predicate) (op[arity], insn_op->mode))
op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
tree op0 = fold_convert (vtype, op[0]);
tree op1 = fold_convert (vtype, op[1]);
tree val = fold_build2 (code, vtype, op0, op1);
- return fold_convert (long_integer_type_node, val);
+ return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
}
static tree
/* When outputting a thunk, we don't have valid register life info,
but assemble_start_function wants to output .frame and .mask
directives. */
- if (crtl->is_thunk)
+ if (cfun->is_thunk)
{
*imaskP = 0;
*fmaskP = 0;
if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
vms_save_fp_regno = i;
- if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
+  /* A VMS condition handler requires a stack procedure in our
+     implementation (it is not required by the calling standard).  */
+ if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
+ || cfun->machine->uses_condition_handler)
vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
else if (alpha_procedure_type == PT_NULL)
vms_base_regno = REG_PV;
vms_unwind_regno = (vms_base_regno == REG_PV
? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
- /* If this is a stack procedure, allow space for saving FP and RA. */
+ /* If this is a stack procedure, allow space for saving FP, RA and
+ a condition handler slot if needed. */
if (alpha_procedure_type == PT_STACK)
- sa_size += 2;
+ sa_size += 2 + cfun->machine->uses_condition_handler;
}
else
{
#if TARGET_ABI_OPEN_VMS
-const struct attribute_spec vms_attribute_table[] =
+static const struct attribute_spec vms_attribute_table[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
{ "overlaid", 0, 0, true, false, false, NULL },
return 1;
/* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
- if (crtl->is_thunk)
+ if (cfun->is_thunk)
return 1;
/* The nonlocal receiver pattern assumes that the gp is valid for
mem = gen_rtx_MEM (DImode, addr);
}
- REG_NOTES (insn)
- = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, mem, frame_reg),
- REG_NOTES (insn));
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, mem, frame_reg));
}
}
+ crtl->args.pretend_args_size));
if (TARGET_ABI_OPEN_VMS)
- reg_offset = 8;
+ reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
else
reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
possibly intuit through the loop above. So we invent this
note it looks at instead. */
RTX_FRAME_RELATED_P (seq) = 1;
- REG_NOTES (seq)
- = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- GEN_INT (TARGET_ABI_UNICOSMK
- ? -frame_size + 64
- : -frame_size))),
- REG_NOTES (seq));
+ add_reg_note (seq, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (TARGET_ABI_UNICOSMK
+ ? -frame_size + 64
+ : -frame_size))));
}
if (!TARGET_ABI_UNICOSMK)
if (TARGET_ABI_OPEN_VMS)
{
+ /* Register frame procedures save the fp. */
if (alpha_procedure_type == PT_REGISTER)
- /* Register frame procedures save the fp.
- ?? Ought to have a dwarf2 save for this. */
- emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
- hard_frame_pointer_rtx);
+ {
+ rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
+ hard_frame_pointer_rtx);
+ add_reg_note (insn, REG_CFA_REGISTER, NULL);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
/* Offset from base reg to register save area. */
HOST_WIDE_INT reg_offset;
char *entry_label = (char *) alloca (strlen (fnname) + 6);
+ char *tramp_label = (char *) alloca (strlen (fnname) + 6);
int i;
/* Don't emit an extern directive for functions defined in the same file. */
TREE_ASM_WRITTEN (name_tree) = 1;
}
+#if TARGET_ABI_OPEN_VMS
+ if (vms_debug_main
+ && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
+ {
+ targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
+ ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
+ switch_to_section (text_section);
+ vms_debug_main = NULL;
+ }
+#endif
+
alpha_fnname = fnname;
sa_size = alpha_sa_size ();
+ crtl->args.pretend_args_size));
if (TARGET_ABI_OPEN_VMS)
- reg_offset = 8;
+ reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
else
reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
Otherwise, do it here. */
if (TARGET_ABI_OSF
&& ! alpha_function_needs_gp
- && ! crtl->is_thunk)
+ && ! cfun->is_thunk)
{
putc ('$', file);
assemble_name (file, fnname);
fputs ("..ng:\n", file);
}
}
+ /* Nested functions on VMS that are potentially called via trampoline
+     get a special transfer entry point that loads the called function's
+ procedure descriptor and static chain. */
+ if (TARGET_ABI_OPEN_VMS
+ && !TREE_PUBLIC (decl)
+ && DECL_CONTEXT (decl)
+ && !TYPE_P (DECL_CONTEXT (decl)))
+ {
+ strcpy (tramp_label, fnname);
+ strcat (tramp_label, "..tr");
+ ASM_OUTPUT_LABEL (file, tramp_label);
+ fprintf (file, "\tldq $1,24($27)\n");
+ fprintf (file, "\tldq $27,16($27)\n");
+ }
strcpy (entry_label, fnname);
if (TARGET_ABI_OPEN_VMS)
}
#if TARGET_ABI_OPEN_VMS
+ /* If a user condition handler has been installed at some point, emit
+ the procedure descriptor bits to point the Condition Handling Facility
+ at the indirection wrapper, and state the fp offset at which the user
+ handler may be found. */
+ if (cfun->machine->uses_condition_handler)
+ {
+ fprintf (file, "\t.handler __gcc_shell_handler\n");
+ fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
+ }
+
/* Ifdef'ed cause link_section are only available then. */
switch_to_section (readonly_data_section);
fprintf (file, "\t.align 3\n");
fputs ("\t.prologue 0\n", file);
else if (!flag_inhibit_size_directive)
fprintf (file, "\t.prologue %d\n",
- alpha_function_needs_gp || crtl->is_thunk);
+ alpha_function_needs_gp || cfun->is_thunk);
}
/* Write function epilogue. */
-/* ??? At some point we will want to support full unwind, and so will
- need to mark the epilogue as well. At the moment, we just confuse
- dwarf2out. */
-#undef FRP
-#define FRP(exp) exp
-
void
alpha_expand_epilogue (void)
{
HOST_WIDE_INT reg_offset;
int fp_is_frame_pointer, fp_offset;
rtx sa_reg, sa_reg_exp = NULL;
- rtx sp_adj1, sp_adj2, mem;
+ rtx sp_adj1, sp_adj2, mem, reg, insn;
rtx eh_ofs;
+ rtx cfa_restores = NULL_RTX;
int i;
sa_size = alpha_sa_size ();
if (TARGET_ABI_OPEN_VMS)
{
if (alpha_procedure_type == PT_STACK)
- reg_offset = 8;
+ reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
else
reg_offset = 0;
}
if ((TARGET_ABI_OPEN_VMS
&& vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
|| (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
- FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
+ emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
/* Cope with very large offsets to the register save area. */
if (reg_offset + sa_size > 0x8000)
sa_reg = gen_rtx_REG (DImode, 22);
sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
- FRP (emit_move_insn (sa_reg, sa_reg_exp));
+ emit_move_insn (sa_reg, sa_reg_exp);
}
/* Restore registers in order, excepting a true frame pointer. */
mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
if (! eh_ofs)
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
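+ /* Queue REG_CFA_RESTORE notes for every reloaded register; they are
+ all attached to the final stack pointer adjustment below.  */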
+ reg = gen_rtx_REG (DImode, REG_RA);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset += 8;
imask &= ~(1UL << REG_RA);
{
mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
+ reg = gen_rtx_REG (DImode, i);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
+ cfa_restores);
}
reg_offset += 8;
}
{
mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
+ reg = gen_rtx_REG (DFmode, i+32);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset += 8;
}
}
mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
+ reg = gen_rtx_REG (DImode, i);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset -= 8;
}
mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
+ reg = gen_rtx_REG (DFmode, i+32);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset -= 8;
}
/* Restore the return address from the DSIB. */
-
- mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
+ mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
+ reg = gen_rtx_REG (DImode, REG_RA);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
}
if (frame_size || eh_ofs)
else if (TARGET_ABI_UNICOSMK)
{
sp_adj1 = gen_rtx_REG (DImode, 23);
- FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
+ emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
sp_adj2 = const0_rtx;
}
else if (frame_size < 0x40007fffL)
else
{
sp_adj1 = gen_rtx_REG (DImode, 23);
- FRP (emit_move_insn (sp_adj1, sp_adj2));
+ emit_move_insn (sp_adj1, sp_adj2);
}
sp_adj2 = GEN_INT (low);
}
else
{
rtx tmp = gen_rtx_REG (DImode, 23);
- FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
- 3, false));
+ sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
if (!sp_adj2)
{
/* We can't drop new things to memory this late, afaik,
so build it up by pieces. */
- FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
- -(frame_size < 0)));
+ sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
+ -(frame_size < 0));
gcc_assert (sp_adj2);
}
}
mem = gen_rtx_MEM (DImode,
plus_constant (hard_frame_pointer_rtx, -16));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
+ emit_move_insn (hard_frame_pointer_rtx, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
else if (fp_is_frame_pointer)
{
emit_insn (gen_blockage ());
mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
+ emit_move_insn (hard_frame_pointer_rtx, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
else if (TARGET_ABI_OPEN_VMS)
{
emit_insn (gen_blockage ());
- FRP (emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, vms_save_fp_regno)));
+ emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno));
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
/* Restore the stack pointer. */
emit_insn (gen_blockage ());
if (sp_adj2 == const0_rtx)
- FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
+ insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
else
- FRP (emit_move_insn (stack_pointer_rtx,
- gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
+ insn = emit_move_insn (stack_pointer_rtx,
+ gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
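+
+ /* Attach the queued register restores to the stack pointer adjustment,
+ and note that the CFA is the stack pointer once more.  */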
+ REG_NOTES (insn) = cfa_restores;
+ add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
}
else
{
+ gcc_assert (cfa_restores == NULL);
+
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
{
emit_insn (gen_blockage ());
- FRP (emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, vms_save_fp_regno)));
+ insn = emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno));
+ add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
}
else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
{
/* Decrement the frame pointer if the function does not have a
frame. */
-
emit_insn (gen_blockage ());
- FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
- hard_frame_pointer_rtx, constm1_rtx)));
+ emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx, constm1_rtx));
}
}
}
insn = get_last_insn ();
if (!INSN_P (insn))
insn = prev_active_insn (insn);
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
+#if TARGET_ABI_OSF
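+ /* Thunk RTL is generated and assembled by hand, outside the normal
+ compilation flow (see alpha_output_mi_thunk_osf below), so its
+ function data is released here instead.  */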
+ if (cfun->is_thunk)
+ free_after_compilation (cfun);
+#endif
+
#if TARGET_ABI_OPEN_VMS
alpha_write_linkage (file, fnname, decl);
#endif
}
}
+#if TARGET_ABI_OPEN_VMS
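+/* ASM_OUTPUT_EXTERNAL hook for VMS: give DO_CRTL_NAMES, when defined,
+ a chance to act on the external reference NAME (typically mapping it
+ to a DEC C RTL entry point).  */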
+void
+avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED,
+ const char *name)
+{
+#ifdef DO_CRTL_NAMES
+ DO_CRTL_NAMES;
+#endif
+}
+#endif
+
#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
tree function)
{
HOST_WIDE_INT hi, lo;
- rtx this, insn, funexp;
+ rtx this_rtx, insn, funexp;
+
+ gcc_assert (cfun->is_thunk);
/* We always require a valid GP. */
emit_insn (gen_prologue_ldgp ());
/* Find the "this" pointer. If the function returns a structure,
the structure return pointer is in $16. */
if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, 17);
+ this_rtx = gen_rtx_REG (Pmode, 17);
else
- this = gen_rtx_REG (Pmode, 16);
+ this_rtx = gen_rtx_REG (Pmode, 16);
/* Add DELTA. When possible we use ldah+lda. Otherwise load the
entire constant for the add. */
if (hi + lo == delta)
{
if (hi)
- emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
if (lo)
- emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
}
else
{
rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
delta, -(delta < 0));
- emit_insn (gen_adddi3 (this, this, tmp));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
}
/* Add a delta stored in the vtable at VCALL_OFFSET. */
rtx tmp, tmp2;
tmp = gen_rtx_REG (Pmode, 0);
- emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
tmp2 = tmp;
emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
- emit_insn (gen_adddi3 (this, this, tmp));
+ emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
}
/* Generate a tail call to the target function. */
final_start_function (insn, file, 1);
final (insn, file, 1);
final_end_function ();
- free_after_compilation (cfun);
}
#endif /* TARGET_ABI_OSF */
\f
for (i = get_insns (); i ; i = NEXT_INSN (i))
{
- if (GET_CODE (i) == NOTE)
+ if (NOTE_P (i))
{
switch (NOTE_KIND (i))
{
{
if (alpha_tp == ALPHA_TP_FUNC)
{
- if (GET_CODE (i) == JUMP_INSN
+ if (JUMP_P (i)
&& GET_CODE (PATTERN (i)) == RETURN)
goto close_shadow;
}
}
if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
- && GET_CODE (i) == INSN
+ && NONJUMP_INSN_P (i)
&& GET_CODE (PATTERN (i)) != USE
&& GET_CODE (PATTERN (i)) != CLOBBER
&& get_attr_trap (i) == TRAP_YES)
len += 4;
/* Haifa doesn't do well scheduling branches. */
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
goto next_and_done;
next:
/* Haifa doesn't do well scheduling branches. */
/* ??? If this is predicted not-taken, slotting continues, except
that no more IBR, FBR, or JSR insns may be slotted. */
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
goto next_and_done;
next:
ofs = prev_in_use = 0;
i = get_insns ();
- if (GET_CODE (i) == NOTE)
+ if (NOTE_P (i))
i = next_nonnote_insn (i);
ldgp = alpha_function_needs_gp ? 8 : 0;
next = (*next_group) (i, &in_use, &len);
/* When we see a label, resync alignment etc. */
- if (GET_CODE (i) == CODE_LABEL)
+ if (LABEL_P (i))
{
unsigned int new_align = 1 << label_to_alignment (i);
rtx prev, where;
where = prev = prev_nonnote_insn (i);
- if (!where || GET_CODE (where) != CODE_LABEL)
+ if (!where || !LABEL_P (where))
where = i;
/* Can't realign between a call and its gp reload. */
if (! (TARGET_EXPLICIT_RELOCS
- && prev && GET_CODE (prev) == CALL_INSN))
+ && prev && CALL_P (prev)))
{
emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
align = 1 << new_log_align;
where = prev_nonnote_insn (i);
if (where)
{
- if (GET_CODE (where) == CODE_LABEL)
+ if (LABEL_P (where))
{
rtx where2 = prev_nonnote_insn (where);
- if (where2 && GET_CODE (where2) == JUMP_INSN)
+ if (where2 && JUMP_P (where2))
where = where2;
}
- else if (GET_CODE (where) == INSN)
+ else if (NONJUMP_INSN_P (where))
where = i;
}
else
i = next;
}
}
+
+/* Insert an unop between a noreturn function call and a following GP load. */
+
+static void
+alpha_pad_noreturn (void)
+{
+ rtx insn, next;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (!CALL_P (insn)
+ || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
+ continue;
+
+ next = next_active_insn (insn);
+
+ if (next)
+ {
+ rtx pat = PATTERN (next);
+
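+ /* A GP load (ldgp) directly after the noreturn call is the pattern
+ the linker relaxation pass mishandles; keep the two apart with an
+ explicit unop.  */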
+ if (GET_CODE (pat) == SET
+ && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
+ && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
+ emit_insn_after (gen_unop (), insn);
+ }
+ }
+}
\f
/* Machine dependent reorg pass. */
static void
alpha_reorg (void)
{
+ /* Workaround for a linker error that triggers when an
+ exception handler immediately follows a noreturn function.
+
+ The instruction stream from an object file:
+
+ 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
+ 58: 00 00 ba 27 ldah gp,0(ra)
+ 5c: 00 00 bd 23 lda gp,0(gp)
+ 60: 00 00 7d a7 ldq t12,0(gp)
+ 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
+
+ was converted in the final link pass to:
+
+ fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
+ fdb28: 00 00 fe 2f unop
+ fdb2c: 00 00 fe 2f unop
+ fdb30: 30 82 7d a7 ldq t12,-32208(gp)
+ fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
+
+ GP load instructions were wrongly cleared by the linker relaxation
+ pass. This workaround prevents removal of GP loads by inserting
+ an unop instruction between a noreturn function call and the
+ following exception handler prologue. */
+
+ if (current_function_has_exception_handlers ())
+ alpha_pad_noreturn ();
+
if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
alpha_handle_trap_shadows ();
enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
-struct alpha_links GTY(())
+struct GTY(()) alpha_links
{
int num;
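+ /* Alias-resolved name this linkage entry ultimately refers to.  */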
+ const char *target;
rtx linkage;
enum links_kind lkind;
enum reloc_kind rkind;
};
-struct alpha_funcs GTY(())
+struct GTY(()) alpha_funcs
{
int num;
splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
return GEN_INT (regval);
}
\f
-/* Make (or fake) .linkage entry for function call.
-
- IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
-
- Return an SYMBOL_REF rtx for the linkage. */
+/* Register the need for a (fake) .linkage entry for calls to function NAME.
+ IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
+ Return a SYMBOL_REF suited to the call instruction. */
rtx
alpha_need_linkage (const char *name, int is_local)
{
splay_tree_node node;
struct alpha_links *al;
+ const char *target;
+ tree id;
if (name[0] == '*')
name++;
/* Assume external if no definition. */
al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
- /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
- get_identifier (name);
+ /* Ensure we have an IDENTIFIER so assemble_name can mark it used,
+ and resolve the ultimate alias target the same way assemble_name does. */
+ id = get_identifier (name);
+ target = NULL;
+ while (IDENTIFIER_TRANSPARENT_ALIAS (id))
+ {
+ id = TREE_CHAIN (id);
+ target = IDENTIFIER_POINTER (id);
+ }
- /* Construct a SYMBOL_REF for us to call. */
- {
- size_t name_len = strlen (name);
- char *linksym = alloca (name_len + 6);
- linksym[0] = '$';
- memcpy (linksym + 1, name, name_len);
- memcpy (linksym + 1 + name_len, "..lk", 5);
- al->linkage = gen_rtx_SYMBOL_REF (Pmode,
- ggc_alloc_string (linksym, name_len + 5));
- }
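+ /* Record the alias target and build the SYMBOL_REF on the bare name;
+ the per-function $<N>..<name>..lk linkage symbols are now constructed
+ lazily in alpha_use_linkage.  */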
+ al->target = target ? target : name;
+ al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
(splay_tree_value) al);
return al->linkage;
}
+/* Return a SYMBOL_REF representing the reference to the .linkage entry
+ of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
+ this is the reference to the linkage pointer value, 0 if this is the
+ reference to the function entry value. RFLAG is 1 if this is a reduced
+ reference (code address only), 0 if this is a full reference. */
+
rtx
-alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
+alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
{
splay_tree_node cfunnode;
struct alpha_funcs *cfaf;
struct alpha_links *al;
- const char *name = XSTR (linkage, 0);
+ const char *name = XSTR (func, 0);
cfaf = (struct alpha_funcs *) 0;
al = (struct alpha_links *) 0;
{
size_t name_len;
size_t buflen;
- char buf [512];
char *linksym;
splay_tree_node node = 0;
struct alpha_links *anl;
name++;
name_len = strlen (name);
+ linksym = (char *) alloca (name_len + 50);
al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
al->num = cfaf->num;
{
anl = (struct alpha_links *) node->value;
al->lkind = anl->lkind;
+ name = anl->target;
}
- sprintf (buf, "$%d..%s..lk", cfaf->num, name);
- buflen = strlen (buf);
- linksym = alloca (buflen + 1);
- memcpy (linksym, buf, buflen + 1);
+ sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
+ buflen = strlen (linksym);
al->linkage = gen_rtx_SYMBOL_REF
(Pmode, ggc_alloc_string (linksym, buflen + 1));
}
rtx
-alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
+alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
tree cfundecl ATTRIBUTE_UNUSED,
int lflag ATTRIBUTE_UNUSED,
int rflag ATTRIBUTE_UNUSED)
int len;
x = DECL_RTL (cfun->decl);
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
x = XEXP (x, 0);
gcc_assert (GET_CODE (x) == SYMBOL_REF);
fnname = XSTR (x, 0);
emit_insn (gen_blockage ());
/* Set the new frame pointer. */
-
FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
stack_pointer_rtx, GEN_INT (64))));
-
}
else
{
/* Increment the frame pointer register to indicate that we do not
have a frame. */
-
- FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
- hard_frame_pointer_rtx, const1_rtx)));
+ emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx, const1_rtx));
}
}
set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
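+ /* On VMS route abort and memcmp to their DEC C RTL versions.  */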
+ abort_libfunc = init_one_libfunc ("decc$abort");
+ memcmp_libfunc = init_one_libfunc ("decc$memcmp");
+#ifdef MEM_LIBFUNCS_INIT
+ MEM_LIBFUNCS_INIT;
+#endif
}
}
/* Default unaligned ops are provided for ELF systems. To get unaligned
data for non-ELF systems, we have to turn off auto alignment. */
-#ifndef OBJECT_FORMAT_ELF
+#if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
+
#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
-#define TARGET_ADDRESS_COST hook_int_rtx_0
+#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
-#undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
-#undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f