#include "tree-stdarg.h"
#include "tm-constrs.h"
#include "df.h"
+#include "libfuncs.h"
/* Specify which cpu to schedule for. */
enum processor_type alpha_tune;
enum alpha_fp_trap_mode alpha_fptm;
-/* Save information from a "cmpxx" operation until the branch or scc is
- emitted. */
-
-struct alpha_compare alpha_compare;
-
/* Nonzero if inside of a function, because the Alpha asm can't
handle .files inside of functions. */
{ "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
{ "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
{ "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
- { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
- { 0, 0, 0 }
+ { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
};
+ int const ct_size = ARRAY_SIZE (cpu_table);
int i;
/* Unicos/Mk doesn't have shared libraries. */
if (alpha_cpu_string)
{
- for (i = 0; cpu_table [i].name; i++)
+ for (i = 0; i < ct_size; i++)
if (! strcmp (alpha_cpu_string, cpu_table [i].name))
{
alpha_tune = alpha_cpu = cpu_table [i].processor;
target_flags |= cpu_table [i].flags;
break;
}
- if (! cpu_table [i].name)
+ if (i == ct_size)
error ("bad value %qs for -mcpu switch", alpha_cpu_string);
}
if (alpha_tune_string)
{
- for (i = 0; cpu_table [i].name; i++)
+ for (i = 0; i < ct_size; i++)
if (! strcmp (alpha_tune_string, cpu_table [i].name))
{
alpha_tune = cpu_table [i].processor;
break;
}
- if (! cpu_table [i].name)
+ if (i == ct_size)
error ("bad value %qs for -mcpu switch", alpha_tune_string);
}
enum tls_model model;
if (GET_CODE (symbol) != SYMBOL_REF)
- return 0;
+ return TLS_MODEL_NONE;
model = SYMBOL_REF_TLS_MODEL (symbol);
/* Local-exec with a 64-bit size is the same code as initial-exec. */
any of those forms can be surrounded with an AND that clear the
low-order three bits; this is an "unaligned" access. */
-bool
-alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
+static bool
+alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
/* If this is an ldq_u type address, discard the outer AND. */
if (mode == DImode
/* Try machine-dependent ways of modifying an illegitimate address
to be legitimate. If we find one, return the new, valid address. */
-rtx
-alpha_legitimize_address (rtx x, rtx scratch, enum machine_mode mode)
+static rtx
+alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
HOST_WIDE_INT addend;
}
}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+   to be legitimate.  Return X itself if no valid replacement is
+   found, otherwise the new, valid address.  */
+
+static rtx
+alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+			  enum machine_mode mode)
+{
+  /* Delegate to the worker; a NULL result means no transformation
+     applied, in which case X is returned unchanged as the hook
+     contract requires.  */
+  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
+  return new_x ? new_x : x;
+}
+
/* Primarily this is required for TLS symbols, but given that our move
patterns *ought* to be able to handle any symbol at any time, we
should never be spilling symbolic operands to the constant pool, ever. */
switch (GET_CODE (x))
{
- case CONST:
case LABEL_REF:
case HIGH:
return true;
+ case CONST:
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ x = XEXP (XEXP (x, 0), 0);
+ else
+ return true;
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return true;
+
+ /* FALLTHRU */
+
case SYMBOL_REF:
/* TLS symbols are never valid. */
return SYMBOL_REF_TLS_MODEL (x) == 0;
/* Allow legitimize_address to perform some simplifications. */
if (mode == Pmode && symbolic_operand (operands[1], mode))
{
- tmp = alpha_legitimize_address (operands[1], operands[0], mode);
+ tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
if (tmp)
{
if (tmp == operands[0])
/* Generate the comparison for a conditional branch. */
-rtx
-alpha_emit_conditional_branch (enum rtx_code code)
+void
+alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
{
enum rtx_code cmp_code, branch_code;
- enum machine_mode cmp_mode, branch_mode = VOIDmode;
- rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
+ enum machine_mode branch_mode = VOIDmode;
+ enum rtx_code code = GET_CODE (operands[0]);
+ rtx op0 = operands[1], op1 = operands[2];
rtx tem;
- if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
+ if (cmp_mode == TFmode)
{
op0 = alpha_emit_xfloating_compare (&code, op0, op1);
op1 = const0_rtx;
- alpha_compare.fp_p = 0;
+ cmp_mode = DImode;
}
/* The general case: fold the comparison code to the types of compares
case GE: case GT: case GEU: case GTU:
/* For FP, we swap them, for INT, we reverse them. */
- if (alpha_compare.fp_p)
+ if (cmp_mode == DFmode)
{
cmp_code = swap_condition (code);
branch_code = NE;
gcc_unreachable ();
}
- if (alpha_compare.fp_p)
+ if (cmp_mode == DFmode)
{
- cmp_mode = DFmode;
if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
{
/* When we are not as concerned about non-finite values, and we
}
else
{
- cmp_mode = DImode;
-
/* The following optimizations are only for signed compares. */
if (code != LEU && code != LTU && code != GEU && code != GTU)
{
emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
}
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
-
- /* Return the branch comparison. */
- return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
+ /* Emit the branch instruction. */
+ tem = gen_rtx_SET (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_fmt_ee (branch_code,
+ branch_mode, tem,
+ CONST0_RTX (cmp_mode)),
+ gen_rtx_LABEL_REF (VOIDmode,
+ operands[3]),
+ pc_rtx));
+ emit_jump_insn (tem);
}
/* Certain simplifications can be done to make invalid setcc operations
valid. Return the final comparison, or NULL if we can't work. */
-rtx
-alpha_emit_setcc (enum rtx_code code)
+bool
+alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
{
enum rtx_code cmp_code;
- rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
- int fp_p = alpha_compare.fp_p;
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx op0 = operands[2], op1 = operands[3];
rtx tmp;
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
-
- if (fp_p && GET_MODE (op0) == TFmode)
+ if (cmp_mode == TFmode)
{
op0 = alpha_emit_xfloating_compare (&code, op0, op1);
op1 = const0_rtx;
- fp_p = 0;
+ cmp_mode = DImode;
}
- if (fp_p && !TARGET_FIX)
- return NULL_RTX;
+ if (cmp_mode == DFmode && !TARGET_FIX)
+ return 0;
/* The general case: fold the comparison code to the types of compares
that we have, choosing the branch as necessary. */
case EQ: case LE: case LT: case LEU: case LTU:
case UNORDERED:
/* We have these compares. */
- if (fp_p)
+ if (cmp_mode == DFmode)
cmp_code = code, code = NE;
break;
case NE:
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
break;
/* FALLTHRU */
case GE: case GT: case GEU: case GTU:
/* These normally need swapping, but for integer zero we have
special patterns that recognize swapped operands. */
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
break;
code = swap_condition (code);
- if (fp_p)
+ if (cmp_mode == DFmode)
cmp_code = code, code = NE;
tmp = op0, op0 = op1, op1 = tmp;
break;
gcc_unreachable ();
}
- if (!fp_p)
+ if (cmp_mode == DImode)
{
if (!register_operand (op0, DImode))
op0 = force_reg (DImode, op0);
/* Emit an initial compare instruction, if necessary. */
if (cmp_code != UNKNOWN)
{
- enum machine_mode mode = fp_p ? DFmode : DImode;
-
- tmp = gen_reg_rtx (mode);
+ tmp = gen_reg_rtx (cmp_mode);
emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
+ gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
- op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
+ op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
op1 = const0_rtx;
}
- /* Return the setcc comparison. */
- return gen_rtx_fmt_ee (code, DImode, op0, op1);
+ /* Emit the setcc instruction. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_fmt_ee (code, DImode, op0, op1)));
+ return true;
}
{
enum rtx_code code = GET_CODE (cmp);
enum rtx_code cmov_code = NE;
- rtx op0 = alpha_compare.op0;
- rtx op1 = alpha_compare.op1;
- int fp_p = alpha_compare.fp_p;
+ rtx op0 = XEXP (cmp, 0);
+ rtx op1 = XEXP (cmp, 1);
enum machine_mode cmp_mode
= (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
- enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
enum machine_mode cmov_mode = VOIDmode;
int local_fast_math = flag_unsafe_math_optimizations;
rtx tem;
- /* Zero the operands. */
- memset (&alpha_compare, 0, sizeof (alpha_compare));
+ gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
- if (fp_p != FLOAT_MODE_P (mode))
+ if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
{
enum rtx_code cmp_code;
case GE: case GT: case GEU: case GTU:
/* These normally need swapping, but for integer zero we have
special patterns that recognize swapped operands. */
- if (!fp_p && op1 == const0_rtx)
+ if (cmp_mode == DImode && op1 == const0_rtx)
cmp_code = code, code = NE;
else
{
gcc_unreachable ();
}
- tem = gen_reg_rtx (cmp_op_mode);
+ tem = gen_reg_rtx (cmp_mode);
emit_insn (gen_rtx_SET (VOIDmode, tem,
- gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
+ gen_rtx_fmt_ee (cmp_code, cmp_mode,
op0, op1)));
- cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
- op0 = gen_lowpart (cmp_op_mode, tem);
- op1 = CONST0_RTX (cmp_op_mode);
- fp_p = !fp_p;
+ cmp_mode = cmp_mode == DImode ? DFmode : DImode;
+ op0 = gen_lowpart (cmp_mode, tem);
+ op1 = CONST0_RTX (cmp_mode);
local_fast_math = 1;
}
/* We may be able to use a conditional move directly.
This avoids emitting spurious compares. */
if (signed_comparison_operator (cmp, VOIDmode)
- && (!fp_p || local_fast_math)
+ && (cmp_mode == DImode || local_fast_math)
&& (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
gcc_unreachable ();
}
- if (!fp_p)
+ if (cmp_mode == DImode)
{
if (!reg_or_0_operand (op0, DImode))
op0 = force_reg (DImode, op0);
/* ??? We mark the branch mode to be CCmode to prevent the compare
and cmov from being combined, since the compare insn follows IEEE
rules that the cmov does not. */
- if (fp_p && !local_fast_math)
+ if (cmp_mode == DFmode && !local_fast_math)
cmov_mode = CCmode;
- tem = gen_reg_rtx (cmp_op_mode);
- emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
- return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
+ tem = gen_reg_rtx (cmp_mode);
+ emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
+ return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
}
/* Simplify a conditional move of two constants into a setcc with
emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
break;
case 8:
- emit_insn (gen_insql_le (insl, src, addr));
+ emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
break;
}
}
/* For TARGET_LD_BUGGY_LDGP. */
struct rtx_def *gp_save_rtx;
+
+ /* For VMS condition handlers. */
+ bool uses_condition_handler;
};
/* How to allocate a 'struct machine_function'. */
ggc_alloc_cleared (sizeof (struct machine_function)));
}
+/* Support for frame based VMS condition handlers. */
+
+/* A VMS condition handler may be established for a function with a call to
+ __builtin_establish_vms_condition_handler, and cancelled with a call to
+ __builtin_revert_vms_condition_handler.
+
+ The VMS Condition Handling Facility knows about the existence of a handler
+   from the procedure descriptor .handler field.  Like the VMS native compilers,
+ we store the user specified handler's address at a fixed location in the
+ stack frame and point the procedure descriptor at a common wrapper which
+ fetches the real handler's address and issues an indirect call.
+
+ The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
+
+ We force the procedure kind to PT_STACK, and the fixed frame location is
+ fp+8, just before the register save area. We use the handler_data field in
+ the procedure descriptor to state the fp offset at which the installed
+ handler address can be found. */
+
+#define VMS_COND_HANDLER_FP_OFFSET 8
+
+/* Expand code to store the currently installed user VMS condition handler
+   into TARGET and install HANDLER as the new condition handler.  */
+
+void
+alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
+{
+  /* Address of the fixed frame slot that holds the installed handler.  */
+  rtx handler_slot_address
+    = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
+
+  rtx handler_slot
+    = gen_rtx_MEM (DImode, handler_slot_address);
+
+  /* Fetch the previous handler into TARGET, then store the new one.  */
+  emit_move_insn (target, handler_slot);
+  emit_move_insn (handler_slot, handler);
+
+  /* Notify the start/prologue/epilogue emitters that the condition handler
+     slot is needed.  In addition to reserving the slot space, this will force
+     the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
+     use above is correct.  */
+  cfun->machine->uses_condition_handler = true;
+}
+
+/* Expand code to store the current VMS condition handler into TARGET and
+   nullify it.  */
+
+void
+alpha_expand_builtin_revert_vms_condition_handler (rtx target)
+{
+  /* We implement this by establishing a null condition handler, with the tiny
+     side effect of setting uses_condition_handler.  This is a little bit
+     pessimistic if no actual builtin_establish call is ever issued, which is
+     not a real problem and expected never to happen anyway.  */
+
+  /* NOTE(review): a zero in the handler slot is presumably treated as
+     "no handler" by the __gcc_shell_handler wrapper -- confirm in libgcc.  */
+  alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
+}
+
/* Functions to save and restore alpha_return_addr_rtx. */
/* Start the ball rolling with RETURN_ADDR_RTX. */
cxt = convert_memory_address (mode, cxt);
#endif
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ rtx temp1, traddr;
+ const char *fnname;
+ char *trname;
+
+ /* Construct the name of the trampoline entry point. */
+ fnname = XSTR (fnaddr, 0);
+ trname = (char *) alloca (strlen (fnname) + 5);
+ strcpy (trname, fnname);
+ strcat (trname, "..tr");
+ traddr = gen_rtx_SYMBOL_REF
+ (mode, ggc_alloc_string (trname, strlen (trname) + 1));
+
+ /* Trampoline (or "bounded") procedure descriptor is constructed from
+ the function's procedure descriptor with certain fields zeroed IAW
+ the VMS calling standard. This is stored in the first quadword. */
+ temp1 = force_reg (DImode, gen_rtx_MEM (DImode, fnaddr));
+ temp1 = expand_and (DImode, temp1,
+ GEN_INT (0xffff0fff0000fff0), NULL_RTX);
+ addr = memory_address (mode, plus_constant (tramp, 0));
+ emit_move_insn (gen_rtx_MEM (DImode, addr), temp1);
+
+ /* Trampoline transfer address is stored in the second quadword
+ of the trampoline. */
+ addr = memory_address (mode, plus_constant (tramp, 8));
+ emit_move_insn (gen_rtx_MEM (mode, addr), traddr);
+ }
+
/* Store function address and CXT. */
addr = memory_address (mode, plus_constant (tramp, fnofs));
emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
#ifdef ENABLE_EXECUTE_STACK
emit_library_call (init_one_libfunc ("__enable_execute_stack"),
- 0, VOIDmode, 1, tramp, Pmode);
+ LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
#endif
if (jmpofs >= 0)
return ptr_type_node;
record = (*lang_hooks.types.make_type) (RECORD_TYPE);
- type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
+ type_decl = build_decl (BUILTINS_LOCATION,
+ TYPE_DECL, get_identifier ("__va_list_tag"), record);
TREE_CHAIN (record) = type_decl;
TYPE_NAME (record) = type_decl;
/* C++? SET_IS_AGGR_TYPE (record, 1); */
/* Dummy field to prevent alignment warnings. */
- space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
+ space = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, NULL_TREE, integer_type_node);
DECL_FIELD_CONTEXT (space) = record;
DECL_ARTIFICIAL (space) = 1;
DECL_IGNORED_P (space) = 1;
- ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
+ ofs = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__offset"),
integer_type_node);
DECL_FIELD_CONTEXT (ofs) = record;
TREE_CHAIN (ofs) = space;
- base = build_decl (FIELD_DECL, get_identifier ("__base"),
+ base = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__base"),
ptr_type_node);
DECL_FIELD_CONTEXT (base) = record;
TREE_CHAIN (base) = ofs;
ALPHA_BUILTIN_RPCC,
ALPHA_BUILTIN_THREAD_POINTER,
ALPHA_BUILTIN_SET_THREAD_POINTER,
+ ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
+ ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
/* TARGET_MAX */
ALPHA_BUILTIN_MINUB8,
ALPHA_BUILTIN_max
};
-static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
+static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
CODE_FOR_builtin_cmpbge,
CODE_FOR_builtin_extbl,
CODE_FOR_builtin_extwl,
CODE_FOR_builtin_rpcc,
CODE_FOR_load_tp,
CODE_FOR_set_tp,
+ CODE_FOR_builtin_establish_vms_condition_handler,
+ CODE_FOR_builtin_revert_vms_condition_handler,
/* TARGET_MAX */
CODE_FOR_builtin_minub8,
NULL, NULL);
TREE_NOTHROW (decl) = 1;
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ ftype = build_function_type_list (ptr_type_node, ptr_type_node,
+ NULL_TREE);
+ add_builtin_function ("__builtin_establish_vms_condition_handler", ftype,
+ ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
+ BUILT_IN_MD, NULL, NULL_TREE);
+
+ ftype = build_function_type_list (ptr_type_node, void_type_node,
+ NULL_TREE);
+ add_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
+ ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
+ BUILT_IN_MD, NULL, NULL_TREE);
+ }
+
alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
vms_save_fp_regno = i;
- if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
+      /* A VMS condition handler requires a stack procedure in our
+	 implementation (this is not required by the calling standard).  */
+ if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
+ || cfun->machine->uses_condition_handler)
vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
else if (alpha_procedure_type == PT_NULL)
vms_base_regno = REG_PV;
vms_unwind_regno = (vms_base_regno == REG_PV
? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
- /* If this is a stack procedure, allow space for saving FP and RA. */
+ /* If this is a stack procedure, allow space for saving FP, RA and
+ a condition handler slot if needed. */
if (alpha_procedure_type == PT_STACK)
- sa_size += 2;
+ sa_size += 2 + cfun->machine->uses_condition_handler;
}
else
{
}
#if TARGET_ABI_OPEN_VMS
+#define COMMON_OBJECT "common_object"
+
+/* Attribute handler for the VMS "common_object" attribute: mark the
+   decorated DECL as a common symbol.  The attribute's optional string
+   argument, if any, is later emitted by vms_output_aligned_decl_common
+   as an extra operand of the common directive.  */
+
+static tree
+common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
+		       tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
+		       bool *no_add_attrs ATTRIBUTE_UNUSED)
+{
+  tree decl = *node;
+  gcc_assert (DECL_P (decl));
+
+  DECL_COMMON (decl) = 1;
+  return NULL_TREE;
+}
-const struct attribute_spec vms_attribute_table[] =
+static const struct attribute_spec vms_attribute_table[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- { "overlaid", 0, 0, true, false, false, NULL },
- { "global", 0, 0, true, false, false, NULL },
- { "initialize", 0, 0, true, false, false, NULL },
- { NULL, 0, 0, false, false, false, NULL }
+ { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler },
+ { NULL, 0, 0, false, false, false, NULL }
};
+/* Output to FILE a common block declaration for object NAME of SIZE
+   bytes and alignment ALIGN (in bits), appending the string argument
+   of DECL's "common_object" attribute, if present, as an extra
+   operand.  */
+
+void
+vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
+			       unsigned HOST_WIDE_INT size,
+			       unsigned int align)
+{
+  tree attr = DECL_ATTRIBUTES (decl);
+  fprintf (file, "%s", COMMON_ASM_OP);
+  assemble_name (file, name);
+  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
+  /* ??? Unlike on OSF/1, the alignment factor is not in log units.  */
+  fprintf (file, ",%u", align / BITS_PER_UNIT);
+  if (attr)
+    {
+      /* Emit the attribute's argument, when one was given.  */
+      attr = lookup_attribute (COMMON_OBJECT, attr);
+      if (attr)
+	fprintf (file, ",%s",
+		 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
+    }
+  fputc ('\n', file);
+}
+
+#undef COMMON_OBJECT
+
#endif
static int
+ crtl->args.pretend_args_size));
if (TARGET_ABI_OPEN_VMS)
- reg_offset = 8;
+ reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
else
reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
if (TARGET_ABI_OPEN_VMS)
{
+ /* Register frame procedures save the fp. */
if (alpha_procedure_type == PT_REGISTER)
- /* Register frame procedures save the fp.
- ?? Ought to have a dwarf2 save for this. */
- emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
- hard_frame_pointer_rtx);
+ {
+ rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
+ hard_frame_pointer_rtx);
+ add_reg_note (insn, REG_CFA_REGISTER, NULL);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
/* Offset from base reg to register save area. */
HOST_WIDE_INT reg_offset;
char *entry_label = (char *) alloca (strlen (fnname) + 6);
+ char *tramp_label = (char *) alloca (strlen (fnname) + 6);
int i;
/* Don't emit an extern directive for functions defined in the same file. */
TREE_ASM_WRITTEN (name_tree) = 1;
}
+#if TARGET_ABI_OPEN_VMS
+ if (vms_debug_main
+ && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
+ {
+ targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
+ ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
+ switch_to_section (text_section);
+ vms_debug_main = NULL;
+ }
+#endif
+
alpha_fnname = fnname;
sa_size = alpha_sa_size ();
+ crtl->args.pretend_args_size));
if (TARGET_ABI_OPEN_VMS)
- reg_offset = 8;
+ reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
else
reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
fputs ("..ng:\n", file);
}
}
+ /* Nested functions on VMS that are potentially called via trampoline
+ get a special transfer entry point that loads the called functions
+ procedure descriptor and static chain. */
+ if (TARGET_ABI_OPEN_VMS
+ && !TREE_PUBLIC (decl)
+ && DECL_CONTEXT (decl)
+ && !TYPE_P (DECL_CONTEXT (decl)))
+ {
+ strcpy (tramp_label, fnname);
+ strcat (tramp_label, "..tr");
+ ASM_OUTPUT_LABEL (file, tramp_label);
+ fprintf (file, "\tldq $1,24($27)\n");
+ fprintf (file, "\tldq $27,16($27)\n");
+ }
strcpy (entry_label, fnname);
if (TARGET_ABI_OPEN_VMS)
}
#if TARGET_ABI_OPEN_VMS
+ /* If a user condition handler has been installed at some point, emit
+ the procedure descriptor bits to point the Condition Handling Facility
+ at the indirection wrapper, and state the fp offset at which the user
+ handler may be found. */
+ if (cfun->machine->uses_condition_handler)
+ {
+ fprintf (file, "\t.handler __gcc_shell_handler\n");
+ fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
+ }
+
/* Ifdef'ed cause link_section are only available then. */
switch_to_section (readonly_data_section);
fprintf (file, "\t.align 3\n");
/* Write function epilogue. */
-/* ??? At some point we will want to support full unwind, and so will
- need to mark the epilogue as well. At the moment, we just confuse
- dwarf2out. */
-#undef FRP
-#define FRP(exp) exp
-
void
alpha_expand_epilogue (void)
{
HOST_WIDE_INT reg_offset;
int fp_is_frame_pointer, fp_offset;
rtx sa_reg, sa_reg_exp = NULL;
- rtx sp_adj1, sp_adj2, mem;
+ rtx sp_adj1, sp_adj2, mem, reg, insn;
rtx eh_ofs;
+ rtx cfa_restores = NULL_RTX;
int i;
sa_size = alpha_sa_size ();
if (TARGET_ABI_OPEN_VMS)
{
if (alpha_procedure_type == PT_STACK)
- reg_offset = 8;
+ reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
else
reg_offset = 0;
}
if ((TARGET_ABI_OPEN_VMS
&& vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
|| (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
- FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
+ emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
/* Cope with very large offsets to the register save area. */
if (reg_offset + sa_size > 0x8000)
sa_reg = gen_rtx_REG (DImode, 22);
sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
- FRP (emit_move_insn (sa_reg, sa_reg_exp));
+ emit_move_insn (sa_reg, sa_reg_exp);
}
/* Restore registers in order, excepting a true frame pointer. */
mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
if (! eh_ofs)
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
+ reg = gen_rtx_REG (DImode, REG_RA);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset += 8;
imask &= ~(1UL << REG_RA);
{
mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
+ reg = gen_rtx_REG (DImode, i);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
+ cfa_restores);
}
reg_offset += 8;
}
{
mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
+ reg = gen_rtx_REG (DFmode, i+32);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset += 8;
}
}
mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
+ reg = gen_rtx_REG (DImode, i);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset -= 8;
}
mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
reg_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
+ reg = gen_rtx_REG (DFmode, i+32);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
reg_offset -= 8;
}
/* Restore the return address from the DSIB. */
-
- mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
+ mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
+ reg = gen_rtx_REG (DImode, REG_RA);
+ emit_move_insn (reg, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
}
if (frame_size || eh_ofs)
else if (TARGET_ABI_UNICOSMK)
{
sp_adj1 = gen_rtx_REG (DImode, 23);
- FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
+ emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
sp_adj2 = const0_rtx;
}
else if (frame_size < 0x40007fffL)
else
{
sp_adj1 = gen_rtx_REG (DImode, 23);
- FRP (emit_move_insn (sp_adj1, sp_adj2));
+ emit_move_insn (sp_adj1, sp_adj2);
}
sp_adj2 = GEN_INT (low);
}
else
{
rtx tmp = gen_rtx_REG (DImode, 23);
- FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
- 3, false));
+ sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
if (!sp_adj2)
{
/* We can't drop new things to memory this late, afaik,
so build it up by pieces. */
- FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
- -(frame_size < 0)));
+ sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
+ -(frame_size < 0));
gcc_assert (sp_adj2);
}
}
mem = gen_rtx_MEM (DImode,
plus_constant (hard_frame_pointer_rtx, -16));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
+ emit_move_insn (hard_frame_pointer_rtx, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
else if (fp_is_frame_pointer)
{
emit_insn (gen_blockage ());
mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
+ emit_move_insn (hard_frame_pointer_rtx, mem);
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
else if (TARGET_ABI_OPEN_VMS)
{
emit_insn (gen_blockage ());
- FRP (emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, vms_save_fp_regno)));
+ emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno));
+ cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
+ hard_frame_pointer_rtx, cfa_restores);
}
/* Restore the stack pointer. */
emit_insn (gen_blockage ());
if (sp_adj2 == const0_rtx)
- FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
+ insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
else
- FRP (emit_move_insn (stack_pointer_rtx,
- gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
+ insn = emit_move_insn (stack_pointer_rtx,
+ gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
+ REG_NOTES (insn) = cfa_restores;
+ add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
}
else
{
+ gcc_assert (cfa_restores == NULL);
+
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
{
emit_insn (gen_blockage ());
- FRP (emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, vms_save_fp_regno)));
+ insn = emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno));
+ add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
}
else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
{
/* Decrement the frame pointer if the function does not have a
frame. */
-
emit_insn (gen_blockage ());
- FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
- hard_frame_pointer_rtx, constm1_rtx)));
+ emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx, constm1_rtx));
}
}
}
}
}
+#if TARGET_ABI_OPEN_VMS
+/* Output-external hook for VMS.  NOTE(review): FILE and NAME appear to be
+   consumed only by the DO_CRTL_NAMES macro (presumably mapping references
+   to VMS CRTL entry points -- confirm against the VMS target headers);
+   with that macro undefined this function intentionally does nothing.  */
+void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
+{
+#ifdef DO_CRTL_NAMES
+  DO_CRTL_NAMES;
+#endif
+}
+#endif
+
#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
struct GTY(()) alpha_links
{
int num;
+ const char *target;
rtx linkage;
enum links_kind lkind;
enum reloc_kind rkind;
return GEN_INT (regval);
}
\f
-/* Make (or fake) .linkage entry for function call.
-
- IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
-
- Return an SYMBOL_REF rtx for the linkage. */
+/* Register the need for a (fake) .linkage entry for calls to function NAME.
+ IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
+ Return a SYMBOL_REF suited to the call instruction. */
rtx
alpha_need_linkage (const char *name, int is_local)
{
splay_tree_node node;
struct alpha_links *al;
+ const char *target;
+ tree id;
if (name[0] == '*')
name++;
/* Assume external if no definition. */
al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
- /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
- get_identifier (name);
+ /* Ensure we have an IDENTIFIER so assemble_name can mark it used
+ and find the ultimate alias target like assemble_name. */
+ id = get_identifier (name);
+ target = NULL;
+ while (IDENTIFIER_TRANSPARENT_ALIAS (id))
+ {
+ id = TREE_CHAIN (id);
+ target = IDENTIFIER_POINTER (id);
+ }
- /* Construct a SYMBOL_REF for us to call. */
- {
- size_t name_len = strlen (name);
- char *linksym = XALLOCAVEC (char, name_len + 6);
- linksym[0] = '$';
- memcpy (linksym + 1, name, name_len);
- memcpy (linksym + 1 + name_len, "..lk", 5);
- al->linkage = gen_rtx_SYMBOL_REF (Pmode,
- ggc_alloc_string (linksym, name_len + 5));
- }
+ al->target = target ? target : name;
+ al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
(splay_tree_value) al);
return al->linkage;
}
+/* Return a SYMBOL_REF representing the reference to the .linkage entry
+ of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
+ this is the reference to the linkage pointer value, 0 if this is the
+ reference to the function entry value. RFLAG is 1 if this a reduced
+ reference (code address only), 0 if this is a full reference. */
+
rtx
-alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
+alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
{
splay_tree_node cfunnode;
struct alpha_funcs *cfaf;
struct alpha_links *al;
- const char *name = XSTR (linkage, 0);
+ const char *name = XSTR (func, 0);
cfaf = (struct alpha_funcs *) 0;
al = (struct alpha_links *) 0;
{
size_t name_len;
size_t buflen;
- char buf [512];
char *linksym;
splay_tree_node node = 0;
struct alpha_links *anl;
name++;
name_len = strlen (name);
+ linksym = (char *) alloca (name_len + 50);
al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
al->num = cfaf->num;
{
anl = (struct alpha_links *) node->value;
al->lkind = anl->lkind;
+ name = anl->target;
}
- sprintf (buf, "$%d..%s..lk", cfaf->num, name);
- buflen = strlen (buf);
- linksym = XALLOCAVEC (char, buflen + 1);
- memcpy (linksym, buf, buflen + 1);
+ sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
+ buflen = strlen (linksym);
al->linkage = gen_rtx_SYMBOL_REF
(Pmode, ggc_alloc_string (linksym, buflen + 1));
}
}
-/* Given a decl, a section name, and whether the decl initializer
- has relocs, choose attributes for the section. */
-
-#define SECTION_VMS_OVERLAY SECTION_FORGET
-#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
-#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
-
-static unsigned int
-vms_section_type_flags (tree decl, const char *name, int reloc)
-{
- unsigned int flags = default_section_type_flags (decl, name, reloc);
-
- if (decl && DECL_ATTRIBUTES (decl)
- && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
- flags |= SECTION_VMS_OVERLAY;
- if (decl && DECL_ATTRIBUTES (decl)
- && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
- flags |= SECTION_VMS_GLOBAL;
- if (decl && DECL_ATTRIBUTES (decl)
- && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
- flags |= SECTION_VMS_INITIALIZE;
-
- return flags;
-}
-
/* Switch to an arbitrary section NAME with attributes as specified
by FLAGS. ALIGN specifies any known alignment requirements for
the section; 0 if the default should be used. */
fputc ('\n', asm_out_file);
fprintf (asm_out_file, ".section\t%s", name);
- if (flags & SECTION_VMS_OVERLAY)
- fprintf (asm_out_file, ",OVR");
- if (flags & SECTION_VMS_GLOBAL)
- fprintf (asm_out_file, ",GBL");
- if (flags & SECTION_VMS_INITIALIZE)
- fprintf (asm_out_file, ",NOMOD");
if (flags & SECTION_DEBUG)
fprintf (asm_out_file, ",NOWRT");
}
rtx
-alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
+alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
tree cfundecl ATTRIBUTE_UNUSED,
int lflag ATTRIBUTE_UNUSED,
int rflag ATTRIBUTE_UNUSED)
emit_insn (gen_blockage ());
/* Set the new frame pointer. */
-
FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
stack_pointer_rtx, GEN_INT (64))));
-
}
else
{
/* Increment the frame pointer register to indicate that we do not
have a frame. */
-
- FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
- hard_frame_pointer_rtx, const1_rtx)));
+ emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx, const1_rtx));
}
}
set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
+ abort_libfunc = init_one_libfunc ("decc$abort");
+ memcmp_libfunc = init_one_libfunc ("decc$memcmp");
+#ifdef MEM_LIBFUNCS_INIT
+ MEM_LIBFUNCS_INIT;
+#endif
}
}
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
-# undef TARGET_SECTION_TYPE_FLAGS
-# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif
#undef TARGET_IN_SMALL_DATA_P
/* Default unaligned ops are provided for ELF systems. To get unaligned
data for non-ELF systems, we have to turn off auto alignment. */
-#ifndef OBJECT_FORMAT_ELF
+#if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
+
#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
-#undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
-#undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f