/* Subroutines for insn-output.c for SPARC.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"
+#include "dwarf2out.h"
/* Processor costs */
static const
/* The alias set for the structure return value. */
static GTY(()) alias_set_type struct_value_alias_set;
-/* Save the operands last given to a compare for use when we
- generate a scc or bcc insn. */
-rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;
-
/* Vector to say how input registers are mapped to output registers.
HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
eliminate it. You must use -fomit-frame-pointer to get that. */
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1};
-struct machine_function GTY(())
+struct GTY(()) machine_function
{
/* Some local-dynamic TLS symbol name. */
const char *some_ld_name;
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
+static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
-static void emit_pic_helper (void);
-static void load_pic_register (bool);
+static void load_pic_register (void);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
-#ifdef OBJECT_FORMAT_ELF
-static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
-#endif
-
+static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
+ tree) ATTRIBUTE_UNUSED;
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
-static tree sparc_fold_builtin (tree, tree, bool);
+static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
-static bool sparc_rtx_costs (rtx, int, int, int *);
+static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
+static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
+ int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
-static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
+static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
+static bool sparc_tls_referenced_p (rtx);
+static rtx legitimize_tls_address (rtx);
+static rtx legitimize_pic_address (rtx, rtx);
+static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
+static bool sparc_frame_pointer_required (void);
+static bool sparc_can_eliminate (const int, const int);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
+static void sparc_trampoline_init (rtx, tree, rtx);
+\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
-const struct attribute_spec sparc_attribute_table[];
+/* Table of valid machine attributes. */
+static const struct attribute_spec sparc_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ SUBTARGET_ATTRIBUTE_TABLE,
+ { NULL, 0, 0, false, false, false, NULL }
+};
#endif
\f
/* Option handling. */
/* Initialize the GCC target structure. */
-/* The sparc default is to use .half rather than .short for aligned
- HI objects. Use .word instead of .long on non-ELF systems. */
+/* The default is to use .half rather than .short for aligned HI objects. */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
-#ifndef OBJECT_FORMAT_ELF
-#undef TARGET_ASM_ALIGNED_SI_OP
-#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
-#endif
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
+
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
-#define TARGET_ADDRESS_COST hook_int_rtx_0
+#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
-/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
- no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
- test for this value. */
-#undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
-
-/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
- no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
- test for this value. */
-#undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option
-#if TARGET_GNU_TLS
+#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE sparc_can_eliminate
+
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
+
struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION. */
{
static struct code_model {
const char *const name;
- const int value;
+ const enum cmodel value;
} const cmodels[] = {
{ "32", CM_32 },
{ "medlow", CM_MEDLOW },
{ "medmid", CM_MEDMID },
{ "medany", CM_MEDANY },
{ "embmedany", CM_EMBMEDANY },
- { 0, 0 }
+ { NULL, (enum cmodel) 0 }
};
const struct code_model *cmodel;
/* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=. */
/* UltraSPARC T1 */
{ "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
{ "niagara2", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9},
- { 0, 0, 0, 0 }
+ { 0, (enum processor_type) 0, 0, 0 }
};
const struct cpu_table *cpu;
const struct sparc_cpu_select *sel;
int fpu;
-
+
#ifndef SPARC_BI_ARCH
/* Check for unsupported architecture size. */
if (! TARGET_64BIT != DEFAULT_ARCH32_P)
/* Code model selection. */
sparc_cmodel = SPARC_DEFAULT_CMODEL;
-
+
#ifdef SPARC_BI_ARCH
if (TARGET_ARCH32)
sparc_cmodel = CM_32;
/* Don't use stack biasing in 32 bit mode. */
if (TARGET_ARCH32)
target_flags &= ~MASK_STACK_BIAS;
-
+
/* Supply a default value for align_functions. */
if (align_functions == 0
&& (sparc_cpu == PROCESSOR_ULTRASPARC
: (sparc_cpu == PROCESSOR_ULTRASPARC3
? 8 : 3)));
if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
- set_param_value ("l1-cache-line-size",
+ set_param_value ("l1-cache-line-size",
((sparc_cpu == PROCESSOR_ULTRASPARC
|| sparc_cpu == PROCESSOR_ULTRASPARC3
|| sparc_cpu == PROCESSOR_NIAGARA
? 64 : 32));
}
\f
-#ifdef SUBTARGET_ATTRIBUTE_TABLE
-/* Table of valid machine attributes. */
-const struct attribute_spec sparc_attribute_table[] =
-{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- SUBTARGET_ATTRIBUTE_TABLE,
- { NULL, 0, 0, false, false, false, NULL }
-};
-#endif
-\f
/* Miscellaneous utilities. */
/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
/* Fixup TLS cases. */
if (TARGET_HAVE_TLS
&& CONSTANT_P (operands[1])
- && GET_CODE (operands[1]) != HIGH
&& sparc_tls_referenced_p (operands [1]))
{
- rtx sym = operands[1];
- rtx addend = NULL;
-
- if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
- {
- addend = XEXP (XEXP (sym, 0), 1);
- sym = XEXP (XEXP (sym, 0), 0);
- }
-
- gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));
-
- sym = legitimize_tls_address (sym);
- if (addend)
- {
- sym = gen_rtx_PLUS (mode, sym, addend);
- sym = force_operand (sym, operands[0]);
- }
- operands[1] = sym;
+ operands[1] = legitimize_tls_address (operands[1]);
+ return false;
}
-
+
/* Fixup PIC cases. */
if (flag_pic && CONSTANT_P (operands[1]))
{
if (pic_address_needs_scratch (operands[1]))
- operands[1] = legitimize_pic_address (operands[1], mode, 0);
+ operands[1] = legitimize_pic_address (operands[1], NULL_RTX);
/* VxWorks does not impose a fixed gap between segments; the run-time
gap can be different from the object-file gap. We therefore can't
if (symbolic_operand (operands[1], mode))
{
operands[1] = legitimize_pic_address (operands[1],
- mode,
- (reload_in_progress ?
- operands[0] :
- NULL_RTX));
+ reload_in_progress
+ ? operands[0] : NULL_RTX);
return false;
}
}
/* input_operand should have filtered out 32-bit mode. */
sparc_emit_set_const64 (operands[0], operands[1]);
return true;
-
+
default:
gcc_unreachable ();
}
}
}
-/* X and Y are two things to compare using CODE. Emit the compare insn and
- return the rtx for the cc reg in the proper mode. */
+/* Emit the compare insn and return the CC reg for a CODE comparison
+ with operands X and Y. */
-rtx
-gen_compare_reg (enum rtx_code code)
+static rtx
+gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
{
- rtx x = sparc_compare_op0;
- rtx y = sparc_compare_op1;
- enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ enum machine_mode mode;
rtx cc_reg;
- if (sparc_compare_emitted != NULL_RTX)
- {
- cc_reg = sparc_compare_emitted;
- sparc_compare_emitted = NULL_RTX;
- return cc_reg;
- }
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
+ return x;
+
+ mode = SELECT_CC_MODE (code, x, y);
/* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
fcc regs (cse can't tell they're really call clobbered regs and will
else
cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
- emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
- gen_rtx_COMPARE (mode, x, y)));
+ /* We shouldn't get there for TFmode if !TARGET_HARD_QUAD. If we do, this
+ will only result in an unrecognizable insn so no point in asserting. */
+ emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
return cc_reg;
}
+
+/* Emit the compare insn and return the CC reg for the comparison in CMP. */
+
+rtx
+gen_compare_reg (rtx cmp)
+{
+ return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
+}
+
/* This function is used for v9 only.
+ DEST is the target of the Scc insn.
CODE is the code for an Scc's comparison.
- OPERANDS[0] is the target of the Scc insn.
- OPERANDS[1] is the value we compare against const0_rtx (which hasn't
- been generated yet).
+ X and Y are the values we compare.
This function is needed to turn
IE: The instruction recognizer needs to see the mode of the comparison to
find the right instruction. We could use "gt:DI" right in the
- define_expand, but leaving it out allows us to handle DI, SI, etc.
-
- We refer to the global sparc compare operands sparc_compare_op0 and
- sparc_compare_op1. */
+ define_expand, but leaving it out allows us to handle DI, SI, etc. */
-int
-gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
+static int
+gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
{
if (! TARGET_ARCH64
- && (GET_MODE (sparc_compare_op0) == DImode
- || GET_MODE (operands[0]) == DImode))
+ && (GET_MODE (x) == DImode
+ || GET_MODE (dest) == DImode))
return 0;
/* Try to use the movrCC insns. */
if (TARGET_ARCH64
- && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
- && sparc_compare_op1 == const0_rtx
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+ && y == const0_rtx
&& v9_regcmp_p (compare_code))
{
- rtx op0 = sparc_compare_op0;
+ rtx op0 = x;
rtx temp;
/* Special case for op0 != 0. This can be done with one instruction if
- operands[0] == sparc_compare_op0. */
+ dest == x. */
if (compare_code == NE
- && GET_MODE (operands[0]) == DImode
- && rtx_equal_p (op0, operands[0]))
+ && GET_MODE (dest) == DImode
+ && rtx_equal_p (op0, dest))
{
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
gen_rtx_IF_THEN_ELSE (DImode,
gen_rtx_fmt_ee (compare_code, DImode,
op0, const0_rtx),
const1_rtx,
- operands[0])));
+ dest)));
return 1;
}
- if (reg_overlap_mentioned_p (operands[0], op0))
+ if (reg_overlap_mentioned_p (dest, op0))
{
- /* Handle the case where operands[0] == sparc_compare_op0.
+ /* Handle the case where dest == x.
We "early clobber" the result. */
- op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
- emit_move_insn (op0, sparc_compare_op0);
+ op0 = gen_reg_rtx (GET_MODE (x));
+ emit_move_insn (op0, x);
}
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
+ emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
if (GET_MODE (op0) != DImode)
{
temp = gen_reg_rtx (DImode);
}
else
temp = op0;
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
gen_rtx_fmt_ee (compare_code, DImode,
temp, const0_rtx),
const1_rtx,
- operands[0])));
+ dest)));
return 1;
}
else
{
- operands[1] = gen_compare_reg (compare_code);
+ x = gen_compare_reg_1 (compare_code, x, y);
+ y = const0_rtx;
- switch (GET_MODE (operands[1]))
- {
- case CCmode :
- case CCXmode :
- case CCFPEmode :
- case CCFPmode :
- break;
- default :
- gcc_unreachable ();
- }
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ gcc_assert (GET_MODE (x) != CC_NOOVmode
+ && GET_MODE (x) != CCX_NOOVmode);
+
+ emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
gen_rtx_fmt_ee (compare_code,
- GET_MODE (operands[1]),
- operands[1], const0_rtx),
- const1_rtx, operands[0])));
+ GET_MODE (x), x, y),
+ const1_rtx, dest)));
return 1;
}
}
+
+/* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
+ without jumps using the addx/subx instructions. */
+
+bool
+emit_scc_insn (rtx operands[])
+{
+ rtx tem;
+ rtx x;
+ rtx y;
+ enum rtx_code code;
+
+ /* The quad-word fp compare library routines all return nonzero to indicate
+ true, which is different from the equivalent libgcc routines, so we must
+ handle them specially here. */
+ if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
+ GET_CODE (operands[1]));
+ operands[2] = XEXP (operands[1], 0);
+ operands[3] = XEXP (operands[1], 1);
+ }
+
+ code = GET_CODE (operands[1]);
+ x = operands[2];
+ y = operands[3];
+
+ /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
+ more applications). The exception to this is "reg != 0" which can
+ be done in one instruction on v9 (so we do it). */
+ if (code == EQ)
+ {
+ if (GET_MODE (x) == SImode)
+ {
+ rtx pat = gen_seqsi_special (operands[0], x, y);
+ emit_insn (pat);
+ return true;
+ }
+ else if (GET_MODE (x) == DImode)
+ {
+ rtx pat = gen_seqdi_special (operands[0], x, y);
+ emit_insn (pat);
+ return true;
+ }
+ }
+
+ if (code == NE)
+ {
+ if (GET_MODE (x) == SImode)
+ {
+ rtx pat = gen_snesi_special (operands[0], x, y);
+ emit_insn (pat);
+ return true;
+ }
+ else if (GET_MODE (x) == DImode)
+ {
+ rtx pat = gen_snedi_special (operands[0], x, y);
+ emit_insn (pat);
+ return true;
+ }
+ }
+
+ /* For the rest, on v9 we can use conditional moves. */
+
+ if (TARGET_V9)
+ {
+ if (gen_v9_scc (operands[0], code, x, y))
+ return true;
+ }
+
+ /* We can do LTU and GEU using the addx/subx instructions too. And
+ for GTU/LEU, if both operands are registers swap them and fall
+ back to the easy case. */
+ if (code == GTU || code == LEU)
+ {
+ if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
+ && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
+ {
+ tem = x;
+ x = y;
+ y = tem;
+ code = swap_condition (code);
+ }
+ }
+
+ if (code == LTU || code == GEU)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_fmt_ee (code, SImode,
+ gen_compare_reg_1 (code, x, y),
+ const0_rtx)));
+ return true;
+ }
+
+ /* Nope, do branches. */
+ return false;
+}
+
/* Emit a conditional jump insn for the v9 architecture using comparison code
CODE and jump target LABEL.
This function exists to take advantage of the v9 brxx insns. */
-void
+static void
emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
{
- gcc_assert (sparc_compare_emitted == NULL_RTX);
emit_jump_insn (gen_rtx_SET (VOIDmode,
pc_rtx,
gen_rtx_IF_THEN_ELSE (VOIDmode,
pc_rtx)));
}
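+
+/* Emit a conditional branch insn for the comparison described by OPERANDS.
+   On 64-bit targets, a DImode register compared against zero is handled
+   directly by the branch-on-register (brxx) insns; otherwise the comparison
+   is materialized in a CC register and a cbranchcc4 branch is emitted.  */
+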
+void
+emit_conditional_branch_insn (rtx operands[])
+{
+ /* The quad-word fp compare library routines all return nonzero to indicate
+ true, which is different from the equivalent libgcc routines, so we must
+ handle them specially here. */
+ if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
+ GET_CODE (operands[0]));
+ operands[1] = XEXP (operands[0], 0);
+ operands[2] = XEXP (operands[0], 1);
+ }
+
+ if (TARGET_ARCH64 && operands[2] == const0_rtx
+ && GET_CODE (operands[1]) == REG
+ && GET_MODE (operands[1]) == DImode)
+ {
+ emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
+ return;
+ }
+
+ operands[1] = gen_compare_reg (operands[0]);
+ operands[2] = const0_rtx;
+ operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
+ operands[1], operands[2]);
+ emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
+ operands[3]));
+}
+
/* Generate a DFmode part of a hard TFmode register.
REG is the TFmode hard register, LOW is 1 for the
low 64bit of the register and 0 otherwise.
{
case SImode:
func = "_Qp_itoq";
+ if (TARGET_ARCH64)
+ operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
break;
case DImode:
func = "_Qp_xtoq";
{
case SImode:
func = "_Qp_uitoq";
+ if (TARGET_ARCH64)
+ operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
break;
case DImode:
func = "_Qp_uxtoq";
/* If the function uses __builtin_eh_return, the eh_return machinery
occupies the delay slot. */
- if (current_function_calls_eh_return)
+ if (crtl->calls_eh_return)
return 0;
/* In the case of a true leaf function, anything can go into the slot. */
}
\f
/* PIC support. */
-static GTY(()) char pic_helper_symbol_name[256];
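+/* Set once the PIC helper thunk is known to be needed.  */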
+static GTY(()) bool pic_helper_needed = false;
static GTY(()) rtx pic_helper_symbol;
-static GTY(()) bool pic_helper_emitted_p = false;
static GTY(()) rtx global_offset_table;
/* Ensure that we are not using patterns that are not OK with PIC. */
}
}
-/* Return true if X is an address which needs a temporary register when
+/* Return true if X is an address which needs a temporary register when
reloaded while generating PIC code. */
int
bool
legitimate_constant_p (rtx x)
{
- rtx inner;
-
switch (GET_CODE (x))
{
- case SYMBOL_REF:
- /* TLS symbols are not constant. */
- if (SYMBOL_REF_TLS_MODEL (x))
- return false;
- break;
-
case CONST:
- inner = XEXP (x, 0);
-
- /* Offsets of TLS symbols are never valid.
- Discourage CSE from creating them. */
- if (GET_CODE (inner) == PLUS
- && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
+ case SYMBOL_REF:
+ if (sparc_tls_referenced_p (x))
return false;
break;
{
if (pic_address_needs_scratch (x))
return false;
- if (SPARC_SYMBOL_REF_TLS_P (x)
- || (GET_CODE (x) == CONST
- && GET_CODE (XEXP (x, 0)) == PLUS
- && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
+ if (sparc_tls_referenced_p (x))
return false;
return true;
}
/* Return nonzero if ADDR is a valid memory address.
STRICT specifies whether strict register checking applies. */
-
-int
-legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
+
+static bool
+sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
{
rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
&& GET_CODE (rs2) != SUBREG
&& GET_CODE (rs2) != LO_SUM
&& GET_CODE (rs2) != MEM
- && ! SPARC_SYMBOL_REF_TLS_P (rs2)
+ && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
&& (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
&& (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
|| ((REG_P (rs1)
rs2 = NULL;
imm1 = XEXP (rs1, 1);
rs1 = XEXP (rs1, 0);
- if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
+ if (!CONSTANT_P (imm1)
+ || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
return 0;
}
}
rs1 = XEXP (addr, 0);
imm1 = XEXP (addr, 1);
- if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
+ if (!CONSTANT_P (imm1)
+ || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
return 0;
/* We can't allow TFmode in 32-bit mode, because an offset greater
rtx temp;
if (flag_pic)
{
- current_function_uses_pic_offset_table = 1;
+ crtl->uses_pic_offset_table = 1;
return pic_offset_table_rtx;
}
return temp;
}
-/* Return 1 if *X is a thread-local symbol. */
-
-static int
-sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
-{
- return SPARC_SYMBOL_REF_TLS_P (*x);
-}
-
-/* Return 1 if X contains a thread-local symbol. */
+/* Return true if X contains a thread-local symbol. */
-bool
+static bool
sparc_tls_referenced_p (rtx x)
{
if (!TARGET_HAVE_TLS)
return false;
- return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
+ if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
+ x = XEXP (XEXP (x, 0), 0);
+
+ if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
+ return true;
+
+ /* That's all we handle in legitimize_tls_address for now. */
+ return false;
}
/* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
this (thread-local) address. */
-rtx
+static rtx
legitimize_tls_address (rtx addr)
{
rtx temp1, temp2, temp3, ret, o0, got, insn;
gcc_unreachable ();
}
+ else if (GET_CODE (addr) == CONST)
+ {
+ rtx base, offset;
+
+ gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
+
+ base = legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
+ offset = XEXP (XEXP (addr, 0), 1);
+
+ base = force_operand (base, NULL_RTX);
+ if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
+ offset = force_reg (Pmode, offset);
+ ret = gen_rtx_PLUS (Pmode, base, offset);
+ }
+
else
gcc_unreachable (); /* for now ... */
return ret;
}
-
/* Legitimize PIC addresses. If the address is already position-independent,
we return ORIG. Newly generated position-independent addresses go into a
reg. This is REG if nonzero, otherwise we allocate register(s) as
necessary. */
-rtx
-legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
- rtx reg)
+static rtx
+legitimize_pic_address (rtx orig, rtx reg)
{
+ bool gotdata_op = false;
+
if (GET_CODE (orig) == SYMBOL_REF
/* See the comment in sparc_expand_move. */
|| (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
}
address = temp_reg;
+ gotdata_op = true;
}
else
address = orig;
- pic_ref = gen_const_mem (Pmode,
- gen_rtx_PLUS (Pmode,
- pic_offset_table_rtx, address));
- current_function_uses_pic_offset_table = 1;
- insn = emit_move_insn (reg, pic_ref);
+ crtl->uses_pic_offset_table = 1;
+ if (gotdata_op)
+ {
+ if (TARGET_ARCH64)
+ insn = emit_insn (gen_movdi_pic_gotdata_op (reg, pic_offset_table_rtx,
+ address, orig));
+ else
+ insn = emit_insn (gen_movsi_pic_gotdata_op (reg, pic_offset_table_rtx,
+ address, orig));
+ }
+ else
+ {
+ pic_ref = gen_const_mem (Pmode,
+ gen_rtx_PLUS (Pmode,
+ pic_offset_table_rtx, address));
+ insn = emit_move_insn (reg, pic_ref);
+ }
/* Put a REG_EQUAL note on this insn, so that it can be optimized
by loop. */
set_unique_reg_note (insn, REG_EQUAL, orig);
}
gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
- base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
- offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
- base == reg ? 0 : reg);
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
+ base == reg ? NULL_RTX : reg);
if (GET_CODE (offset) == CONST_INT)
{
/* ??? Why do we do this? */
/* Now movsi_pic_label_ref uses it, but we ought to be checking that
the register is live instead, in case it is eliminated. */
- current_function_uses_pic_offset_table = 1;
+ crtl->uses_pic_offset_table = 1;
return orig;
}
OLDX is the address as it was before break_out_memory_refs was called.
In some cases it is useful to look at this to decide what needs to be done.
- MODE is the mode of the operand pointed to by X. */
+ MODE is the mode of the operand pointed to by X.
-rtx
-legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
+ On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
+
+static rtx
+sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
{
rtx orig_x = x;
x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
force_operand (XEXP (x, 1), NULL_RTX));
- if (x != orig_x && legitimate_address_p (mode, x, FALSE))
+ if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
return x;
- if (SPARC_SYMBOL_REF_TLS_P (x))
+ if (sparc_tls_referenced_p (x))
x = legitimize_tls_address (x);
else if (flag_pic)
- x = legitimize_pic_address (x, mode, 0);
+ x = legitimize_pic_address (x, NULL_RTX);
else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
copy_to_mode_reg (Pmode, XEXP (x, 1)));
copy_to_mode_reg (Pmode, XEXP (x, 0)));
else if (GET_CODE (x) == SYMBOL_REF
|| GET_CODE (x) == CONST
- || GET_CODE (x) == LABEL_REF)
+ || GET_CODE (x) == LABEL_REF)
x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
+
return x;
}
-/* Emit the special PIC helper function. */
+#ifdef HAVE_GAS_HIDDEN
+# define USE_HIDDEN_LINKONCE 1
+#else
+# define USE_HIDDEN_LINKONCE 0
+#endif
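+
+/* Construct in NAME the name of the PC thunk used to load the PIC
+   register numbered REGNO.  */
+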
static void
-emit_pic_helper (void)
+get_pc_thunk_name (char name[32], unsigned int regno)
{
- const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
- int align;
+ const char *pic_name = reg_names[regno];
- switch_to_section (text_section);
+ /* Skip the leading '%' as that cannot be used in a
+ symbol name. */
+ pic_name += 1;
- align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
- if (align > 0)
- ASM_OUTPUT_ALIGN (asm_out_file, align);
- ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
- if (flag_delayed_branch)
- fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
- pic_name, pic_name);
+ if (USE_HIDDEN_LINKONCE)
+ sprintf (name, "__sparc_get_pc_thunk.%s", pic_name);
else
- fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
- pic_name, pic_name);
-
- pic_helper_emitted_p = true;
+ ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
}
/* Emit code to load the PIC register. */
static void
-load_pic_register (bool delay_pic_helper)
+load_pic_register (void)
{
int orig_flag_pic = flag_pic;
if (TARGET_VXWORKS_RTP)
{
emit_insn (gen_vxworks_load_got ());
- emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
+ emit_use (pic_offset_table_rtx);
return;
}
/* If we haven't initialized the special PIC symbols, do so now. */
- if (!pic_helper_symbol_name[0])
+ if (!pic_helper_needed)
{
- ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
- pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
+ char name[32];
+
+ pic_helper_needed = true;
+
+ get_pc_thunk_name (name, REGNO (pic_offset_table_rtx));
+ pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
+
global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
}
- /* If we haven't emitted the special PIC helper function, do so now unless
- we are requested to delay it. */
- if (!delay_pic_helper && !pic_helper_emitted_p)
- emit_pic_helper ();
-
flag_pic = 0;
if (TARGET_ARCH64)
emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
since setjmp/longjmp can cause life info to screw up.
??? In the case where we don't obey regdecls, this is not sufficient
since we may not fall out the bottom. */
- emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
+ emit_use (pic_offset_table_rtx);
}
/* Emit a call instruction with the pattern given by PAT. ADDR is the
: !SYMBOL_REF_LOCAL_P (addr)))
{
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
- current_function_uses_pic_offset_table = 1;
+ crtl->uses_pic_offset_table = 1;
}
}
\f
it is to a constant address, or the address involves a LO_SUM. */
return 1;
}
-
+
/* An obviously unaligned address. */
return 0;
}
#define SF_MODES (S_MODES)
/* Modes for double-float and smaller quantities. */
-#define DF_MODES (S_MODES | D_MODES)
-
-/* Modes for double-float only quantities. */
-#define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
-
-/* Modes for quad-float only quantities. */
-#define TF_ONLY_MODES (1 << (int) TF_MODE)
+#define DF_MODES (D_MODES)
/* Modes for quad-float and smaller quantities. */
-#define TF_MODES (DF_MODES | TF_ONLY_MODES)
+#define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
-/* Modes for quad-float and double-float quantities. */
-#define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
+/* Modes for quad-float pairs and smaller quantities. */
+#define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
-/* Modes for quad-float pair only quantities. */
-#define OF_ONLY_MODES (1 << (int) OF_MODE)
+/* Modes for double-float only quantities. */
+#define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
-/* Modes for quad-float pairs and smaller quantities. */
-#define OF_MODES (TF_MODES | OF_ONLY_MODES)
+/* Modes for quad-float and double-float only quantities. */
+#define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
-#define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
+/* Modes for quad-float pairs and double-float only quantities. */
+#define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
/* Modes for condition codes. */
#define CC_MODES (1 << (int) CC_MODE)
sparc_mode_class[i] = 1 << (int) T_MODE;
else if (GET_MODE_SIZE (i) == 32)
sparc_mode_class[i] = 1 << (int) O_MODE;
- else
+ else
sparc_mode_class[i] = 0;
break;
case MODE_VECTOR_INT:
sparc_mode_class[i] = 1 << (int) TF_MODE;
else if (GET_MODE_SIZE (i) == 32)
sparc_mode_class[i] = 1 << (int) OF_MODE;
- else
+ else
sparc_mode_class[i] = 0;
break;
case MODE_CC:
rtx reg = gen_rtx_REG (Pmode, 1);
emit_move_insn (reg, GEN_INT (-actual_fsize));
insn = emit_insn (gen_stack_pointer_inc (reg));
- REG_NOTES (insn) =
- gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
- REG_NOTES (insn));
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
}
RTX_FRAME_RELATED_P (insn) = 1;
emit_save_or_restore_regs (SORR_SAVE);
/* Load the PIC register if needed. */
- if (flag_pic && current_function_uses_pic_offset_table)
- load_pic_register (false);
+ if (flag_pic && crtl->uses_pic_offset_table)
+ load_pic_register ();
}
-
+
/* This function generates the assembly code for function entry, which boils
down to emitting the necessary .register directives. */
return sparc_prologue_data_valid_p
&& (actual_fsize == 0 || !sparc_leaf_function_p);
}
-
+
/* This function generates the assembly code for function exit. */
-
+
static void
sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
sparc_output_deferred_case_vectors ();
}
-
+
/* Output a 'restore' instruction. */
-
+
static void
output_restore (rtx pat)
{
break;
}
}
-
+
/* Output a return. */
const char *
semantics of restore/return. We simply output the jump to the
return address and the insn in the delay slot (if any). */
- gcc_assert (! current_function_calls_eh_return);
+ gcc_assert (! crtl->calls_eh_return);
return "jmp\t%%o7+%)%#";
}
combined with the 'restore' instruction or put in the delay slot of
the 'return' instruction. */
- if (current_function_calls_eh_return)
+ if (crtl->calls_eh_return)
{
/* If the function uses __builtin_eh_return, the eh_return
machinery occupies the delay slot. */
the appropriate fp reg and in memory.
- Summary of the calling conventions implemented by GCC on SPARC:
+ Summary of the calling conventions implemented by GCC on the SPARC:
32-bit ABI:
size argument return value
Note #2: integral vector types follow the scalar floating-point types
conventions to match what is implemented by the Sun VIS SDK.
-Note #3: floating-point vector types follow the aggregate types
+Note #3: floating-point vector types follow the aggregate types
conventions. */
return TARGET_ARCH32 ? true : false;
}
+/* Handle promotion of pointer and integer arguments. */
+
+static enum machine_mode
+sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
+ enum machine_mode mode,
+ int *punsignedp ATTRIBUTE_UNUSED,
+ const_tree fntype ATTRIBUTE_UNUSED,
+ int for_return ATTRIBUTE_UNUSED)
+{
+ if (POINTER_TYPE_P (type))
+ {
+ *punsignedp = POINTERS_EXTEND_UNSIGNED;
+ return Pmode;
+ }
+
+ /* For TARGET_ARCH64 we need this, as we don't have instructions
+ for arithmetic operations which do zero/sign extension at the same time,
+ so without this we end up with a srl/sra after every assignment to a
+ user variable, which means very very bad code. */
+ if (TARGET_ARCH64
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ return word_mode;
+
+ return mode;
+}
+
/* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
static bool
{
case MODE_FLOAT:
case MODE_COMPLEX_FLOAT:
+ case MODE_VECTOR_INT:
if (TARGET_ARCH64 && TARGET_FPU && named)
{
if (slotno >= SPARC_FP_ARG_MAX)
/* A subroutine of function_arg_record_value. Assign the bits of the
structure between parms->intoffset and bitpos to integer registers. */
-static void
+static void
function_arg_record_value_3 (HOST_WIDE_INT bitpos,
struct function_arg_record_value_parms *parms)
{
NAMED is nonzero if this argument is a named parameter
(otherwise it is an extra parameter matching an ellipsis).
REGBASE is the regno of the base register for the parameter array. */
-
+
static rtx
function_arg_record_value (const_tree type, enum machine_mode mode,
int slotno, int named, int regbase)
Return an expression valid as a return value for the two macros
FUNCTION_ARG and FUNCTION_VALUE.
- SIZE is the size in bytes of the vector.
- BASE_MODE is the argument's base machine mode.
+ SIZE is the size in bytes of the vector (at least 8 bytes).
REGNO is the FP hard register the vector will be passed in. */
static rtx
-function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
+function_arg_vector_value (int size, int regno)
{
- unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
- int nregs = size / base_mode_size, i;
+ int i, nregs = size / 8;
rtx regs;
regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
{
XVECEXP (regs, 0, i)
= gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (base_mode, regno),
- GEN_INT (base_mode_size * i));
- regno += base_mode_size / 4;
+ gen_rtx_REG (DImode, regno + 2*i),
+ GEN_INT (i*8));
}
return regs;
if (mode == BLKmode)
return function_arg_vector_value (size,
- TYPE_MODE (TREE_TYPE (type)),
SPARC_FP_ARG_FIRST + 2*slotno);
else
mclass = MODE_FLOAT;
/* Original SPARC 64-bit ABI says that structures and unions
smaller than 16 bytes are passed in registers, as well as
all other base types.
-
+
Extended ABI (as implemented by the Sun compiler) says that
complex floats are passed in registers up to 16 bytes. Pass
all complex integers in registers up to 16 bytes. More generally,
/* Original SPARC 64-bit ABI says that structures and unions
smaller than 32 bytes are returned in registers, as well as
all other base types.
-
+
Extended ABI (as implemented by the Sun compiler) says that all
complex floats are returned in registers (8 FP registers at most
for '_Complex long double'). Return all complex integers in
mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
STRUCT_VALUE_OFFSET));
- /* Only follow the SPARC ABI for fixed-size structure returns.
- Variable size structure returns are handled per the normal
+ /* Only follow the SPARC ABI for fixed-size structure returns.
+ Variable size structure returns are handled per the normal
procedures in GCC. This is enabled by -mstd-struct-return */
- if (incoming == 2
+ if (incoming == 2
&& sparc_std_struct_return
&& TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
&& TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
provided. */
rtx ret_rtx = gen_rtx_REG (Pmode, 31);
rtx scratch = gen_reg_rtx (SImode);
- rtx endlab = gen_label_rtx ();
+ rtx endlab = gen_label_rtx ();
/* Calculate the return object size */
tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
/* Implement SPARC 32-bit psABI callee returns struct checking
- requirements:
-
+ requirements:
+
Fetch the instruction where we will return to and see if
it's an unimp instruction (the most significant 10 bits
will be zero). */
emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
- /* Assign stack temp:
+ /* Assign stack temp:
Write the address of the memory pointed to by temp_val into
the memory pointed to by mem */
emit_move_insn (mem, XEXP (temp_val, 0));
if (mode == BLKmode)
return function_arg_vector_value (size,
- TYPE_MODE (TREE_TYPE (type)),
SPARC_FP_ARG_FIRST);
else
mclass = MODE_FLOAT;
mclass = MODE_INT;
}
- /* This must match PROMOTE_FUNCTION_MODE. */
+ /* This must match sparc_promote_function_mode.
+ ??? Maybe 32-bit pointers should actually remain in Pmode? */
else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
mode = word_mode;
}
/* Implement `va_arg' for stdarg. */
static tree
-sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
HOST_WIDE_INT size, rsize, align;
tree addr, incr;
size = int_size_in_bytes (type);
rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
align = 0;
-
+
if (TARGET_ARCH64)
{
/* For SPARC64, objects requiring 16-byte alignment get it. */
addr = fold_convert (build_pointer_type (ptrtype), addr);
addr = build_va_arg_indirect_ref (addr);
}
- /* If the address isn't aligned properly for the type,
- we may need to copy to a temporary.
- FIXME: This is inefficient. Usually we can do this
- in registers. */
- else if (align == 0
- && TYPE_ALIGN (type) > BITS_PER_WORD)
+
+ /* If the address isn't aligned properly for the type, we need a temporary.
+ FIXME: This is inefficient, usually we can do this in registers. */
+ else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
{
tree tmp = create_tmp_var (type, "va_arg_tmp");
tree dest_addr = build_fold_addr_expr (tmp);
-
- tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY], 3,
- dest_addr,
- addr,
- size_int (rsize));
-
+ tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
+ 3, dest_addr, addr, size_int (rsize));
+ TREE_ADDRESSABLE (tmp) = 1;
gimplify_and_add (copy, pre_p);
addr = dest_addr;
}
+
else
addr = fold_convert (ptrtype, addr);
- incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
- incr = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, incr);
- gimplify_and_add (incr, post_p);
+ incr
+ = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
+ gimplify_assign (valist, incr, post_p);
return build_va_arg_indirect_ref (addr);
}
}
/* Emit a library call comparison between floating point X and Y.
- COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
+ COMPARISON is the operator to compare with (EQ, NE, GT, etc).
+ Return the new operator to be used in the comparison sequence.
+
TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
values as arguments instead of the TFmode registers themselves,
that's why we cannot call emit_float_lib_cmp. */
-void
+
+rtx
sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
{
const char *qpfunc;
- rtx slot0, slot1, result, tem, tem2;
+ rtx slot0, slot1, result, tem, tem2, libfunc;
enum machine_mode mode;
+ enum rtx_code new_comparison;
switch (comparison)
{
case EQ:
- qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
+ qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
break;
case NE:
- qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
+ qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
break;
case GT:
- qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
+ qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
break;
case GE:
- qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
+ qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
break;
case LT:
- qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
+ qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
break;
case LE:
- qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
+ qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
break;
case ORDERED:
case UNGE:
case UNLE:
case LTGT:
- qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
+ qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
break;
default:
if (TARGET_ARCH64)
{
- if (GET_CODE (x) != MEM)
+ if (MEM_P (x))
+ slot0 = x;
+ else
{
slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_move_insn (slot0, x);
}
- else
- slot0 = x;
- if (GET_CODE (y) != MEM)
+ if (MEM_P (y))
+ slot1 = y;
+ else
{
slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
emit_move_insn (slot1, y);
}
- else
- slot1 = y;
- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
+ libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
+ emit_library_call (libfunc, LCT_NORMAL,
DImode, 2,
XEXP (slot0, 0), Pmode,
XEXP (slot1, 0), Pmode);
-
mode = DImode;
}
else
{
- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
+ libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
+ emit_library_call (libfunc, LCT_NORMAL,
SImode, 2,
x, TFmode, y, TFmode);
-
mode = SImode;
}
register so reload doesn't clobber the value if it needs
the return register for a spill reg. */
result = gen_reg_rtx (mode);
- emit_move_insn (result, hard_libcall_value (mode));
+ emit_move_insn (result, hard_libcall_value (mode, libfunc));
switch (comparison)
{
default:
- emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
- break;
+ return gen_rtx_NE (VOIDmode, result, const0_rtx);
case ORDERED:
case UNORDERED:
- emit_cmp_insn (result, GEN_INT(3), comparison == UNORDERED ? EQ : NE,
- NULL_RTX, mode, 0);
- break;
+ new_comparison = (comparison == UNORDERED ? EQ : NE);
+ return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
case UNGT:
case UNGE:
- emit_cmp_insn (result, const1_rtx,
- comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
- break;
+ new_comparison = (comparison == UNGT ? GT : NE);
+ return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
case UNLE:
- emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
- break;
+ return gen_rtx_NE (VOIDmode, result, const2_rtx);
case UNLT:
tem = gen_reg_rtx (mode);
if (TARGET_ARCH32)
emit_insn (gen_andsi3 (tem, result, const1_rtx));
else
emit_insn (gen_anddi3 (tem, result, const1_rtx));
- emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
- break;
+ return gen_rtx_NE (VOIDmode, tem, const0_rtx);
case UNEQ:
case LTGT:
tem = gen_reg_rtx (mode);
emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
else
emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
- emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
- NULL_RTX, mode, 0);
- break;
+ new_comparison = (comparison == UNEQ ? EQ : NE);
+ return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
}
+
+ gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion. This is the same code
/* branch on register are limited to +-128KB. If it is too far away,
change
-
+
brnz,pt %g1, .LC30
-
+
to
-
+
brz,pn %g1, .+12
nop
ba,pt %xcc, .LC30
-
+
and
-
+
brgez,a,pn %o1, .LC29
-
+
to
-
+
brlz,pt %o1, .+16
nop
ba,pt %xcc, .LC29 */
}
/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
- This makes them candidates for using ldd and std insns.
+ This makes them candidates for using ldd and std insns.
Note reg1 and reg2 *must* be hard registers. */
registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
{
/* We might have been passed a SUBREG. */
- if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
return 0;
if (REGNO (reg1) % 2 != 0)
return 0;
- /* Integer ldd is deprecated in SPARC V9 */
- if (TARGET_V9 && REGNO (reg1) < 32)
- return 0;
+ /* Integer ldd is deprecated in SPARC V9 */
+ if (TARGET_V9 && REGNO (reg1) < 32)
+ return 0;
return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if the addresses in mem1 and mem2 are suitable for use in
an ldd or std insn.
-
+
This can only happen when addr1 and addr2, the addresses in mem1
and mem2, are consecutive memory locations (addr1 + 4 == addr2).
addr1 must also be aligned on a 64-bit boundary.
/* MEM1 should be aligned on a 64-bit boundary. */
if (MEM_ALIGN (mem1) < 64)
return 0;
-
+
addr1 = XEXP (mem1, 0);
addr2 = XEXP (mem2, 0);
-
+
/* Extract a register number and offset (if used) from the first addr. */
if (GET_CODE (addr1) == PLUS)
{
if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
return 0;
-
- /* The first offset must be evenly divisible by 8 to ensure the
+
+ /* The first offset must be evenly divisible by 8 to ensure the
address is 64 bit aligned. */
if (offset1 % 8 != 0)
return 0;
return 1;
}
-/* Return 1 if reg is a pseudo, or is the first register in
- a hard register pair. This makes it a candidate for use in
+/* Return 1 if reg is a pseudo, or is the first register in
+ a hard register pair. This makes it suitable for use in
ldd and std insns. */
int
register_ok_for_ldd (rtx reg)
{
/* We might have been passed a SUBREG. */
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return 0;
if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
return (REGNO (reg) % 2 == 0);
- else
- return 1;
+
+ return 1;
+}
+
+/* Return 1 if OP is a memory whose address is known to be
+ aligned to an 8-byte boundary, or a pseudo during reload.
+ This makes it suitable for use in ldd and std insns. */
+
+int
+memory_ok_for_ldd (rtx op)
+{
+ if (MEM_P (op))
+ {
+ /* In 64-bit mode, we assume that the address is word-aligned. */
+ if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
+ return 0;
+
+ if ((reload_in_progress || reload_completed)
+ && !strict_memory_address_p (Pmode, XEXP (op, 0)))
+ return 0;
+ }
+ else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ {
+ if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
+ return 0;
+ }
+ else
+ return 0;
+
+ return 1;
}
\f
/* Print operand X (an rtx) in assembler syntax to file FILE.
/* Output the right displacement from the saved PC on function return.
The caller may have placed an "unimp" insn immediately after the call
so we have to account for it. This insn is used in the 32-bit ABI
- when calling a function that returns a non zero-sized structure. The
+ when calling a function that returns a non zero-sized structure. The
64-bit ABI doesn't have it. Be careful to have this test be the same
- as that used on the call. The exception here is that when
- sparc_std_struct_return is enabled, the psABI is followed exactly
- and the adjustment is made by the code in sparc_struct_value_rtx.
- The call emitted is the same when sparc_std_struct_return is
- present. */
- if (! TARGET_ARCH64
- && current_function_returns_struct
- && ! sparc_std_struct_return
- && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
- == INTEGER_CST)
- && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
+ as that for the call. The exception is when sparc_std_struct_return
+ is enabled, the psABI is followed exactly and the adjustment is made
+ by the code in sparc_struct_value_rtx. The call emitted is the same
+ when sparc_std_struct_return is enabled. */
+ if (!TARGET_ARCH64
+ && cfun->returns_struct
+ && !sparc_std_struct_return
+ && DECL_SIZE (DECL_RESULT (current_function_decl))
+ && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
+ == INTEGER_CST
+ && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
fputs ("12", file);
else
fputc ('8', file);
{
case ERROR_MARK:
return qualifiers;
-
+
case ARRAY_TYPE:
qualifiers |= (3 << shift);
break;
if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
-
+
else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
-
+
else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
-
+
else
return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
-
+
case REAL_TYPE:
/* If this is a range type, consider it to be the underlying
type. */
if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
return (qualifiers | 6);
- else
+ else
return (qualifiers | 7);
-
+
case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
/* ??? We need to distinguish between double and float complex types,
but I don't know how yet because I can't reach this code from
case BOOLEAN_TYPE: /* Boolean truth value type. */
case LANG_TYPE: /* ? */
return qualifiers;
-
+
default:
gcc_unreachable (); /* Not a type! */
}
Emit enough FLUSH insns to synchronize the data and instruction caches. */
-void
-sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
+static void
+sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
{
/* SPARC 32-bit trampoline:
*/
emit_move_insn
- (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
+ (adjust_address (m_tramp, SImode, 0),
expand_binop (SImode, ior_optab,
expand_shift (RSHIFT_EXPR, SImode, fnaddr,
size_int (10), 0, 1),
NULL_RTX, 1, OPTAB_DIRECT));
emit_move_insn
- (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+ (adjust_address (m_tramp, SImode, 4),
expand_binop (SImode, ior_optab,
expand_shift (RSHIFT_EXPR, SImode, cxt,
size_int (10), 0, 1),
NULL_RTX, 1, OPTAB_DIRECT));
emit_move_insn
- (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+ (adjust_address (m_tramp, SImode, 8),
expand_binop (SImode, ior_optab,
expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
NULL_RTX, 1, OPTAB_DIRECT));
emit_move_insn
- (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+ (adjust_address (m_tramp, SImode, 12),
expand_binop (SImode, ior_optab,
expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
/* On UltraSPARC a flush flushes an entire cache line. The trampoline is
aligned on a 16 byte boundary so one flush clears it all. */
- emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
+ emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
if (sparc_cpu != PROCESSOR_ULTRASPARC
&& sparc_cpu != PROCESSOR_ULTRASPARC3
&& sparc_cpu != PROCESSOR_NIAGARA
&& sparc_cpu != PROCESSOR_NIAGARA2)
- emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
- plus_constant (tramp, 8)))));
+ emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
/* Call __enable_execute_stack after writing onto the stack to make sure
the stack address is accessible. */
#ifdef ENABLE_EXECUTE_STACK
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
- LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
+ LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
values as "immediate" data out of the trampoline. It's also easier since
we can read the PC without clobbering a register. */
-void
-sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
+static void
+sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
{
/* SPARC 64-bit trampoline:
+16 bytes data
*/
- emit_move_insn (gen_rtx_MEM (SImode, tramp),
+ emit_move_insn (adjust_address (m_tramp, SImode, 0),
GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+ emit_move_insn (adjust_address (m_tramp, SImode, 4),
GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+ emit_move_insn (adjust_address (m_tramp, SImode, 8),
GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+ emit_move_insn (adjust_address (m_tramp, SImode, 12),
GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
- emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
- emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
- emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
+ emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
+ emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
+ emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
if (sparc_cpu != PROCESSOR_ULTRASPARC
&& sparc_cpu != PROCESSOR_ULTRASPARC3
&& sparc_cpu != PROCESSOR_NIAGARA
&& sparc_cpu != PROCESSOR_NIAGARA2)
- emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
+ emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
/* Call __enable_execute_stack after writing onto the stack to make sure
the stack address is accessible. */
#ifdef ENABLE_EXECUTE_STACK
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
- LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
+ LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
+
+/* Worker for TARGET_TRAMPOLINE_INIT. */
+
+static void
+sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
+{
+ rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
+ cxt = force_reg (Pmode, cxt);
+ if (TARGET_ARCH64)
+ sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
+ else
+ sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
+}
\f
/* Adjust the cost of a scheduling dependency. Return the new cost of
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
{
if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
- || GET_CODE (SET_DEST (dep_pat)) != MEM
+ || GET_CODE (SET_DEST (dep_pat)) != MEM
|| GET_CODE (SET_SRC (pat)) != MEM
|| ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
XEXP (SET_SRC (pat), 0)))
return cost + 2;
- return cost + 8;
+ return cost + 8;
}
break;
default:
break;
- }
+ }
return cost;
}
sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
int max_ready ATTRIBUTE_UNUSED)
-{
-}
-
+{}
+
static int
sparc_use_sched_lookahead (void)
{
sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
}
-static void
+static void
sparc_output_addr_vec (rtx vec)
{
rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
int idx, vlen = XVECLEN (body, 0);
-#ifdef ASM_OUTPUT_ADDR_VEC_START
+#ifdef ASM_OUTPUT_ADDR_VEC_START
ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
#endif
ASM_OUTPUT_ADDR_VEC_ELT
(asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
}
-
+
#ifdef ASM_OUTPUT_ADDR_VEC_END
ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
#endif
}
-static void
+static void
sparc_output_addr_diff_vec (rtx vec)
{
rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
rtx base = XEXP (XEXP (body, 0), 0);
int idx, vlen = XVECLEN (body, 1);
-#ifdef ASM_OUTPUT_ADDR_VEC_START
+#ifdef ASM_OUTPUT_ADDR_VEC_START
ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
#endif
CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
CODE_LABEL_NUMBER (base));
}
-
+
#ifdef ASM_OUTPUT_ADDR_VEC_END
ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
#endif
align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
if (align > 0)
ASM_OUTPUT_ALIGN (asm_out_file, align);
-
+
for (t = sparc_addr_list; t ; t = XEXP (t, 1))
sparc_output_addr_vec (XEXP (t, 0));
for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
char buf[32];
rtx lab, fun;
- ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
- lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
-
- emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
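+ /* When profile counters are not needed, call the mcount function with
+ no arguments; otherwise pass it the address of the counter label. */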
+ if (NO_PROFILE_COUNTERS)
+ {
+ emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
+ }
+ else
+ {
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
+ lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
+ }
}
\f
-#ifdef OBJECT_FORMAT_ELF
+/* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
+
static void
-sparc_elf_asm_named_section (const char *name, unsigned int flags,
- tree decl)
+sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
+ tree decl ATTRIBUTE_UNUSED)
{
- if (flags & SECTION_MERGE)
- {
- /* entsize cannot be expressed in this section attributes
- encoding style. */
- default_elf_asm_named_section (name, flags, decl);
- return;
- }
-
fprintf (asm_out_file, "\t.section\t\"%s\"", name);
if (!(flags & SECTION_DEBUG))
fputc ('\n', asm_out_file);
}
-#endif /* OBJECT_FORMAT_ELF */
/* We do not allow indirect calls to be optimized into sibling calls.
{
return (decl
&& flag_delayed_branch
- && (TARGET_ARCH64 || ! current_function_returns_struct)
+ && (TARGET_ARCH64 || ! cfun->returns_struct)
&& !(TARGET_VXWORKS_RTP
&& flag_pic
&& !targetm.binds_local_p (decl)));
function could not be folded. */
static tree
-sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
+sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
+ tree *args, bool ignore)
{
tree arg0, arg1, arg2;
tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
+ enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
if (ignore
- && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
- && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
+ && icode != CODE_FOR_alignaddrsi_vis
+ && icode != CODE_FOR_alignaddrdi_vis)
return fold_convert (rtype, integer_zero_node);
- switch (DECL_FUNCTION_CODE (fndecl))
+ switch (icode)
{
case CODE_FOR_fexpand_vis:
- arg0 = TREE_VALUE (arglist);
+ arg0 = args[0];
STRIP_NOPS (arg0);
if (TREE_CODE (arg0) == VECTOR_CST)
case CODE_FOR_fmul8x16_vis:
case CODE_FOR_fmul8x16au_vis:
case CODE_FOR_fmul8x16al_vis:
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = args[0];
+ arg1 = args[1];
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
tree inner_type = TREE_TYPE (rtype);
tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
- tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
- inner_type, elts0, elts1);
+ tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
+ elts1);
return build_vector (rtype, n_elts);
}
break;
case CODE_FOR_fpmerge_vis:
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = args[0];
+ arg1 = args[1];
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
break;
case CODE_FOR_pdist_vis:
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ arg0 = args[0];
+ arg1 = args[1];
+ arg2 = args[2];
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
STRIP_NOPS (arg2);
return NULL_TREE;
}
\f
-int
-sparc_extra_constraint_check (rtx op, int c, int strict)
-{
- int reload_ok_mem;
-
- if (TARGET_ARCH64
- && (c == 'T' || c == 'U'))
- return 0;
-
- switch (c)
- {
- case 'Q':
- return fp_sethi_p (op);
-
- case 'R':
- return fp_mov_p (op);
-
- case 'S':
- return fp_high_losum_p (op);
-
- case 'U':
- if (! strict
- || (GET_CODE (op) == REG
- && (REGNO (op) < FIRST_PSEUDO_REGISTER
- || reg_renumber[REGNO (op)] >= 0)))
- return register_ok_for_ldd (op);
-
- return 0;
-
- case 'W':
- case 'T':
- break;
-
- case 'Y':
- return const_zero_operand (op, GET_MODE (op));
-
- default:
- return 0;
- }
-
- /* Our memory extra constraints have to emulate the
- behavior of 'm' and 'o' in order for reload to work
- correctly. */
- if (GET_CODE (op) == MEM)
- {
- reload_ok_mem = 0;
- if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
- && (! strict
- || strict_memory_address_p (Pmode, XEXP (op, 0))))
- reload_ok_mem = 1;
- }
- else
- {
- reload_ok_mem = (reload_in_progress
- && GET_CODE (op) == REG
- && REGNO (op) >= FIRST_PSEUDO_REGISTER
- && reg_renumber [REGNO (op)] < 0);
- }
-
- return reload_ok_mem;
-}
-
/* ??? This duplicates information provided to the compiler by the
??? scheduler description. Some day, teach genautomata to output
??? the latencies and then CSE will just use that. */
static bool
-sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
+sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
+ bool speed ATTRIBUTE_UNUSED)
{
enum machine_mode mode = GET_MODE (x);
bool float_mode_p = FLOAT_MODE_P (mode);
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this, insn, funexp;
+ rtx this_rtx, insn, funexp;
unsigned int int_arg_first;
reload_completed = 1;
/* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
returns a structure, the structure return pointer is there instead. */
- if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, int_arg_first + 1);
+ if (TARGET_ARCH64
+ && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
+ this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
else
- this = gen_rtx_REG (Pmode, int_arg_first);
+ this_rtx = gen_rtx_REG (Pmode, int_arg_first);
/* Add DELTA. When possible use a plain add, otherwise load it into
a register first. */
delta_rtx = scratch;
}
- /* THIS += DELTA. */
- emit_insn (gen_add2_insn (this, delta_rtx));
+ /* THIS_RTX += DELTA. */
+ emit_insn (gen_add2_insn (this_rtx, delta_rtx));
}
- /* Add the word at address (*THIS + VCALL_OFFSET). */
+ /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
if (vcall_offset)
{
rtx vcall_offset_rtx = GEN_INT (vcall_offset);
gcc_assert (vcall_offset < 0);
- /* SCRATCH = *THIS. */
- emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
+ /* SCRATCH = *THIS_RTX. */
+ emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
/* Prepare for adding VCALL_OFFSET. The difficulty is that we
may not have any available scratch register at this point. */
vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
}
- /* SCRATCH = *(*THIS + VCALL_OFFSET). */
+ /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
emit_move_insn (scratch, gen_rtx_MEM (Pmode,
gen_rtx_PLUS (Pmode,
scratch,
vcall_offset_rtx)));
- /* THIS += *(*THIS + VCALL_OFFSET). */
- emit_insn (gen_add2_insn (this, scratch));
+ /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
+ emit_insn (gen_add2_insn (this_rtx, scratch));
}
/* Generate a tail call to the target function. */
start_sequence ();
/* Delay emitting the PIC helper function because it needs to
change the section and we are emitting assembly code. */
- load_pic_register (true); /* clobbers %o7 */
- scratch = legitimize_pic_address (funexp, Pmode, scratch);
+ load_pic_register (); /* clobbers %o7 */
+ scratch = legitimize_pic_address (funexp, scratch);
seq = get_insns ();
end_sequence ();
emit_and_preserve (seq, spill_reg, spill_reg2);
final_start_function (insn, file, 1);
final (insn, file, 1);
final_end_function ();
- free_after_compilation (cfun);
reload_completed = 0;
epilogue_completed = 0;
static struct machine_function *
sparc_init_machine_status (void)
{
- return ggc_alloc_cleared (sizeof (struct machine_function));
+ return GGC_CNEW (struct machine_function);
}
/* Locate some local-dynamic symbol still in use by this function
static void
sparc_file_end (void)
{
- /* If we haven't emitted the special PIC helper function, do so now. */
- if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
- emit_pic_helper ();
+ /* If we need to emit the special PIC helper function, do so now. */
+ if (pic_helper_needed)
+ {
+ unsigned int regno = REGNO (pic_offset_table_rtx);
+ const char *pic_name = reg_names[regno];
+ char name[32];
+#ifdef DWARF2_UNWIND_INFO
+ bool do_cfi;
+#endif
+
+ get_pc_thunk_name (name, regno);
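+ /* Emit the thunk as a hidden comdat function when one-only sections
+ are available, so that all object files share a single copy;
+ otherwise emit it as an ordinary function in the text section. */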
+ if (USE_HIDDEN_LINKONCE)
+ {
+ tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
+ get_identifier (name),
+ build_function_type (void_type_node,
+ void_list_node));
+ DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
+ NULL_TREE, void_type_node);
+ TREE_STATIC (decl) = 1;
+ make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
+ DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
+ DECL_VISIBILITY_SPECIFIED (decl) = 1;
+ allocate_struct_function (decl, true);
+ current_function_decl = decl;
+ init_varasm_status ();
+ assemble_start_function (decl, name);
+ }
+ else
+ {
+ const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+ switch_to_section (text_section);
+ if (align > 0)
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+ }
+
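+ /* Bracket the thunk with CFI directives when unwind info is emitted
+ directly by the assembler. */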
+#ifdef DWARF2_UNWIND_INFO
+ do_cfi = dwarf2out_do_cfi_asm ();
+ if (do_cfi)
+ fprintf (asm_out_file, "\t.cfi_startproc\n");
+#endif
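+ /* On entry %o7 holds the address of the call that reached the thunk;
+ the add folds it into the PIC register and "jmp %o7+8" returns past
+ the call's delay slot. Without delayed branches, do the add first
+ and fill the delay slot with a nop. */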
+ if (flag_delayed_branch)
+ fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
+ pic_name, pic_name);
+ else
+ fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
+ pic_name, pic_name);
+#ifdef DWARF2_UNWIND_INFO
+ if (do_cfi)
+ fprintf (asm_out_file, "\t.cfi_endproc\n");
+#endif
+ }
if (NEED_INDICATE_EXEC_STACK)
file_end_indicate_exec_stack ();
gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
res)));
- sparc_compare_op0 = resv;
- sparc_compare_op1 = val;
- cc = gen_compare_reg (NE);
-
+ cc = gen_compare_reg_1 (NE, resv, val);
emit_insn (gen_rtx_SET (VOIDmode, val, resv));
- sparc_compare_emitted = cc;
- emit_jump_insn (gen_bne (loop_label));
+ /* Use cbranchcc4 to separate the compare and branch! */
+ emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
+ cc, const0_rtx, loop_label));
emit_label (end_label);
emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
+/* Implement TARGET_FRAME_POINTER_REQUIRED. */
+
+bool
+sparc_frame_pointer_required (void)
+{
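+ /* The frame pointer is needed unless the function is a leaf that uses
+ only the leaf registers. */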
+ return !(leaf_function_p () && only_leaf_regs_used ());
+}
+
+/* The way this is structured, we can't eliminate SFP in favor of SP
+ if the frame pointer is required: we want to use the SFP->HFP elimination
+ in that case. But the test in update_eliminables doesn't know we are
+ assuming below that we only do the former elimination. */
+
+bool
+sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+ return (to == HARD_FRAME_POINTER_REGNUM
+ || !targetm.frame_pointer_required ());
+}
+
#include "gt-sparc.h"