/* Subroutines used for code generation on IBM RS/6000.
Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
#include "sched-int.h"
#include "tree-gimple.h"
#include "intl.h"
+#include "params.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
int lr_save_p; /* true if the link reg needs to be saved */
int cr_save_p; /* true if the CR reg needs to be saved */
unsigned int vrsave_mask; /* mask of vec registers to save */
- int toc_save_p; /* true if the TOC needs to be saved */
int push_p; /* true if we need to allocate stack space */
int calls_p; /* true if the function makes any calls */
int world_save_p; /* true if we're saving *everything*:
int cr_save_offset; /* offset to save CR from initial SP */
int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
int spe_gp_save_offset; /* offset to save spe 64-bit gprs */
- int toc_save_offset; /* offset to save the TOC pointer */
int varargs_save_offset; /* offset to save the varargs registers */
int ehrd_offset; /* offset to EH return data */
int reg_size; /* register size (4 or 8) */
int fp_size; /* size of saved FP registers */
int altivec_size; /* size of saved AltiVec registers */
int cr_size; /* size to hold CR if not in save_size */
- int lr_size; /* size to hold LR if not in save_size */
int vrsave_size; /* size to hold VRSAVE if not in save_size */
int altivec_padding_size; /* size of altivec alignment padding if
not in save_size */
int spe_gp_size; /* size of 64-bit GPR save size for SPE */
int spe_padding_size;
- int toc_size; /* size to hold TOC if not in save_size */
HOST_WIDE_INT total_size; /* total bytes allocated for stack */
int spe_64bit_regs_used;
} rs6000_stack_t;
/* Support targetm.vectorize.builtin_mask_for_load. */
static GTY(()) tree altivec_builtin_mask_for_load;
-/* Size of long double */
+/* Size of long double. */
int rs6000_long_double_type_size;
-/* Whether -mabi=altivec has appeared */
+/* IEEE quad extended precision long double. */
+int rs6000_ieeequad;
+
+/* Whether -mabi=altivec has appeared. */
int rs6000_altivec_abi;
/* Nonzero if we want SPE ABI extensions. */
int toc_initialized;
char toc_label_name[10];
-/* Alias set for saves and restores from the rs6000 stack. */
-static GTY(()) int rs6000_sr_alias_set;
+static GTY(()) section *read_only_data_section;
+static GTY(()) section *private_data_section;
+static GTY(()) section *read_only_private_data_section;
+static GTY(()) section *sdata2_section;
+static GTY(()) section *toc_section;
/* Control alignment for fields within structures. */
/* String from -malign-XXXXX. */
static void rs6000_elf_asm_out_constructor (rtx, int);
static void rs6000_elf_asm_out_destructor (rtx, int);
static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
-static void rs6000_elf_select_section (tree, int, unsigned HOST_WIDE_INT);
+static void rs6000_elf_asm_init_sections (void);
+static section *rs6000_elf_select_section (tree, int, unsigned HOST_WIDE_INT);
static void rs6000_elf_unique_section (tree, int);
-static void rs6000_elf_select_rtx_section (enum machine_mode, rtx,
- unsigned HOST_WIDE_INT);
+static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT);
static void rs6000_elf_encode_section_info (tree, rtx, int)
ATTRIBUTE_UNUSED;
-static bool rs6000_elf_in_small_data_p (tree);
#endif
+static bool rs6000_use_blocks_for_constant_p (enum machine_mode, rtx);
#if TARGET_XCOFF
+static void rs6000_xcoff_asm_output_anchor (rtx);
static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
+static void rs6000_xcoff_asm_init_sections (void);
static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
-static void rs6000_xcoff_select_section (tree, int, unsigned HOST_WIDE_INT);
-static void rs6000_xcoff_unique_section (tree, int);
-static void rs6000_xcoff_select_rtx_section (enum machine_mode, rtx,
+static section *rs6000_xcoff_select_section (tree, int,
unsigned HOST_WIDE_INT);
+static void rs6000_xcoff_unique_section (tree, int);
+static section *rs6000_xcoff_select_rtx_section
+ (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
static const char * rs6000_xcoff_strip_name_encoding (const char *);
static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
static void rs6000_xcoff_file_start (void);
static tree rs6000_build_builtin_va_list (void);
static tree rs6000_gimplify_va_arg (tree, tree, tree *, tree *);
static bool rs6000_must_pass_in_stack (enum machine_mode, tree);
+static bool rs6000_scalar_mode_supported_p (enum machine_mode);
static bool rs6000_vector_mode_supported_p (enum machine_mode);
static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
enum machine_mode);
#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
+
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
- (TARGET_DEFAULT | MASK_SCHED_PROLOG)
+ (TARGET_DEFAULT)
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
+/* Use a 32-bit anchor range. This leads to sequences like:
+
+ addis tmp,anchor,high
+ add dest,tmp,low
+
+ where tmp itself acts as an anchor, and can be shared between
+ accesses to the same 64k page. */
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
if (INT_REGNO_P (regno))
return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);
- /* The float registers can only hold floating modes and DImode. */
+ /* The float registers can only hold floating modes and DImode.
+ This also excludes decimal float modes. */
if (FP_REGNO_P (regno))
return
- (GET_MODE_CLASS (mode) == MODE_FLOAT
+ (SCALAR_FLOAT_MODE_P (mode)
+ && !DECIMAL_FLOAT_MODE_P (mode)
&& FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
|| (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD);
= {{"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
{"403", PROCESSOR_PPC403,
POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
- {"405", PROCESSOR_PPC405, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"405fp", PROCESSOR_PPC405, POWERPC_BASE_MASK},
- {"440", PROCESSOR_PPC440, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"440fp", PROCESSOR_PPC440, POWERPC_BASE_MASK},
+ {"405", PROCESSOR_PPC405,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"405fp", PROCESSOR_PPC405,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
+ {"440", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"440fp", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
{"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
{"601", PROCESSOR_PPC601,
MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
{"power5", PROCESSOR_POWER5,
POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GFXOPT
| MASK_MFCRF | MASK_POPCNTB},
+ {"power5+", PROCESSOR_POWER5,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
{"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
{"powerpc64", PROCESSOR_POWERPC64,
POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT
| MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
- | MASK_MFCRF)
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
+ | MASK_DLMZB)
};
rs6000_init_hard_regno_mode_ok ();
if (!rs6000_explicit_options.long_double)
rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+#ifndef POWERPC_LINUX
+ if (!rs6000_explicit_options.abi)
+ rs6000_ieeequad = 1;
+#endif
+
/* Set Altivec ABI as default for powerpc64 linux. */
if (TARGET_ELF && TARGET_64BIT)
{
if (!rs6000_explicit_options.aix_struct_ret)
aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
- if (TARGET_LONG_DOUBLE_128
- && (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN))
+ if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
- /* Allocate an alias set for register saves & restores from stack. */
- rs6000_sr_alias_set = new_alias_set ();
-
if (TARGET_TOC)
ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
/* The Darwin libraries never set errno, so we might as well
avoid calling them when that's the only reason we would. */
flag_errno_math = 0;
+
+ /* Double growth factor to counter reduced min jump length. */
+ set_param_value ("max-grow-copy-bb-insns", 16);
}
/* Implement TARGET_HANDLE_OPTION. */
| MASK_PPC_GFXOPT | MASK_POWERPC64);
break;
case OPT_mfull_toc:
- target_flags &= ~(MASK_MINIMAL_TOC | MASK_NO_FP_IN_TOC
- | MASK_NO_SUM_IN_TOC);
- target_flags_explicit |= (MASK_MINIMAL_TOC | MASK_NO_FP_IN_TOC
- | MASK_NO_SUM_IN_TOC);
+ target_flags &= ~MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 0;
+ TARGET_NO_SUM_IN_TOC = 0;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
#ifdef TARGET_USES_SYSV4_OPT
/* Note, V.4 no longer uses a normal TOC, so make -mfull-toc, be
just the same as -mminimal-toc. */
#else
case OPT_m64:
#endif
- target_flags |= MASK_POWERPC64 | MASK_POWERPC | MASK_PPC_GFXOPT;
- target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC
- | MASK_PPC_GFXOPT;
+ target_flags |= MASK_POWERPC64 | MASK_POWERPC;
+ target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
+ target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
break;
#ifdef TARGET_USES_AIX64_OPT
case OPT_mminimal_toc:
if (value == 1)
{
- target_flags &= ~(MASK_NO_FP_IN_TOC | MASK_NO_SUM_IN_TOC);
- target_flags_explicit |= (MASK_NO_FP_IN_TOC | MASK_NO_SUM_IN_TOC);
+ TARGET_NO_FP_IN_TOC = 0;
+ TARGET_NO_SUM_IN_TOC = 0;
}
break;
warning (0, "Using old darwin ABI");
}
+ else if (! strcmp (arg, "ibmlongdouble"))
+ {
+ rs6000_ieeequad = 0;
+ warning (0, "Using IBM extended precision long double");
+ }
+ else if (! strcmp (arg, "ieeelongdouble"))
+ {
+ rs6000_ieeequad = 1;
+ warning (0, "Using IEEE extended precision long double");
+ }
+
else
{
error ("unknown ABI specified: '%s'", arg);
if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
{
- toc_section ();
- text_section ();
+ switch_to_section (toc_section);
+ switch_to_section (text_section);
}
}
}
}
-/* Returns the constant for the splat instruction, if exists. */
-int
-easy_vector_splat_const (int cst, enum machine_mode mode)
+/* Return true if OP can be synthesized with a particular vspltisb, vspltish
+ or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
+ depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
+ all items are set to the same value and contain COPIES replicas of the
+ vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
+ operand and the others are set to the value of the operand's msb. */
+
+static bool
+vspltis_constant (rtx op, unsigned step, unsigned copies)
{
- switch (mode)
+ enum machine_mode mode = GET_MODE (op);
+ enum machine_mode inner = GET_MODE_INNER (mode);
+
+ unsigned i;
+ unsigned nunits = GET_MODE_NUNITS (mode);
+ unsigned bitsize = GET_MODE_BITSIZE (inner);
+ unsigned mask = GET_MODE_MASK (inner);
+
+ rtx last = CONST_VECTOR_ELT (op, nunits - 1);
+ HOST_WIDE_INT val = INTVAL (last);
+ HOST_WIDE_INT splat_val = val;
+ HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;
+
+  /* Construct the value to be splatted, if possible.  If not, return false.  */
+ for (i = 2; i <= copies; i *= 2)
{
- case V4SImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
- return cst;
- if ((cst & 0xffff) != ((cst >> 16) & 0xffff))
- break;
- cst = cst >> 16;
- /* Fall thru */
+ HOST_WIDE_INT small_val;
+ bitsize /= 2;
+ small_val = splat_val >> bitsize;
+ mask >>= bitsize;
+ if (splat_val != ((small_val << bitsize) | (small_val & mask)))
+ return false;
+ splat_val = small_val;
+ }
- case V8HImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
- return cst;
- if ((cst & 0xff) != ((cst >> 8) & 0xff))
- break;
- cst = cst >> 8;
- /* Fall thru */
+ /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
+ if (EASY_VECTOR_15 (splat_val))
+ ;
- case V16QImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
- return cst;
- default:
- break;
+  /* Also check if we can splat, and then add the result to itself.  Do so if
+     the value is positive, or if the splat instruction is using OP's mode;
+     for splat_val < 0, the splat and the add should use the same mode.  */
+ else if (EASY_VECTOR_15_ADD_SELF (splat_val)
+ && (splat_val >= 0 || (step == 1 && copies == 1)))
+ ;
+
+ else
+ return false;
+
+ /* Check if VAL is present in every STEP-th element, and the
+ other elements are filled with its most significant bit. */
+ for (i = 0; i < nunits - 1; ++i)
+ {
+ HOST_WIDE_INT desired_val;
+ if (((i + 1) & (step - 1)) == 0)
+ desired_val = val;
+ else
+ desired_val = msb_val;
+
+ if (desired_val != INTVAL (CONST_VECTOR_ELT (op, i)))
+ return false;
}
- return 0;
+
+ return true;
}
-/* Return nonzero if all elements of a vector have the same value. */
-int
-easy_vector_same (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+/* Return true if OP is of the given MODE and can be synthesized
+ with a vspltisb, vspltish or vspltisw. */
+
+bool
+easy_altivec_constant (rtx op, enum machine_mode mode)
{
- int units, i, cst;
+ unsigned step, copies;
- units = CONST_VECTOR_NUNITS (op);
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+ else if (mode != GET_MODE (op))
+ return false;
- cst = INTVAL (CONST_VECTOR_ELT (op, 0));
- for (i = 1; i < units; ++i)
- if (INTVAL (CONST_VECTOR_ELT (op, i)) != cst)
- break;
- if (i == units && easy_vector_splat_const (cst, mode))
- return 1;
- return 0;
+ /* Start with a vspltisw. */
+ step = GET_MODE_NUNITS (mode) / 4;
+ copies = 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ /* Then try with a vspltish. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ /* And finally a vspltisb. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ return false;
}
-/* Generate easy_vector_constant out of a easy_vector_constant_add_self. */
+/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
+ result is OP. Abort if it is not possible. */
rtx
-gen_easy_vector_constant_add_self (rtx op)
+gen_easy_altivec_constant (rtx op)
{
- int i, units;
- rtvec v;
- units = GET_MODE_NUNITS (GET_MODE (op));
- v = rtvec_alloc (units);
+ enum machine_mode mode = GET_MODE (op);
+ int nunits = GET_MODE_NUNITS (mode);
+ rtx last = CONST_VECTOR_ELT (op, nunits - 1);
+ unsigned step = nunits / 4;
+ unsigned copies = 1;
+
+ /* Start with a vspltisw. */
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
+
+ /* Then try with a vspltish. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
+
+ /* And finally a vspltisb. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
- for (i = 0; i < units; i++)
- RTVEC_ELT (v, i) =
- GEN_INT (INTVAL (CONST_VECTOR_ELT (op, i)) >> 1);
- return gen_rtx_raw_CONST_VECTOR (GET_MODE (op), v);
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
+
+ gcc_unreachable ();
}
const char *
dest = operands[0];
vec = operands[1];
-
- cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
- cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
mode = GET_MODE (dest);
if (TARGET_ALTIVEC)
{
+ rtx splat_vec;
if (zero_constant (vec, mode))
return "vxor %0,%0,%0";
- gcc_assert (easy_vector_constant (vec, mode));
+ splat_vec = gen_easy_altivec_constant (vec);
+ gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
+ operands[1] = XEXP (splat_vec, 0);
+ if (!EASY_VECTOR_15 (INTVAL (operands[1])))
+ return "#";
- operands[1] = GEN_INT (cst);
- switch (mode)
+ switch (GET_MODE (splat_vec))
{
case V4SImode:
- if (EASY_VECTOR_15 (cst))
- {
- operands[1] = GEN_INT (cst);
- return "vspltisw %0,%1";
- }
- else if (EASY_VECTOR_15_ADD_SELF (cst))
- return "#";
- cst = cst >> 16;
- /* Fall thru */
+ return "vspltisw %0,%1";
case V8HImode:
- if (EASY_VECTOR_15 (cst))
- {
- operands[1] = GEN_INT (cst);
- return "vspltish %0,%1";
- }
- else if (EASY_VECTOR_15_ADD_SELF (cst))
- return "#";
- cst = cst >> 8;
- /* Fall thru */
+ return "vspltish %0,%1";
case V16QImode:
- if (EASY_VECTOR_15 (cst))
- {
- operands[1] = GEN_INT (cst);
- return "vspltisb %0,%1";
- }
- else if (EASY_VECTOR_15_ADD_SELF (cst))
- return "#";
+ return "vspltisb %0,%1";
default:
gcc_unreachable ();
FIXME: We should probably return # and add post reload
splitters for these, but this way is so easy ;-). */
- operands[1] = GEN_INT (cst);
- operands[2] = GEN_INT (cst2);
+ cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
+ cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
+ operands[1] = CONST_VECTOR_ELT (vec, 0);
+ operands[2] = CONST_VECTOR_ELT (vec, 1);
if (cst == cst2)
return "li %0,%1\n\tevmergelo %0,%0,%0";
else
gen_rtx_XOR (mode, target, target)));
return;
}
- else if (mode != V4SFmode && easy_vector_same (vals, mode))
+ else if (mode != V4SFmode && easy_vector_constant (vals, mode))
{
/* Splat immediate. */
- x = gen_rtx_VEC_DUPLICATE (mode, CONST_VECTOR_ELT (vals, 0));
- emit_insn (gen_rtx_SET (VOIDmode, target, x));
+ emit_insn (gen_rtx_SET (VOIDmode, target, vals));
return;
}
else if (all_same)
field is an FP double while the FP fields remain word aligned. */
unsigned int
-rs6000_special_round_type_align (tree type, int computed, int specified)
+rs6000_special_round_type_align (tree type, unsigned int computed,
+ unsigned int specified)
{
+ unsigned int align = MAX (computed, specified);
tree field = TYPE_FIELDS (type);
/* Skip all non field decls */
while (field != NULL && TREE_CODE (field) != FIELD_DECL)
field = TREE_CHAIN (field);
- if (field == NULL || field == type || DECL_MODE (field) != DFmode)
- return MAX (computed, specified);
+ if (field != NULL && field != type)
+ {
+ type = TREE_TYPE (field);
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
- return MAX (MAX (computed, specified), 64);
+ if (type != error_mark_node && TYPE_MODE (type) == DFmode)
+ align = MAX (align, 64);
+ }
+
+ return align;
}
/* Return 1 for an operand in small memory on V.4/eabi. */
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
- if (!REG_P (op0) || !REG_P (op1))
- return false;
+ /* Recognize the rtl generated by reload which we know will later be
+ replaced with proper base and index regs. */
+ if (!strict
+ && reload_in_progress
+ && (REG_P (op0) || GET_CODE (op0) == PLUS)
+ && REG_P (op1))
+ return true;
- return ((INT_REG_OK_FOR_BASE_P (op0, strict)
- && INT_REG_OK_FOR_INDEX_P (op1, strict))
- || (INT_REG_OK_FOR_BASE_P (op1, strict)
- && INT_REG_OK_FOR_INDEX_P (op0, strict)));
+ return (REG_P (op0) && REG_P (op1)
+ && ((INT_REG_OK_FOR_BASE_P (op0, strict)
+ && INT_REG_OK_FOR_INDEX_P (op1, strict))
+ || (INT_REG_OK_FOR_BASE_P (op1, strict)
+ && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
inline bool
{
rtx r3, got, tga, tmp1, tmp2, eqv;
+ /* We currently use relocations like @got@tlsgd for tls, which
+ means the linker will handle allocation of tls entries, placing
+ them in the .got section. So use a pointer to the .got section,
+ not one to secondary TOC sections used by 64-bit -mminimal-toc,
+ or to secondary GOT sections used by 32-bit -fPIC. */
if (TARGET_64BIT)
- got = gen_rtx_REG (Pmode, TOC_REGISTER);
+ got = gen_rtx_REG (Pmode, 2);
else
{
if (flag_pic == 1)
return x;
}
-#if TARGET_MACHO
if (GET_CODE (x) == SYMBOL_REF
- && DEFAULT_ABI == ABI_DARWIN
&& !ALTIVEC_VECTOR_MODE (mode)
+#if TARGET_MACHO
+ && DEFAULT_ABI == ABI_DARWIN
&& (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+#else
+ && DEFAULT_ABI == ABI_V4
+ && !flag_pic
+#endif
/* Don't do this for TFmode, since the result isn't offsettable.
The same goes for DImode without 64-bit gprs. */
&& mode != TFmode
&& (mode != DImode || TARGET_POWERPC64))
{
+#if TARGET_MACHO
if (flag_pic)
{
rtx offset = gen_rtx_CONST (Pmode,
gen_rtx_HIGH (Pmode, offset)), offset);
}
else
+#endif
x = gen_rtx_LO_SUM (GET_MODE (x),
gen_rtx_HIGH (Pmode, x), x);
*win = 1;
return x;
}
-#endif
/* Reload an offset address wrapped by an AND that represents the
masking of the lower bits. Strip the outer AND and let reload
/* 128-bit constant floating-point values on Darwin should really be
loaded as two parts. */
- if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ if (!TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128
&& mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
{
= CONSTANT_POOL_ADDRESS_P (operands[1]);
SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
- SYMBOL_REF_DECL (new_ref) = SYMBOL_REF_DECL (operands[1]);
+ SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
operands[1] = new_ref;
}
\f
/* Nonzero if we can use a floating-point register to pass this arg. */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
- (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ (SCALAR_FLOAT_MODE_P (MODE) \
+ && !DECIMAL_FLOAT_MODE_P (MODE) \
&& (CUM)->fregno <= FP_ARG_MAX_REG \
&& TARGET_HARD_FLOAT && TARGET_FPRS)
return true;
}
- if (DEFAULT_ABI == ABI_V4 && TYPE_MODE (type) == TFmode)
+ if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
return true;
return false;
else if (DEFAULT_ABI == ABI_V4)
{
if (TARGET_HARD_FLOAT && TARGET_FPRS
- && (mode == SFmode || mode == DFmode))
+ && (mode == SFmode || mode == DFmode
+ || (mode == TFmode && !TARGET_IEEEQUAD)))
{
- if (cum->fregno <= FP_ARG_V4_MAX_REG)
- cum->fregno++;
+ if (cum->fregno + (mode == TFmode ? 1 : 0) <= FP_ARG_V4_MAX_REG)
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
else
{
- if (mode == DFmode)
+ cum->fregno = FP_ARG_V4_MAX_REG + 1;
+ if (mode == DFmode || mode == TFmode)
cum->words += cum->words & 1;
cum->words += rs6000_arg_size (mode, type);
}
cum->words = align_words + n_words;
- if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ if (SCALAR_FLOAT_MODE_P (mode)
+ && !DECIMAL_FLOAT_MODE_P (mode)
&& TARGET_HARD_FLOAT && TARGET_FPRS)
cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
else if (abi == ABI_V4)
{
if (TARGET_HARD_FLOAT && TARGET_FPRS
- && (mode == SFmode || mode == DFmode))
+ && (mode == SFmode || mode == DFmode
+ || (mode == TFmode && !TARGET_IEEEQUAD)))
{
- if (cum->fregno <= FP_ARG_V4_MAX_REG)
+ if (cum->fregno + (mode == TFmode ? 1 : 0) <= FP_ARG_V4_MAX_REG)
return gen_rtx_REG (mode, cum->fregno);
else
return NULL_RTX;
enum machine_mode mode, tree type,
bool named ATTRIBUTE_UNUSED)
{
- if (DEFAULT_ABI == ABI_V4 && mode == TFmode)
+ if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
{
if (TARGET_DEBUG_ARG)
fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
f_sav = TREE_CHAIN (f_ovf);
valist = build_va_arg_indirect_ref (valist);
- gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
- fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
- sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
/* Count number of gp and fp argument registers used. */
words = current_function_args_info.words;
if (cfun->va_list_gpr_size)
{
- t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
- build_int_cst (NULL_TREE, n_gpr));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
+ build_int_cst (NULL_TREE, n_gpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
if (cfun->va_list_fpr_size)
{
- t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
- build_int_cst (NULL_TREE, n_fpr));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
+ build_int_cst (NULL_TREE, n_fpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Find the overflow area. */
t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
if (words != 0)
- t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
- build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
- t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ t = build2 (PLUS_EXPR, TREE_TYPE (ovf), t,
+ build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Find the register save area. */
t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
if (cfun->machine->varargs_save_offset)
- t = build (PLUS_EXPR, TREE_TYPE (sav), t,
- build_int_cst (NULL_TREE, cfun->machine->varargs_save_offset));
- t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
+ t = build2 (PLUS_EXPR, TREE_TYPE (sav), t,
+ build_int_cst (NULL_TREE, cfun->machine->varargs_save_offset));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
post_p);
- return build (COMPLEX_EXPR, type, real_part, imag_part);
+ return build2 (COMPLEX_EXPR, type, real_part, imag_part);
}
}
f_sav = TREE_CHAIN (f_ovf);
valist = build_va_arg_indirect_ref (valist);
- gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
- fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
- sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
size = int_size_in_bytes (type);
rsize = (size + 3) / 4;
align = 1;
if (TARGET_HARD_FLOAT && TARGET_FPRS
- && (TYPE_MODE (type) == SFmode || TYPE_MODE (type) == DFmode))
+ && (TYPE_MODE (type) == SFmode
+ || TYPE_MODE (type) == DFmode
+ || TYPE_MODE (type) == TFmode))
{
/* FP args go in FP registers, if present. */
reg = fpr;
- n_reg = 1;
+ n_reg = (size + 7) / 8;
sav_ofs = 8*4;
sav_scale = 8;
- if (TYPE_MODE (type) == DFmode)
+ if (TYPE_MODE (type) != SFmode)
align = 8;
}
else
As are any other 2 gpr item such as complex int due to a
historical mistake. */
u = reg;
- if (n_reg == 2)
+ if (n_reg == 2 && reg == gpr)
{
u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), reg,
size_int (n_reg - 1));
{
/* Ensure that we don't find any more args in regs.
Alignment has taken care of the n_reg == 2 case. */
- t = build (MODIFY_EXPR, TREE_TYPE (reg), reg, size_int (8));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (reg), reg, size_int (8));
gimplify_and_add (t, pre_p);
}
}
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkpx, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX },
- { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhss, "__builtin_altivec_vpkuhss", ALTIVEC_BUILTIN_VPKUHSS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkshss, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS },
- { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwss, "__builtin_altivec_vpkuwss", ALTIVEC_BUILTIN_VPKUWSS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkswss, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkuhus, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vpkshus, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS },
{
rtx pat;
tree arg0 = TREE_VALUE (arglist);
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
{
rtx pat, scratch1, scratch2;
tree arg0 = TREE_VALUE (arglist);
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
rtx pat;
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
tree cr6_form = TREE_VALUE (arglist);
tree arg0 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg1 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
enum machine_mode tmode = SImode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = Pmode;
enum machine_mode mode1 = Pmode;
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
if (icode == CODE_FOR_nothing)
/* Builtin not supported on this processor. */
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
rtx pat;
enum machine_mode mode0 = insn_data[icode].operand[0].mode;
enum machine_mode mode1 = insn_data[icode].operand[1].mode;
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
rtx pat, addr;
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode1 = Pmode;
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
*expandedp = true;
arg0 = TREE_VALUE (arglist);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
tmode = insn_data[icode].operand[0].mode;
mode0 = insn_data[icode].operand[1].mode;
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
mode0 = insn_data[icode].operand[0].mode;
mode1 = insn_data[icode].operand[1].mode;
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
mode2 = insn_data[d->icode].operand[2].mode;
for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
{
- rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
+ rtx x = expand_normal (TREE_VALUE (arglist));
RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
}
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
elt = get_element_number (TREE_TYPE (arg0), arg1);
tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
case ALTIVEC_BUILTIN_MTVSCR:
icode = CODE_FOR_altivec_mtvscr;
arg0 = TREE_VALUE (arglist);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
mode0 = insn_data[icode].operand[0].mode;
/* If we got invalid arguments bail out before generating bad rtl. */
icode = CODE_FOR_altivec_dss;
arg0 = TREE_VALUE (arglist);
STRIP_NOPS (arg0);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
mode0 = insn_data[icode].operand[0].mode;
/* If we got invalid arguments bail out before generating bad rtl. */
case SPE_BUILTIN_MTSPEFSCR:
icode = CODE_FOR_spe_mtspefscr;
arg0 = TREE_VALUE (arglist);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
mode0 = insn_data[icode].operand[0].mode;
if (arg0 == error_mark_node)
tree form = TREE_VALUE (arglist);
tree arg0 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg1 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
int form_int;
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
tree arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
- rtx op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx op3 = expand_normal (arg3);
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
altivec_init_builtins ();
if (TARGET_ALTIVEC || TARGET_SPE)
rs6000_common_init_builtins ();
+
+#if TARGET_XCOFF
+ /* AIX libm provides clog as __clog. */
+ if (built_in_decls [BUILT_IN_CLOG])
+ set_user_assembler_name (built_in_decls [BUILT_IN_CLOG], "__clog");
+#endif
}
/* Search through a set of builtins and enable the mask bits.
if (!TARGET_HARD_FLOAT)
return;
- if (DEFAULT_ABI != ABI_V4)
+ if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
+ && !TARGET_POWER2 && !TARGET_POWERPC)
{
- if (TARGET_XCOFF && ! TARGET_POWER2 && ! TARGET_POWERPC)
- {
- /* AIX library routines for float->int conversion. */
- set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
- set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
- set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
- set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
- }
+ /* AIX library routines for float->int conversion. */
+ set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
+ set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
+ }
+ if (!TARGET_IEEEQUAD)
/* AIX/Darwin/64-bit Linux quad floating point routines. */
- if (!TARGET_XL_COMPAT)
- {
- set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
- set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
- set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
- set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
- }
- else
- {
- set_optab_libfunc (add_optab, TFmode, "_xlqadd");
- set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
- set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
- set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
- }
- }
+ if (!TARGET_XL_COMPAT)
+ {
+ set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
+ set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
+ set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
+ }
+ else
+ {
+ set_optab_libfunc (add_optab, TFmode, "_xlqadd");
+ set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
+ set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
+ }
else
{
/* 32-bit SVR4 quad floating point routines. */
set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
+ set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
}
}
NO_REGS is returned. */
enum reg_class
-secondary_reload_class (enum reg_class class,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- rtx in)
+rs6000_secondary_reload_class (enum reg_class class,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx in)
{
int regno;
else
{
gcc_assert (GET_CODE (tmp) == PLUS
- && GET_CODE (XEXP (tmp, 1)) == REG);
+ && REG_P (XEXP (tmp, 0))
+ && REG_P (XEXP (tmp, 1)));
if (REGNO (XEXP (tmp, 0)) == 0)
fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
}
else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
{
+ gcc_assert (REG_P (XEXP (x, 0)));
if (REGNO (XEXP (x, 0)) == 0)
fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
reg_names[ REGNO (XEXP (x, 0)) ]);
/* Special handling for SI values. */
if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
{
- extern int in_toc_section (void);
static int recurse = 0;
/* For -mrelocatable, we mark all addresses that need to be fixed up
in the .fixup section. */
if (TARGET_RELOCATABLE
- && !in_toc_section ()
- && !in_text_section ()
- && !in_unlikely_text_section ()
+ && in_section != toc_section
+ && in_section != text_section
+ && !unlikely_text_section_p (in_section)
&& !recurse
&& GET_CODE (x) != CONST_INT
&& GET_CODE (x) != CONST_DOUBLE
CLOBBERs to match cmptf_internal2 pattern. */
if (comp_mode == CCFPmode && TARGET_XL_COMPAT
&& GET_MODE (rs6000_compare_op0) == TFmode
- && (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ && !TARGET_IEEEQUAD
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
emit_insn (gen_rtx_PARALLEL (VOIDmode,
gen_rtvec (9,
return 0;
}
else if (TARGET_E500 && TARGET_HARD_FLOAT && !TARGET_FPRS
- && GET_MODE_CLASS (compare_mode) == MODE_FLOAT)
+ && SCALAR_FLOAT_MODE_P (compare_mode))
return 0;
is_against_zero = op1 == CONST0_RTX (compare_mode);
can't be generated if we care about that. It's safe if one side
of the construct is zero, since then no subtract will be
generated. */
- if (GET_MODE_CLASS (compare_mode) == MODE_FLOAT
+ if (SCALAR_FLOAT_MODE_P (compare_mode)
&& flag_trapping_math && ! is_against_zero)
return 0;
emit_insn (gen_isync ());
}
+void
+rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx addrSI, align, wdst, shift, mask;
+ HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
+ HOST_WIDE_INT imask = GET_MODE_MASK (mode);
+
+ /* Shift amount for subword relative to aligned word. */
+ addrSI = force_reg (SImode, gen_lowpart_common (SImode, XEXP (mem, 0)));
+ shift = gen_reg_rtx (SImode);
+ emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
+ GEN_INT (shift_mask)));
+ emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
+
+ /* Shift and mask old value into position within word. */
+ oldval = convert_modes (SImode, mode, oldval, 1);
+ oldval = expand_binop (SImode, and_optab,
+ oldval, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (oldval, oldval, shift));
+
+ /* Shift and mask new value into position within word. */
+ newval = convert_modes (SImode, mode, newval, 1);
+ newval = expand_binop (SImode, and_optab,
+ newval, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newval, newval, shift));
+
+ /* Mask for insertion. */
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ /* Address of aligned word containing subword. */
+ align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ mem = change_address (mem, SImode, align);
+ set_mem_align (mem, 32);
+ MEM_VOLATILE_P (mem) = 1;
+
+ wdst = gen_reg_rtx (SImode);
+ emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
+ oldval, newval, mem));
+
+ emit_move_insn (dst, gen_lowpart (mode, wdst));
+}
+
+void
+rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
+ rtx oldval, rtx newval, rtx mem,
+ rtx scratch)
+{
+ rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+ label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (SImode, scratch, mem);
+
+ /* Mask subword within loaded value for comparison with oldval.
+ Use UNSPEC_AND to avoid clobber.  */
+ emit_insn (gen_rtx_SET (SImode, dest,
+ gen_rtx_UNSPEC (SImode,
+ gen_rtvec (2, scratch, mask),
+ UNSPEC_AND)));
+
+ x = gen_rtx_COMPARE (CCmode, dest, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label2);
+
+ /* Clear subword within loaded value for insertion of new value. */
+ emit_insn (gen_rtx_SET (SImode, scratch,
+ gen_rtx_AND (SImode,
+ gen_rtx_NOT (SImode, mask), scratch)));
+ emit_insn (gen_iorsi3 (scratch, scratch, newval));
+ emit_store_conditional (SImode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_isync ());
+ emit_label (XEXP (label2, 0));
+}
+
+
/* Emit instructions to move SRC to DST. Called by splitters for
multi-register moves. It will emit at most one instruction for
each register that is accessed; that is, it won't emit li/lis pairs
static rs6000_stack_t *
rs6000_stack_info (void)
{
- static rs6000_stack_t info, zero_info;
+ static rs6000_stack_t info;
rs6000_stack_t *info_ptr = &info;
int reg_size = TARGET_32BIT ? 4 : 8;
int ehrd_size;
int save_align;
HOST_WIDE_INT non_fixed_size;
- /* Zero all fields portably. */
- info = zero_info;
+ memset (&info, 0, sizeof (info));
if (TARGET_SPE)
{
|| cfun->machine->ra_needs_full_frame);
/* Determine if we need to save the link register. */
- if (rs6000_ra_ever_killed ()
- || (DEFAULT_ABI == ABI_AIX
- && current_function_profile
- && !TARGET_PROFILE_KERNEL)
+ if ((DEFAULT_ABI == ABI_AIX
+ && current_function_profile
+ && !TARGET_PROFILE_KERNEL)
#ifdef TARGET_RELOCATABLE
|| (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
&& !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
|| info_ptr->first_altivec_reg_save <= LAST_ALTIVEC_REGNO
|| (DEFAULT_ABI == ABI_V4 && current_function_calls_alloca)
- || info_ptr->calls_p)
+ || info_ptr->calls_p
+ || rs6000_ra_ever_killed ())
{
info_ptr->lr_save_p = 1;
regs_ever_live[LINK_REGISTER_REGNUM] = 1;
- info_ptr->spe_gp_size;
/* Adjust for SPE case. */
- info_ptr->toc_save_offset
- = info_ptr->spe_gp_save_offset - info_ptr->toc_size;
+ info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
}
else if (TARGET_ALTIVEC_ABI)
{
- info_ptr->altivec_size;
/* Adjust for AltiVec case. */
- info_ptr->toc_save_offset
- = info_ptr->altivec_save_offset - info_ptr->toc_size;
+ info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
}
else
- info_ptr->toc_save_offset = info_ptr->cr_save_offset - info_ptr->toc_size;
- info_ptr->ehrd_offset = info_ptr->toc_save_offset - ehrd_size;
+ info_ptr->ehrd_offset = info_ptr->cr_save_offset;
+ info_ptr->ehrd_offset -= ehrd_size;
info_ptr->lr_save_offset = reg_size;
break;
}
+ info_ptr->spe_padding_size
+ ehrd_size
+ info_ptr->cr_size
- + info_ptr->lr_size
- + info_ptr->vrsave_size
- + info_ptr->toc_size,
+ + info_ptr->vrsave_size,
save_align);
non_fixed_size = (info_ptr->vars_size
if (! info_ptr->cr_save_p)
info_ptr->cr_save_offset = 0;
- if (! info_ptr->toc_save_p)
- info_ptr->toc_save_offset = 0;
-
return info_ptr;
}
if (info->cr_save_p)
fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
- if (info->toc_save_p)
- fprintf (stderr, "\ttoc_save_p = %5d\n", info->toc_save_p);
-
if (info->vrsave_mask)
fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
if (info->cr_save_offset)
fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
- if (info->toc_save_offset)
- fprintf (stderr, "\ttoc_save_offset = %5d\n", info->toc_save_offset);
-
if (info->varargs_save_offset)
fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
fprintf (stderr, "\tspe_padding_size = %5d\n",
info->spe_padding_size);
- if (info->lr_size)
- fprintf (stderr, "\tlr_size = %5d\n", info->lr_size);
-
if (info->cr_size)
fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
- if (info->toc_size)
- fprintf (stderr, "\ttoc_size = %5d\n", info->toc_size);
-
if (info->save_size)
fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
{
if (INSN_P (insn))
{
- if (FIND_REG_INC_NOTE (insn, reg))
- return 1;
- else if (GET_CODE (insn) == CALL_INSN
- && !SIBLING_CALL_P (insn))
+ if (CALL_P (insn))
+ {
+ if (!SIBLING_CALL_P (insn))
+ return 1;
+ }
+ else if (find_regno_note (insn, REG_INC, LINK_REGISTER_REGNUM))
return 1;
else if (set_of (reg, insn) != NULL_RTX
&& !prologue_epilogue_contains (insn))
|| current_function_calls_alloca
|| info->total_size > 32767)
{
- emit_move_insn (operands[1], gen_rtx_MEM (Pmode, frame_rtx));
+ tmp = gen_frame_mem (Pmode, frame_rtx);
+ emit_move_insn (operands[1], tmp);
frame_rtx = operands[1];
}
else if (info->push_p)
sp_offset = info->total_size;
tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
- tmp = gen_rtx_MEM (Pmode, tmp);
+ tmp = gen_frame_mem (Pmode, tmp);
emit_move_insn (tmp, operands[0]);
}
else
rtx
create_TOC_reference (rtx symbol)
{
+ if (no_new_pseudos)
+ regs_ever_live[TOC_REGISTER] = 1;
return gen_rtx_PLUS (Pmode,
gen_rtx_REG (Pmode, TOC_REGISTER),
gen_rtx_CONST (Pmode,
rtx tocompare = gen_reg_rtx (SImode);
rtx no_toc_save_needed = gen_label_rtx ();
- mem = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
+ mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
emit_move_insn (stack_top, mem);
- mem = gen_rtx_MEM (Pmode,
- gen_rtx_PLUS (Pmode, stack_top,
- GEN_INT (2 * GET_MODE_SIZE (Pmode))));
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (2 * GET_MODE_SIZE (Pmode))));
emit_move_insn (opcode_addr, mem);
emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
SImode, NULL_RTX, NULL_RTX,
no_toc_save_needed);
- mem = gen_rtx_MEM (Pmode,
- gen_rtx_PLUS (Pmode, stack_top,
- GEN_INT (5 * GET_MODE_SIZE (Pmode))));
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (5 * GET_MODE_SIZE (Pmode))));
emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
emit_label (no_toc_save_needed);
}
\f
-/* This ties together stack memory (MEM with an alias set of
- rs6000_sr_alias_set) and the change to the stack pointer. */
+/* This ties together stack memory (MEM with an alias set of frame_alias_set)
+ and the change to the stack pointer. */
static void
rs6000_emit_stack_tie (void)
{
- rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
+ rtx mem = gen_frame_mem (BLKmode,
+ gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
- set_mem_alias_set (mem, rs6000_sr_alias_set);
emit_insn (gen_stack_tie (mem));
}
reg = gen_rtx_REG (mode, regno);
addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
- mem = gen_rtx_MEM (mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (mode, addr);
insn = emit_move_insn (mem, reg);
else
offset_rtx = int_rtx;
- return gen_rtx_MEM (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
+ return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
/* Look for user-defined global regs. We should not save and restore these,
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->fp_save_offset
+ sp_offset + 8 * i));
- rtx mem = gen_rtx_MEM (DFmode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (DFmode, addr);
RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
}
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->altivec_save_offset
+ sp_offset + 16 * i));
- rtx mem = gen_rtx_MEM (V4SImode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (V4SImode, addr);
RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
}
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->gp_save_offset
+ sp_offset + reg_size * i));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (reg_mode, addr);
RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
}
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->cr_save_offset
+ sp_offset));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (reg_mode, addr);
RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
}
emit_move_insn (areg, GEN_INT (offset));
/* AltiVec addressing mode is [reg+reg]. */
- mem = gen_rtx_MEM (V4SImode,
- gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (V4SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
insn = emit_move_insn (mem, savereg);
{
/* Save VRSAVE. */
offset = info->vrsave_save_offset + sp_offset;
- mem
- = gen_rtx_MEM (SImode,
- gen_rtx_PLUS (Pmode, frame_reg_rtx, GEN_INT (offset)));
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (offset)));
insn = emit_move_insn (mem, reg);
}
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->fp_save_offset
+ sp_offset + 8*i));
- mem = gen_rtx_MEM (DFmode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (DFmode, addr);
RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg);
}
GEN_INT (info->gp_save_offset
+ sp_offset
+ reg_size * i));
- mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (reg_mode, addr);
RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
}
b = GEN_INT (offset);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, b);
- mem = gen_rtx_MEM (V2SImode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (V2SImode, addr);
insn = emit_move_insn (mem, reg);
if (GET_CODE (b) == CONST_INT)
GEN_INT (info->gp_save_offset
+ sp_offset
+ reg_size * i));
- mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (reg_mode, addr);
insn = emit_move_insn (mem, reg);
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
reg = gen_rtx_REG (reg_mode, 2);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (sp_offset + 5 * reg_size));
- mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (reg_mode, addr);
insn = emit_move_insn (mem, reg);
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
GEN_INT (info->lr_save_offset + sp_offset));
rtx reg = gen_rtx_REG (Pmode, 0);
rtx mem = gen_rtx_MEM (Pmode, addr);
- /* This should not be of rs6000_sr_alias_set, because of
+ /* This should not be of frame_alias_set, because of
__builtin_return_address. */
insn = emit_move_insn (mem, reg);
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->cr_save_offset + sp_offset));
- rtx mem = gen_rtx_MEM (SImode, addr);
+ rtx mem = gen_frame_mem (SImode, addr);
/* See the large comment above about why CR2_REGNO is used. */
rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
-
/* If r12 was used to hold the original sp, copy cr into r0 now
that it's free. */
if (REGNO (frame_reg_rtx) == 12)
rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->cr_save_offset));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (reg_mode, addr);
RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
}
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->gp_save_offset
+ reg_size * i));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (reg_mode, addr);
RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
}
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->altivec_save_offset
+ 16 * i));
- rtx mem = gen_rtx_MEM (V4SImode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (V4SImode, addr);
RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
}
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->fp_save_offset
+ 8 * i));
- rtx mem = gen_rtx_MEM (DFmode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (DFmode, addr);
RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
}
emit_move_insn (frame_reg_rtx,
gen_rtx_MEM (Pmode, sp_reg_rtx));
-
}
else if (info->push_p)
{
/* AltiVec addressing mode is [reg+reg]. */
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
- mem = gen_rtx_MEM (V4SImode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (V4SImode, addr);
emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
}
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->vrsave_save_offset + sp_offset));
- mem = gen_rtx_MEM (SImode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (SImode, addr);
reg = gen_rtx_REG (SImode, 12);
emit_move_insn (reg, mem);
rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
info->lr_save_offset + sp_offset);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
-
emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
}
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->cr_save_offset + sp_offset));
- rtx mem = gen_rtx_MEM (SImode, addr);
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (SImode, addr);
emit_move_insn (gen_rtx_REG (SImode, 12), mem);
}
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (sp_offset + 5 * reg_size));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (reg_mode, addr);
emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
}
mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
info->ehrd_offset + sp_offset
+ reg_size * (int) i);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
}
GEN_INT (info->gp_save_offset
+ sp_offset
+ reg_size * i));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
-
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ rtx mem = gen_frame_mem (reg_mode, addr);
RTVEC_ELT (p, i) =
gen_rtx_SET (VOIDmode,
GEN_INT (info->gp_save_offset
+ sp_offset
+ reg_size * i));
- rtx mem = gen_rtx_MEM (reg_mode, addr);
+ rtx mem = gen_frame_mem (reg_mode, addr);
/* Restore 64-bit quantities for SPE. */
if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
b = GEN_INT (offset);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, b);
- mem = gen_rtx_MEM (V2SImode, addr);
+ mem = gen_frame_mem (V2SImode, addr);
}
- set_mem_alias_set (mem, rs6000_sr_alias_set);
-
emit_move_insn (gen_rtx_REG (reg_mode,
info->first_gp_reg_save + i), mem);
}
GEN_INT (info->fp_save_offset
+ sp_offset
+ 8 * i));
- mem = gen_rtx_MEM (DFmode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (DFmode, addr);
emit_move_insn (gen_rtx_REG (DFmode,
info->first_fp_reg_save + i),
}
/* If this is V.4, unwind the stack pointer after all of the loads
- have been done. We need to emit a block here so that sched
- doesn't decide to move the sp change before the register restores
- (which may not have any obvious dependency on the stack). This
- doesn't hurt performance, because there is no scheduling that can
- be done after this point. */
- if (DEFAULT_ABI == ABI_V4
- || current_function_calls_eh_return)
+ have been done. */
+ if (frame_reg_rtx != sp_reg_rtx)
{
- if (frame_reg_rtx != sp_reg_rtx)
- rs6000_emit_stack_tie ();
-
- if (use_backchain_to_restore_sp)
- {
- emit_move_insn (sp_reg_rtx, frame_reg_rtx);
- }
- else if (sp_offset != 0)
- {
- emit_insn (TARGET_32BIT
- ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
- GEN_INT (sp_offset))
- : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
- GEN_INT (sp_offset)));
- }
+ /* This blockage is needed so that sched doesn't decide to move
+ the sp change before the register restores. */
+ rs6000_emit_stack_tie ();
+ emit_move_insn (sp_reg_rtx, frame_reg_rtx);
}
+ else if (sp_offset != 0)
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (sp_offset))
+ : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (sp_offset)));
if (current_function_calls_eh_return)
{
rtx addr, mem;
addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
GEN_INT (info->fp_save_offset + 8*i));
- mem = gen_rtx_MEM (DFmode, addr);
- set_mem_alias_set (mem, rs6000_sr_alias_set);
+ mem = gen_frame_mem (DFmode, addr);
RTVEC_ELT (p, i+3) =
gen_rtx_SET (VOIDmode,
rs6000_output_function_epilogue (FILE *file,
HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
- rs6000_stack_t *info = rs6000_stack_info ();
-
if (! HAVE_epilogue)
{
rtx insn = get_last_insn ();
System V.4 Powerpc's (and the embedded ABI derived from it) use a
different traceback table. */
if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
- && rs6000_traceback != traceback_none)
+ && rs6000_traceback != traceback_none && !current_function_is_thunk)
{
const char *fname = NULL;
const char *language_string = lang_hooks.name;
int fixed_parms = 0, float_parms = 0, parm_info = 0;
int i;
int optional_tbtab;
+ rs6000_stack_t *info = rs6000_stack_info ();
if (rs6000_traceback == traceback_full)
optional_tbtab = 1;
if (GET_CODE (parameter) == REG)
{
- if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ if (SCALAR_FLOAT_MODE_P (mode))
{
int bits;
const char *name = buf;
const char *real_name;
rtx base = x;
- int offset = 0;
+ HOST_WIDE_INT offset = 0;
gcc_assert (!TARGET_NO_TOC);
/* Handle FP constants specially. Note that if we have a minimal
TOC, things we put here aren't actually in the TOC, so we can allow
FP constants. */
- if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == TFmode)
+ if (GET_CODE (x) == CONST_DOUBLE &&
+ (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
{
REAL_VALUE_TYPE rv;
long k[4];
REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
- REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
+ else
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
if (TARGET_64BIT)
{
return;
}
}
- else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
+ else if (GET_CODE (x) == CONST_DOUBLE &&
+ (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
{
REAL_VALUE_TYPE rv;
long k[2];
REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
- REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
+ else
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
if (TARGET_64BIT)
{
return;
}
}
- else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
+ else if (GET_CODE (x) == CONST_DOUBLE &&
+ (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
{
REAL_VALUE_TYPE rv;
long l;
REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
- REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
+ else
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
if (TARGET_64BIT)
{
fprintf (file, "\t.tc %s", real_name);
if (offset < 0)
- fprintf (file, ".N%d", - offset);
+ fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
else if (offset)
- fprintf (file, ".P%d", offset);
+ fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
fputs ("[TC],", file);
}
{
RS6000_OUTPUT_BASENAME (file, name);
if (offset < 0)
- fprintf (file, "%d", offset);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
else if (offset > 0)
- fprintf (file, "+%d", offset);
+ fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
}
else
output_addr_const (file, x);
static rtx
get_next_active_insn (rtx insn, rtx tail)
{
- rtx next_insn;
-
- if (!insn || insn == tail)
+ if (insn == NULL_RTX || insn == tail)
return NULL_RTX;
- next_insn = NEXT_INSN (insn);
-
- while (next_insn
- && next_insn != tail
- && (GET_CODE (next_insn) == NOTE
- || GET_CODE (PATTERN (next_insn)) == USE
- || GET_CODE (PATTERN (next_insn)) == CLOBBER))
+ while (1)
{
- next_insn = NEXT_INSN (next_insn);
- }
-
- if (!next_insn || next_insn == tail)
- return NULL_RTX;
+ insn = NEXT_INSN (insn);
+ if (insn == NULL_RTX || insn == tail)
+ return NULL_RTX;
- return next_insn;
+ if (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && INSN_CODE (insn) != CODE_FOR_stack_tie))
+ break;
+ }
+ return insn;
}
/* Return whether the presence of INSN causes a dispatch group termination
error ("use of boolean types in AltiVec types is invalid");
else if (TREE_CODE (type) == COMPLEX_TYPE)
error ("use of %<complex%> in AltiVec types is invalid");
+ else if (DECIMAL_FLOAT_MODE_P (mode))
+ error ("use of decimal floating point types in AltiVec types is invalid");
switch (altivec_type)
{
if (type == pixel_type_node) return "u7__pixel";
if (type == bool_int_type_node) return "U6__booli";
+ /* Mangle IBM extended float long double as `g' (__float128) on
+ powerpc*-linux where long-double-64 previously was the default. */
+ if (TYPE_MAIN_VARIANT (type) == long_double_type_node
+ && TARGET_ELF
+ && TARGET_LONG_DOUBLE_128
+ && !TARGET_IEEEQUAD)
+ return "g";
+
/* For all other types, use normal C++ mangling. */
return NULL;
}
\f
#ifdef USING_ELFOS_H
-/* A C statement or statements to switch to the appropriate section
- for output of RTX in mode MODE. You can assume that RTX is some
- kind of constant in RTL. The argument MODE is redundant except in
- the case of a `const_int' rtx. Select the section by calling
- `text_section' or one of the alternatives for other sections.
+/* A get_unnamed_section callback, used for switching to toc_section. */
- Do not define this macro if you put all constants in the read-only
- data section. */
+static void
+rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ if (DEFAULT_ABI == ABI_AIX
+ && TARGET_MINIMAL_TOC
+ && !TARGET_RELOCATABLE)
+ {
+ if (!toc_initialized)
+ {
+ toc_initialized = 1;
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
+ fprintf (asm_out_file, "\t.tc ");
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, "\n");
+
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, " = .+32768\n");
+ }
+ else
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ }
+ else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ else
+ {
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ if (!toc_initialized)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, " = .+32768\n");
+ toc_initialized = 1;
+ }
+ }
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
static void
+rs6000_elf_asm_init_sections (void)
+{
+ toc_section
+ = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
+
+ sdata2_section
+ = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
+ SDATA2_SECTION_ASM_OP);
+}
+
+/* Implement TARGET_SELECT_RTX_SECTION. */
+
+static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
unsigned HOST_WIDE_INT align)
{
if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
- toc_section ();
+ return toc_section;
else
- default_elf_select_rtx_section (mode, x, align);
+ return default_elf_select_rtx_section (mode, x, align);
}
-/* A C statement or statements to switch to the appropriate
- section for output of DECL. DECL is either a `VAR_DECL' node
- or a constant of some sort. RELOC indicates whether forming
- the initial value of DECL requires link-time relocations. */
+/* Implement TARGET_ASM_SELECT_SECTION for ELF targets. */
-static void
+static section *
rs6000_elf_select_section (tree decl, int reloc,
unsigned HOST_WIDE_INT align)
{
ABI_AIX, because otherwise we end up with dynamic relocations
in read-only sections. This happens for function pointers,
references to vtables in typeinfo, and probably other cases. */
- default_elf_select_section_1 (decl, reloc, align,
- flag_pic || DEFAULT_ABI == ABI_AIX);
+ return default_elf_select_section_1 (decl, reloc, align,
+ flag_pic || DEFAULT_ABI == ABI_AIX);
}
/* A C statement to build up a unique section name, expressed as a
}
}
-static bool
+bool
rs6000_elf_in_small_data_p (tree decl)
{
if (rs6000_sdata == SDATA_NONE)
}
#endif /* USING_ELFOS_H */
+\f
+/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
+static bool
+rs6000_use_blocks_for_constant_p (enum machine_mode mode, rtx x)
+{
+ return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
+}
\f
/* Return a REG that occurs in ADDR with coefficient 1.
ADDR can be effectively incremented by incrementing REG.
GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
if (flag_pic == 2)
- machopic_picsymbol_stub1_section ();
+ switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
else
- machopic_symbol_stub1_section ();
+ switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
if (flag_pic == 2)
{
fprintf (file, "\tbctr\n");
}
- machopic_lazy_symbol_ptr_section ();
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
fprintf (file, "%s:\n", lazy_ptr_name);
fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
fprintf (file, "%sdyld_stub_binding_helper\n",
return machopic_legitimize_pic_address (orig, mode, reg);
}
-/* This is just a placeholder to make linking work without having to
- add this to the generic Darwin EXTRA_SECTIONS. If -mcall-aix is
- ever needed for Darwin (not too likely!) this would have to get a
- real definition. */
-
-void
-toc_section (void)
-{
-}
-
/* Output a .machine directive for the Darwin assembler, and call
the generic start_file routine. */
size_t i;
rs6000_file_start ();
+ darwin_file_start ();
/* Determine the argument to -mcpu=. Default to G3 if not specified. */
for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
section = buf;
}
- named_section_flags (section, SECTION_WRITE);
+ switch_to_section (get_section (section, SECTION_WRITE, NULL));
assemble_align (POINTER_SIZE);
if (TARGET_RELOCATABLE)
section = buf;
}
- named_section_flags (section, SECTION_WRITE);
+ switch_to_section (get_section (section, SECTION_WRITE, NULL));
assemble_align (POINTER_SIZE);
if (TARGET_RELOCATABLE)
#if TARGET_XCOFF
static void
+rs6000_xcoff_asm_output_anchor (rtx symbol)
+{
+ char buffer[100];
+
+ sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
+ SYMBOL_REF_BLOCK_OFFSET (symbol));
+ ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
+}
+
+static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
fputs (GLOBAL_ASM_OP, stream);
putc ('\n', stream);
}
+/* A get_unnamed_section callback, used for read-only sections.  PTR
+ points to the section string variable. */
+
+static void
+rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
+{
+ fprintf (asm_out_file, "\t.csect %s[RO],3\n",
+ *(const char *const *) directive);
+}
+
+/* Likewise for read-write sections. */
+
+static void
+rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
+{
+ fprintf (asm_out_file, "\t.csect %s[RW],3\n",
+ *(const char *const *) directive);
+}
+
+/* A get_unnamed_section callback, used for switching to toc_section. */
+
+static void
+rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ if (TARGET_MINIMAL_TOC)
+ {
+ /* toc_section is always selected at least once from
+ rs6000_xcoff_file_start, so this is guaranteed to
+ always be defined once and only once in each file. */
+ if (!toc_initialized)
+ {
+ fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
+ fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
+ toc_initialized = 1;
+ }
+ fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
+ (TARGET_32BIT ? "" : ",3"));
+ }
+ else
+ fputs ("\t.toc\n", asm_out_file);
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
+
+static void
+rs6000_xcoff_asm_init_sections (void)
+{
+ read_only_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_read_only_section_name);
+
+ private_data_section
+ = get_unnamed_section (SECTION_WRITE,
+ rs6000_xcoff_output_readwrite_section_asm_op,
+ &xcoff_private_data_section_name);
+
+ read_only_private_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_private_data_section_name);
+
+ toc_section
+ = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
+
+ readonly_data_section = read_only_data_section;
+ exception_section = data_section;
+}
+
static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
tree decl ATTRIBUTE_UNUSED)
name, suffix[smclass], flags & SECTION_ENTSIZE);
}
-static void
+static section *
rs6000_xcoff_select_section (tree decl, int reloc,
unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
if (decl_readonly_section_1 (decl, reloc, 1))
{
if (TREE_PUBLIC (decl))
- read_only_data_section ();
+ return read_only_data_section;
else
- read_only_private_data_section ();
+ return read_only_private_data_section;
}
else
{
if (TREE_PUBLIC (decl))
- data_section ();
+ return data_section;
else
- private_data_section ();
+ return private_data_section;
}
}
However, if this is being placed in the TOC it must be output as a
toc entry. */
-static void
+static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
- toc_section ();
+ return toc_section;
else
- read_only_private_data_section ();
+ return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name. */
output_quoted_string (asm_out_file, main_input_filename);
fputc ('\n', asm_out_file);
if (write_symbols != NO_DEBUG)
- private_data_section ();
- text_section ();
+ switch_to_section (private_data_section);
+ switch_to_section (text_section);
if (profile_flag)
fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
rs6000_file_start ();
static void
rs6000_xcoff_file_end (void)
{
- text_section ();
+ switch_to_section (text_section);
fputs ("_section_.text:\n", asm_out_file);
- data_section ();
+ switch_to_section (data_section);
fputs (TARGET_32BIT
? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
asm_out_file);
else
*total = rs6000_cost->fp;
}
- else if (GET_CODE (XEXP (x, 0)) == MULT)
- {
- /* The rs6000 doesn't have shift-and-add instructions. */
- rs6000_rtx_costs (XEXP (x, 0), MULT, PLUS, total);
- *total += COSTS_N_INSNS (1);
- }
else
*total = COSTS_N_INSNS (1);
return false;
else
*total = rs6000_cost->fp;
}
- else if (GET_CODE (XEXP (x, 0)) == MULT)
- {
- /* The rs6000 doesn't have shift-and-sub instructions. */
- rs6000_rtx_costs (XEXP (x, 0), MULT, MINUS, total);
- *total += COSTS_N_INSNS (1);
- }
else
*total = COSTS_N_INSNS (1);
return false;
GP_ARG_RETURN + 1),
GEN_INT (4))));
}
+ if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
+ {
+ return gen_rtx_PARALLEL (DCmode,
+ gen_rtvec (4,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 2),
+ GEN_INT (8)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 3),
+ GEN_INT (12))));
+ }
if ((INTEGRAL_TYPE_P (valtype)
&& TYPE_PRECISION (valtype) < BITS_PER_WORD)
else
mode = TYPE_MODE (valtype);
- if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS)
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ regno = GP_ARG_RETURN;
+ else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS)
regno = FP_ARG_RETURN;
else if (TREE_CODE (valtype) == COMPLEX_TYPE
&& targetm.calls.split_complex_arg)
GEN_INT (4))));
}
- if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ regno = GP_ARG_RETURN;
+ else if (SCALAR_FLOAT_MODE_P (mode)
&& TARGET_HARD_FLOAT && TARGET_FPRS)
regno = FP_ARG_RETURN;
else if (ALTIVEC_VECTOR_MODE (mode)
return TARGET_32BIT ? SImode : word_mode;
}
+/* Target hook for scalar_mode_supported_p. */
+static bool
+rs6000_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return true;
+ else
+ return default_scalar_mode_supported_p (mode);
+}
+
/* Target hook for vector_mode_supported_p. */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)