/* Subroutines used for code generation on IBM RS/6000.
Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
+#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfglayout.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
+#include "opts.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
rtx sdmode_stack_slot;
} machine_function;
-/* Target cpu type */
-
-struct rs6000_cpu_select rs6000_select[3] =
-{
- /* switch name, tune arch */
- { (const char *)0, "--with-cpu=", 1, 1 },
- { (const char *)0, "-mcpu=", 1, 1 },
- { (const char *)0, "-mtune=", 1, 0 },
-};
-
-/* String variables to hold the various options. */
-static const char *rs6000_sched_insert_nops_str;
-static const char *rs6000_sched_costly_dep_str;
-static const char *rs6000_recip_name;
-
-#ifdef USING_ELFOS_H
-static const char *rs6000_abi_name;
-static const char *rs6000_sdata_name;
-#endif
-
/* Support targetm.vectorize.builtin_mask_for_load. */
static GTY(()) tree altivec_builtin_mask_for_load;
/* Width in bits of a pointer. */
unsigned rs6000_pointer_size;
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+/* Flag whether floating point values have been passed/returned. */
+static bool rs6000_passes_float;
+/* Flag whether vector values have been passed/returned. */
+static bool rs6000_passes_vector;
+/* Flag whether small (<= 8 byte) structures have been returned. */
+static bool rs6000_returns_struct;
+#endif
/* Value is TRUE if register/mode pair is acceptable. */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
/* Reload functions based on the type and the vector unit. */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];
+static int dbg_cost_ctrl;
+
/* Built in types. */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
-/* True for any options that were explicitly set. */
-static struct {
- bool aix_struct_ret; /* True if -maix-struct-ret was used. */
- bool alignment; /* True if -malign- was used. */
- bool spe_abi; /* True if -mabi=spe/no-spe was used. */
- bool altivec_abi; /* True if -mabi=altivec/no-altivec used. */
- bool spe; /* True if -mspe= was used. */
- bool float_gprs; /* True if -mfloat-gprs= was used. */
- bool long_double; /* True if -mlong-double- was used. */
- bool ieee; /* True if -mabi=ieee/ibmlongdouble used. */
- bool vrsave; /* True if -mvrsave was used. */
- bool cmodel; /* True if -mcmodel was used. */
-} rs6000_explicit_options;
-
struct builtin_description
{
/* mask is not const because we're going to alter it below. This
static struct machine_function * rs6000_init_machine_status (void);
static bool rs6000_assemble_integer (rtx, unsigned int, int);
static bool no_global_regs_above (int, bool);
-#ifdef HAVE_GAS_HIDDEN
+#if defined (HAVE_GAS_HIDDEN) && !defined (TARGET_MACHO)
static void rs6000_assemble_visibility (tree, int);
#endif
static int rs6000_ra_ever_killed (void);
static void rs6000_file_start (void);
#if TARGET_ELF
static int rs6000_elf_reloc_rw_mask (void);
-static void rs6000_elf_asm_out_constructor (rtx, int);
-static void rs6000_elf_asm_out_destructor (rtx, int);
-static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
+static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
+static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
+static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void rs6000_elf_asm_init_sections (void);
static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
unsigned HOST_WIDE_INT);
static rtx altivec_expand_vec_ext_builtin (tree, rtx);
static int get_element_number (tree, tree);
static void rs6000_option_override (void);
-static void rs6000_option_init_struct (struct gcc_options *);
static void rs6000_option_default_params (void);
-static bool rs6000_handle_option (size_t, const char *, int);
static int rs6000_loop_align_max_skip (rtx);
-static void rs6000_parse_yes_no_option (const char *, const char *, int *);
static int first_altivec_reg_to_save (void);
static unsigned int compute_vrsave_mask (void);
static void compute_save_world_info (rs6000_stack_t *info_ptr);
rtx[], int *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, bool, bool);
static rtx rs6000_mixed_function_arg (enum machine_mode, const_tree, int);
-static void rs6000_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
+static void rs6000_function_arg_advance (cumulative_args_t, enum machine_mode,
const_tree, bool);
-static rtx rs6000_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
+static rtx rs6000_function_arg (cumulative_args_t, enum machine_mode,
const_tree, bool);
static unsigned int rs6000_function_arg_boundary (enum machine_mode,
const_tree);
static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
-static void setup_incoming_varargs (CUMULATIVE_ARGS *,
+static void setup_incoming_varargs (cumulative_args_t,
enum machine_mode, tree,
int *, int);
-static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
+static bool rs6000_pass_by_reference (cumulative_args_t, enum machine_mode,
const_tree, bool);
-static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
+static int rs6000_arg_partial_bytes (cumulative_args_t, enum machine_mode,
tree, bool);
static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree);
#if TARGET_MACHO
enum machine_mode,
struct secondary_reload_info *);
-static const reg_class_t *rs6000_ira_cover_classes (void);
-
const int INSN_NOT_AVAILABLE = -1;
static enum machine_mode rs6000_eh_return_filter_mode (void);
static bool rs6000_can_eliminate (const int, const int);
+static void rs6000_conditional_register_usage (void);
static void rs6000_trampoline_init (rtx, tree, rtx);
+static bool rs6000_cannot_force_const_mem (enum machine_mode, rtx);
+static bool rs6000_legitimate_constant_p (enum machine_mode, rtx);
/* Hash table stuff for keeping track of TOC entries. */
static const struct attribute_spec rs6000_attribute_table[] =
{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
- { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
- { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
- { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
- { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+ affects_type_identity } */
+ { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute,
+ false },
+ { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute,
+ false },
+ { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute,
+ false },
+ { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
+ false },
+ { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
+ false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
SUBTARGET_ATTRIBUTE_TABLE,
#endif
- { NULL, 0, 0, false, false, false, NULL }
+ { NULL, 0, 0, false, false, false, NULL, false }
};
-
-/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
-static const struct default_options rs6000_option_optimization_table[] =
- {
- { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
- { OPT_LEVELS_NONE, 0, NULL, 0 }
- };
\f
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer
-#ifdef HAVE_GAS_HIDDEN
+#if defined (HAVE_GAS_HIDDEN) && !defined (TARGET_MACHO)
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#define TARGET_HAVE_TLS HAVE_AS_TLS
#undef TARGET_CANNOT_FORCE_CONST_MEM
-#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_tls_referenced_p
+#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
-#undef TARGET_HANDLE_OPTION
-#define TARGET_HANDLE_OPTION rs6000_handle_option
-
#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override
-#undef TARGET_OPTION_INIT_STRUCT
-#define TARGET_OPTION_INIT_STRUCT rs6000_option_init_struct
-
#undef TARGET_OPTION_DEFAULT_PARAMS
#define TARGET_OPTION_DEFAULT_PARAMS rs6000_option_default_params
-#undef TARGET_OPTION_OPTIMIZATION_TABLE
-#define TARGET_OPTION_OPTIMIZATION_TABLE rs6000_option_optimization_table
-
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
rs6000_builtin_vectorized_function
-#undef TARGET_DEFAULT_TARGET_FLAGS
-#define TARGET_DEFAULT_TARGET_FLAGS \
- (TARGET_DEFAULT)
-
+#ifndef TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
+#endif
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
The PowerPC architecture requires only weak consistency among
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
-#undef TARGET_IRA_COVER_CLASSES
-#define TARGET_IRA_COVER_CLASSES rs6000_ira_cover_classes
-
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
+
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
| MASK_VSX)
};
-/* This table occasionally claims that a processor does not support a
- particular feature even though it does, but the feature is slower than the
- alternative. Thus, it shouldn't be relied on as a complete description of
- the processor's support.
-
- Please keep this list in order, and don't forget to update the documentation
- in invoke.texi when adding a new processor or flag. */
-
struct rs6000_ptt
{
const char *const name; /* Canonical processor name. */
static struct rs6000_ptt const processor_target_table[] =
{
- {"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"403", PROCESSOR_PPC403,
- POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
- {"405", PROCESSOR_PPC405,
- POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
- {"405fp", PROCESSOR_PPC405,
- POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
- {"440", PROCESSOR_PPC440,
- POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
- {"440fp", PROCESSOR_PPC440,
- POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
- {"464", PROCESSOR_PPC440,
- POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
- {"464fp", PROCESSOR_PPC440,
- POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
- {"476", PROCESSOR_PPC476,
- POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_PPC_GFXOPT | MASK_MFCRF
- | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_MULHW | MASK_DLMZB},
- {"476fp", PROCESSOR_PPC476,
- POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB
- | MASK_FPRND | MASK_CMPB | MASK_MULHW | MASK_DLMZB},
- {"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
- {"601", PROCESSOR_PPC601,
- MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
- {"602", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
- {"603", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
- {"603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
- {"604", PROCESSOR_PPC604, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
- {"604e", PROCESSOR_PPC604e, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
- {"620", PROCESSOR_PPC620,
- POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
- {"630", PROCESSOR_PPC630,
- POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
- {"740", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
- {"7400", PROCESSOR_PPC7400, POWERPC_7400_MASK},
- {"7450", PROCESSOR_PPC7450, POWERPC_7400_MASK},
- {"750", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
- {"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN
- | MASK_ISEL},
- /* 8548 has a dummy entry for now. */
- {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN
- | MASK_ISEL},
- {"a2", PROCESSOR_PPCA2,
- POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_POPCNTB
- | MASK_CMPB | MASK_NO_UPDATE },
- {"e300c2", PROCESSOR_PPCE300C2, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"e300c3", PROCESSOR_PPCE300C3, POWERPC_BASE_MASK},
- {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT
- | MASK_ISEL},
- {"e500mc64", PROCESSOR_PPCE500MC64, POWERPC_BASE_MASK | MASK_POWERPC64
- | MASK_PPC_GFXOPT | MASK_ISEL},
- {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"970", PROCESSOR_POWER4,
- POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
- {"cell", PROCESSOR_CELL,
- POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
- {"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS},
- {"ec603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
- {"G3", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
- {"G4", PROCESSOR_PPC7450, POWERPC_7400_MASK},
- {"G5", PROCESSOR_POWER4,
- POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
- {"titan", PROCESSOR_TITAN,
- POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
- {"power", PROCESSOR_POWER, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
- {"power2", PROCESSOR_POWER,
- MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
- {"power3", PROCESSOR_PPC630,
- POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
- {"power4", PROCESSOR_POWER4,
- POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
- | MASK_MFCRF},
- {"power5", PROCESSOR_POWER5,
- POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
- | MASK_MFCRF | MASK_POPCNTB},
- {"power5+", PROCESSOR_POWER5,
- POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
- | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
- {"power6", PROCESSOR_POWER6,
- POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
- | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
- | MASK_RECIP_PRECISION},
- {"power6x", PROCESSOR_POWER6,
- POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
- | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
- | MASK_MFPGPR | MASK_RECIP_PRECISION},
- {"power7", PROCESSOR_POWER7, /* Don't add MASK_ISEL by default */
- POWERPC_7400_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_MFCRF
- | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP | MASK_POPCNTD
- | MASK_VSX | MASK_RECIP_PRECISION},
- {"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
- {"powerpc64", PROCESSOR_POWERPC64,
- POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
- {"rios", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
- {"rios1", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
- {"rios2", PROCESSOR_RIOS2,
- MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
- {"rsc", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
- {"rsc1", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
- {"rs64", PROCESSOR_RS64A,
- POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64}
+#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
+#include "rs6000-cpus.def"
+#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
&& !flag_apple_kext
&& strverscmp (darwin_macosx_version_min, "10.5") >= 0
&& ! (target_flags_explicit & MASK_ALTIVEC)
- && ! rs6000_select[1].string)
+ && ! global_options_set.x_rs6000_cpu_index)
{
target_flags |= MASK_ALTIVEC;
}
= ((global_init_p || target_option_default_node == NULL)
? NULL : TREE_TARGET_OPTION (target_option_default_node));
+ /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
+ library functions, so warn about it. The flag may be useful for
+ performance studies from time to time though, so don't disable it
+ entirely. */
+ if (global_options_set.x_rs6000_alignment_flags
+ && rs6000_alignment_flags == MASK_ALIGN_POWER
+ && DEFAULT_ABI == ABI_DARWIN
+ && TARGET_64BIT)
+ warning (0, "-malign-power is not supported for 64-bit Darwin;"
+ " it is incompatible with the installed C and C++ libraries");
+
+ if (global_options_set.x_rs6000_spe_abi
+ && rs6000_spe_abi
+ && !TARGET_SPE_ABI)
+ error ("not configured for SPE ABI");
+
/* Numerous experiments show that IRA-based loop pressure
calculation works better for RTL loop invariant motion on targets
with enough (>= 32) registers. It is an expensive optimization.
}
}
- if (!rs6000_explicit_options.long_double)
+ if (!global_options_set.x_rs6000_long_double_type_size)
{
if (main_target_opt != NULL
&& (main_target_opt->x_rs6000_long_double_type_size
}
#ifndef POWERPC_LINUX
- if (!rs6000_explicit_options.ieee)
+ if (!global_options_set.x_rs6000_ieeequad)
rs6000_ieeequad = 1;
#endif
be explicitly overridden in either case. */
if (TARGET_ELF)
{
- if (!rs6000_explicit_options.altivec_abi
+ if (!global_options_set.x_rs6000_altivec_abi
&& (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
{
if (main_target_opt != NULL &&
}
/* Enable VRSAVE for AltiVec ABI, unless explicitly overridden. */
- if (!rs6000_explicit_options.vrsave)
+ if (!global_options_set.x_TARGET_ALTIVEC_VRSAVE)
TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
}
if ((target_flags & MASK_STRING) != 0)
target_flags = target_flags & ~MASK_STRING;
}
- else if (rs6000_select[1].string != NULL)
+ else if (global_options_set.x_rs6000_cpu_index)
{
/* For the powerpc-eabispe configuration, we set all these by
default, so let's unset them if we manually set another
error ("target attribute or pragma changes SPE ABI");
else
{
- if (!rs6000_explicit_options.spe_abi)
+ if (!global_options_set.x_rs6000_spe_abi)
rs6000_spe_abi = 0;
- if (!rs6000_explicit_options.spe)
+ if (!global_options_set.x_rs6000_spe)
rs6000_spe = 0;
- if (!rs6000_explicit_options.float_gprs)
+ if (!global_options_set.x_rs6000_float_gprs)
rs6000_float_gprs = 0;
}
if (!(target_flags_explicit & MASK_ISEL))
|| rs6000_cpu == PROCESSOR_PPCE500MC
|| rs6000_cpu == PROCESSOR_PPCE500MC64);
- /* Allow debug switches to override the above settings. */
- if (TARGET_ALWAYS_HINT > 0)
+ /* Allow debug switches to override the above settings. These are set to -1
+ in rs6000.opt to indicate the user hasn't directly set the switch. */
+ if (TARGET_ALWAYS_HINT >= 0)
rs6000_always_hint = TARGET_ALWAYS_HINT;
- if (TARGET_SCHED_GROUPS > 0)
+ if (TARGET_SCHED_GROUPS >= 0)
rs6000_sched_groups = TARGET_SCHED_GROUPS;
- if (TARGET_ALIGN_BRANCH_TARGETS > 0)
+ if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
rs6000_sched_restricted_insns_priority
/* Set aix_struct_return last, after the ABI is determined.
If -maix-struct-return or -msvr4-struct-return was explicitly
used, don't override with the ABI default. */
- if (!rs6000_explicit_options.aix_struct_ret)
+ if (!global_options_set.x_aix_struct_return)
aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
#if 0
/* If not explicitly specified via option, decide whether to generate indexed
load/store instructions. */
if (TARGET_AVOID_XFORM == -1)
- /* Avoid indexed addressing when targeting Power6 in order to avoid
- the DERAT mispredict penalty. */
- TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB);
+ /* Avoid indexed addressing when targeting Power6 in order to avoid the
+ DERAT mispredict penalty. However the LVE and STVE altivec instructions
+ need indexed accesses and the type used is the scalar type of the element
+ being loaded or stored. */
+ TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
+ && !TARGET_ALTIVEC);
/* Set the -mrecip options. */
if (rs6000_recip_name)
return word_mode;
}
-/* Handle generic options of the form -mfoo=yes/no.
- NAME is the option name.
- VALUE is the option value.
- FLAG is the pointer to the flag where to store a 1 or 0, depending on
- whether the option value is 'yes' or 'no' respectively. */
-static void
-rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
-{
- if (value == 0)
- return;
- else if (!strcmp (value, "yes"))
- *flag = 1;
- else if (!strcmp (value, "no"))
- *flag = 0;
- else
- error ("unknown -m%s= option specified: '%s'", name, value);
-}
-
-/* Implement TARGET_OPTION_INIT_STRUCT. */
-
-static void
-rs6000_option_init_struct (struct gcc_options *opts)
-{
- if (DEFAULT_ABI == ABI_DARWIN)
- /* The Darwin libraries never set errno, so we might as well
- avoid calling them when that's the only reason we would. */
- opts->x_flag_errno_math = 0;
-
- /* Enable section anchors by default. */
- if (!TARGET_MACHO)
- opts->x_flag_section_anchors = 1;
-}
-
/* Implement TARGET_OPTION_DEFAULT_PARAMS. */
static void
set_default_param_value (PARAM_MAX_GROW_COPY_BB_INSNS, 16);
}
-static enum fpu_type_t
-rs6000_parse_fpu_option (const char *option)
-{
- if (!strcmp("none", option)) return FPU_NONE;
- if (!strcmp("sp_lite", option)) return FPU_SF_LITE;
- if (!strcmp("dp_lite", option)) return FPU_DF_LITE;
- if (!strcmp("sp_full", option)) return FPU_SF_FULL;
- if (!strcmp("dp_full", option)) return FPU_DF_FULL;
- error("unknown value %s for -mfpu", option);
- return FPU_NONE;
-}
-
-
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
library with vectorized intrinsics. */
return NULL_TREE;
}
-
-
-/* Implement TARGET_HANDLE_OPTION. */
-
-static bool
-rs6000_handle_option (size_t code, const char *arg, int value)
-{
- enum fpu_type_t fpu_type = FPU_NONE;
- int isel;
- char *p, *q;
-
- switch (code)
- {
- case OPT_mno_power:
- target_flags &= ~(MASK_POWER | MASK_POWER2
- | MASK_MULTIPLE | MASK_STRING);
- target_flags_explicit |= (MASK_POWER | MASK_POWER2
- | MASK_MULTIPLE | MASK_STRING);
- break;
- case OPT_mno_powerpc:
- target_flags &= ~(MASK_POWERPC | MASK_PPC_GPOPT
- | MASK_PPC_GFXOPT | MASK_POWERPC64);
- target_flags_explicit |= (MASK_POWERPC | MASK_PPC_GPOPT
- | MASK_PPC_GFXOPT | MASK_POWERPC64);
- break;
- case OPT_mfull_toc:
- target_flags &= ~MASK_MINIMAL_TOC;
- TARGET_NO_FP_IN_TOC = 0;
- TARGET_NO_SUM_IN_TOC = 0;
- target_flags_explicit |= MASK_MINIMAL_TOC;
-#ifdef TARGET_USES_SYSV4_OPT
- /* Note, V.4 no longer uses a normal TOC, so make -mfull-toc, be
- just the same as -mminimal-toc. */
- target_flags |= MASK_MINIMAL_TOC;
- target_flags_explicit |= MASK_MINIMAL_TOC;
-#endif
- break;
-
-#ifdef TARGET_USES_SYSV4_OPT
- case OPT_mtoc:
- /* Make -mtoc behave like -mminimal-toc. */
- target_flags |= MASK_MINIMAL_TOC;
- target_flags_explicit |= MASK_MINIMAL_TOC;
- break;
-#endif
-
-#if defined (HAVE_LD_LARGE_TOC) && defined (TARGET_USES_LINUX64_OPT)
- case OPT_mcmodel_:
- if (strcmp (arg, "small") == 0)
- rs6000_current_cmodel = CMODEL_SMALL;
- else if (strcmp (arg, "medium") == 0)
- rs6000_current_cmodel = CMODEL_MEDIUM;
- else if (strcmp (arg, "large") == 0)
- rs6000_current_cmodel = CMODEL_LARGE;
- else
- {
- error ("invalid option for -mcmodel: '%s'", arg);
- return false;
- }
- rs6000_explicit_options.cmodel = true;
-#endif
-
-#ifdef TARGET_USES_AIX64_OPT
- case OPT_maix64:
-#else
- case OPT_m64:
-#endif
- target_flags |= MASK_POWERPC64 | MASK_POWERPC;
- target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
- target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
- break;
-
-#ifdef TARGET_USES_AIX64_OPT
- case OPT_maix32:
-#else
- case OPT_m32:
-#endif
- target_flags &= ~MASK_POWERPC64;
- target_flags_explicit |= MASK_POWERPC64;
- break;
-
- case OPT_minsert_sched_nops_:
- rs6000_sched_insert_nops_str = arg;
- break;
-
- case OPT_mminimal_toc:
- if (value == 1)
- {
- TARGET_NO_FP_IN_TOC = 0;
- TARGET_NO_SUM_IN_TOC = 0;
- }
- break;
-
- case OPT_mpower:
- if (value == 1)
- {
- target_flags |= (MASK_MULTIPLE | MASK_STRING);
- target_flags_explicit |= (MASK_MULTIPLE | MASK_STRING);
- }
- break;
-
- case OPT_mpower2:
- if (value == 1)
- {
- target_flags |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
- target_flags_explicit |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
- }
- break;
-
- case OPT_mpowerpc_gpopt:
- case OPT_mpowerpc_gfxopt:
- if (value == 1)
- {
- target_flags |= MASK_POWERPC;
- target_flags_explicit |= MASK_POWERPC;
- }
- break;
-
- case OPT_maix_struct_return:
- case OPT_msvr4_struct_return:
- rs6000_explicit_options.aix_struct_ret = true;
- break;
-
- case OPT_mvrsave:
- rs6000_explicit_options.vrsave = true;
- TARGET_ALTIVEC_VRSAVE = value;
- break;
-
- case OPT_mvrsave_:
- rs6000_explicit_options.vrsave = true;
- rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
- break;
-
- case OPT_misel_:
- target_flags_explicit |= MASK_ISEL;
- isel = 0;
- rs6000_parse_yes_no_option ("isel", arg, &isel);
- if (isel)
- target_flags |= MASK_ISEL;
- else
- target_flags &= ~MASK_ISEL;
- break;
-
- case OPT_mspe:
- rs6000_explicit_options.spe = true;
- rs6000_spe = value;
- break;
-
- case OPT_mspe_:
- rs6000_explicit_options.spe = true;
- rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
- break;
-
- case OPT_mdebug_:
- p = ASTRDUP (arg);
- rs6000_debug = 0;
-
- while ((q = strtok (p, ",")) != NULL)
- {
- unsigned mask = 0;
- bool invert;
-
- p = NULL;
- if (*q == '!')
- {
- invert = true;
- q++;
- }
- else
- invert = false;
-
- if (! strcmp (q, "all"))
- mask = MASK_DEBUG_ALL;
- else if (! strcmp (q, "stack"))
- mask = MASK_DEBUG_STACK;
- else if (! strcmp (q, "arg"))
- mask = MASK_DEBUG_ARG;
- else if (! strcmp (q, "reg"))
- mask = MASK_DEBUG_REG;
- else if (! strcmp (q, "addr"))
- mask = MASK_DEBUG_ADDR;
- else if (! strcmp (q, "cost"))
- mask = MASK_DEBUG_COST;
- else if (! strcmp (q, "target"))
- mask = MASK_DEBUG_TARGET;
- else
- error ("unknown -mdebug-%s switch", q);
-
- if (invert)
- rs6000_debug &= ~mask;
- else
- rs6000_debug |= mask;
- }
- break;
-
-#ifdef TARGET_USES_SYSV4_OPT
- case OPT_mcall_:
- rs6000_abi_name = arg;
- break;
-
- case OPT_msdata_:
- rs6000_sdata_name = arg;
- break;
-
- case OPT_mtls_size_:
- if (strcmp (arg, "16") == 0)
- rs6000_tls_size = 16;
- else if (strcmp (arg, "32") == 0)
- rs6000_tls_size = 32;
- else if (strcmp (arg, "64") == 0)
- rs6000_tls_size = 64;
- else
- error ("bad value %qs for -mtls-size switch", arg);
- break;
-
- case OPT_mrelocatable:
- if (value == 1)
- {
- target_flags |= MASK_MINIMAL_TOC;
- target_flags_explicit |= MASK_MINIMAL_TOC;
- TARGET_NO_FP_IN_TOC = 1;
- }
- break;
-
- case OPT_mrelocatable_lib:
- if (value == 1)
- {
- target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
- target_flags_explicit |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
- TARGET_NO_FP_IN_TOC = 1;
- }
- else
- {
- target_flags &= ~MASK_RELOCATABLE;
- target_flags_explicit |= MASK_RELOCATABLE;
- }
- break;
-#endif
-
- case OPT_mabi_:
- if (!strcmp (arg, "altivec"))
- {
- rs6000_explicit_options.altivec_abi = true;
- rs6000_altivec_abi = 1;
-
- /* Enabling the AltiVec ABI turns off the SPE ABI. */
- rs6000_spe_abi = 0;
- }
- else if (! strcmp (arg, "no-altivec"))
- {
- rs6000_explicit_options.altivec_abi = true;
- rs6000_altivec_abi = 0;
- }
- else if (! strcmp (arg, "spe"))
- {
- rs6000_explicit_options.spe_abi = true;
- rs6000_spe_abi = 1;
- rs6000_altivec_abi = 0;
- if (!TARGET_SPE_ABI)
- error ("not configured for ABI: '%s'", arg);
- }
- else if (! strcmp (arg, "no-spe"))
- {
- rs6000_explicit_options.spe_abi = true;
- rs6000_spe_abi = 0;
- }
-
- /* These are here for testing during development only, do not
- document in the manual please. */
- else if (! strcmp (arg, "d64"))
- {
- rs6000_darwin64_abi = 1;
- warning (0, "using darwin64 ABI");
- }
- else if (! strcmp (arg, "d32"))
- {
- rs6000_darwin64_abi = 0;
- warning (0, "using old darwin ABI");
- }
-
- else if (! strcmp (arg, "ibmlongdouble"))
- {
- rs6000_explicit_options.ieee = true;
- rs6000_ieeequad = 0;
- warning (0, "using IBM extended precision long double");
- }
- else if (! strcmp (arg, "ieeelongdouble"))
- {
- rs6000_explicit_options.ieee = true;
- rs6000_ieeequad = 1;
- warning (0, "using IEEE extended precision long double");
- }
-
- else
- {
- error ("unknown ABI specified: '%s'", arg);
- return false;
- }
- break;
-
- case OPT_mcpu_:
- rs6000_select[1].string = arg;
- rs6000_cpu_index = rs6000_cpu_name_lookup (arg);
- if (rs6000_cpu_index < 0)
- error ("bad value (%s) for -mcpu", arg);
- break;
-
- case OPT_mtune_:
- rs6000_select[2].string = arg;
- rs6000_tune_index = rs6000_cpu_name_lookup (arg);
- if (rs6000_tune_index < 0)
- error ("bad value (%s) for -mtune", arg);
- break;
-
- case OPT_mtraceback_:
- if (! strncmp (arg, "full", 4))
- rs6000_traceback = traceback_full;
- else if (! strncmp (arg, "part", 4))
- rs6000_traceback = traceback_part;
- else if (! strncmp (arg, "no", 2))
- rs6000_traceback = traceback_none;
- else
- error ("unknown -mtraceback arg %qs; expecting %<full%>, "
- "%<partial%> or %<none%>", arg);
- break;
-
- case OPT_mfloat_gprs_:
- rs6000_explicit_options.float_gprs = true;
- if (! strcmp (arg, "yes") || ! strcmp (arg, "single"))
- rs6000_float_gprs = 1;
- else if (! strcmp (arg, "double"))
- rs6000_float_gprs = 2;
- else if (! strcmp (arg, "no"))
- rs6000_float_gprs = 0;
- else
- {
- error ("invalid option for -mfloat-gprs: '%s'", arg);
- return false;
- }
- break;
-
- case OPT_mlong_double_:
- rs6000_explicit_options.long_double = true;
- rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
- if (value != 64 && value != 128)
- {
- error ("unknown switch -mlong-double-%s", arg);
- rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
- return false;
- }
- else
- rs6000_long_double_type_size = value;
- break;
-
- case OPT_msched_costly_dep_:
- rs6000_sched_costly_dep_str = arg;
- break;
-
- case OPT_malign_:
- rs6000_explicit_options.alignment = true;
- if (! strcmp (arg, "power"))
- {
- /* On 64-bit Darwin, power alignment is ABI-incompatible with
- some C library functions, so warn about it. The flag may be
- useful for performance studies from time to time though, so
- don't disable it entirely. */
- if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
- warning (0, "-malign-power is not supported for 64-bit Darwin;"
- " it is incompatible with the installed C and C++ libraries");
- rs6000_alignment_flags = MASK_ALIGN_POWER;
- }
- else if (! strcmp (arg, "natural"))
- rs6000_alignment_flags = MASK_ALIGN_NATURAL;
- else
- {
- error ("unknown -malign-XXXXX option specified: '%s'", arg);
- return false;
- }
- break;
-
- case OPT_msingle_float:
- if (!TARGET_SINGLE_FPU)
- warning (0, "-msingle-float option equivalent to -mhard-float");
- /* -msingle-float implies -mno-double-float and TARGET_HARD_FLOAT. */
- rs6000_double_float = 0;
- target_flags &= ~MASK_SOFT_FLOAT;
- target_flags_explicit |= MASK_SOFT_FLOAT;
- break;
-
- case OPT_mdouble_float:
- /* -mdouble-float implies -msingle-float and TARGET_HARD_FLOAT. */
- rs6000_single_float = 1;
- target_flags &= ~MASK_SOFT_FLOAT;
- target_flags_explicit |= MASK_SOFT_FLOAT;
- break;
-
- case OPT_msimple_fpu:
- if (!TARGET_SINGLE_FPU)
- warning (0, "-msimple-fpu option ignored");
- break;
-
- case OPT_mhard_float:
- /* -mhard_float implies -msingle-float and -mdouble-float. */
- rs6000_single_float = rs6000_double_float = 1;
- break;
-
- case OPT_msoft_float:
- /* -msoft_float implies -mnosingle-float and -mnodouble-float. */
- rs6000_single_float = rs6000_double_float = 0;
- break;
-
- case OPT_mfpu_:
- fpu_type = rs6000_parse_fpu_option(arg);
- if (fpu_type != FPU_NONE)
- /* If -mfpu is not none, then turn off SOFT_FLOAT, turn on HARD_FLOAT. */
- {
- target_flags &= ~MASK_SOFT_FLOAT;
- target_flags_explicit |= MASK_SOFT_FLOAT;
- rs6000_xilinx_fpu = 1;
- if (fpu_type == FPU_SF_LITE || fpu_type == FPU_SF_FULL)
- rs6000_single_float = 1;
- if (fpu_type == FPU_DF_LITE || fpu_type == FPU_DF_FULL)
- rs6000_single_float = rs6000_double_float = 1;
- if (fpu_type == FPU_SF_LITE || fpu_type == FPU_DF_LITE)
- rs6000_simple_fpu = 1;
- }
- else
- {
- /* -mfpu=none is equivalent to -msoft-float */
- target_flags |= MASK_SOFT_FLOAT;
- target_flags_explicit |= MASK_SOFT_FLOAT;
- rs6000_single_float = rs6000_double_float = 0;
- }
-
- case OPT_mrecip:
- rs6000_recip_name = (value) ? "default" : "none";
- break;
-
- case OPT_mrecip_:
- rs6000_recip_name = arg;
- break;
- }
- return true;
-}
\f
+/* Default CPU string for rs6000*_file_start functions. */
+static const char *rs6000_default_cpu;
+
/* Do anything needed at the start of the asm file. */
static void
rs6000_file_start (void)
{
- size_t i;
char buffer[80];
const char *start = buffer;
- struct rs6000_cpu_select *ptr;
- const char *default_cpu = TARGET_CPU_DEFAULT;
FILE *file = asm_out_file;
+ rs6000_default_cpu = TARGET_CPU_DEFAULT;
+
default_file_start ();
#ifdef TARGET_BI_ARCH
if ((TARGET_DEFAULT ^ target_flags) & MASK_64BIT)
- default_cpu = 0;
+ rs6000_default_cpu = 0;
#endif
if (flag_verbose_asm)
{
sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
- rs6000_select[0].string = default_cpu;
- for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
{
- ptr = &rs6000_select[i];
- if (ptr->string != (char *)0 && ptr->string[0] != '\0')
- {
- fprintf (file, "%s %s%s", start, ptr->name, ptr->string);
- start = "";
- }
+ fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
+ start = "";
+ }
+
+ if (global_options_set.x_rs6000_cpu_index)
+ {
+ fprintf (file, "%s -mcpu=%s", start,
+ processor_target_table[rs6000_cpu_index].name);
+ start = "";
+ }
+
+ if (global_options_set.x_rs6000_tune_index)
+ {
+ fprintf (file, "%s -mtune=%s", start,
+ processor_target_table[rs6000_tune_index].name);
+ start = "";
}
if (PPC405_ERRATUM77)
putc ('\n', file);
}
-#ifdef HAVE_AS_GNU_ATTRIBUTE
- if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
- {
- fprintf (file, "\t.gnu_attribute 4, %d\n",
- ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
- : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
- : 2));
- fprintf (file, "\t.gnu_attribute 8, %d\n",
- (TARGET_ALTIVEC_ABI ? 2
- : TARGET_SPE_ABI ? 3
- : 1));
- fprintf (file, "\t.gnu_attribute 12, %d\n",
- aix_struct_return ? 2 : 1);
-
- }
-#endif
-
if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
{
switch_to_section (toc_section);
HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
- rtx tmp = CONST_VECTOR_ELT (op, elt);
+ rtx tmp;
+
+ /* We can't handle V2DImode and V2DFmode vector constants here yet. */
+ gcc_assert (GET_MODE (op) != V2DImode
+ && GET_MODE (op) != V2DFmode);
+
+ tmp = CONST_VECTOR_ELT (op, elt);
if (GET_MODE (op) == V4SFmode
|| GET_MODE (op) == V2SFmode)
tmp = gen_lowpart (SImode, tmp);
enum machine_mode inner = GET_MODE_INNER (mode);
unsigned i;
- unsigned nunits = GET_MODE_NUNITS (mode);
- unsigned bitsize = GET_MODE_BITSIZE (inner);
- unsigned mask = GET_MODE_MASK (inner);
+ unsigned nunits;
+ unsigned bitsize;
+ unsigned mask;
+
+ HOST_WIDE_INT val;
+ HOST_WIDE_INT splat_val;
+ HOST_WIDE_INT msb_val;
+
+ if (mode == V2DImode || mode == V2DFmode)
+ return false;
+
+ nunits = GET_MODE_NUNITS (mode);
+ bitsize = GET_MODE_BITSIZE (inner);
+ mask = GET_MODE_MASK (inner);
- HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
- HOST_WIDE_INT splat_val = val;
- HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;
+ val = const_vector_elt_as_int (op, nunits - 1);
+ splat_val = val;
+ msb_val = val > 0 ? 0 : -1;
/* Construct the value to be splatted, if possible. If not, return 0. */
for (i = 2; i <= copies; i *= 2)
else if (mode != GET_MODE (op))
return false;
+ /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
+ constants. */
+ if (mode == V2DFmode)
+ return zero_constant (op, mode);
+
+ if (mode == V2DImode)
+ {
+ /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
+ easy. */
+ if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
+ || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
+ return false;
+
+ if (zero_constant (op, mode))
+ return true;
+
+ if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
+ && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
+ return true;
+
+ return false;
+ }
+
/* Start with a vspltisw. */
step = GET_MODE_NUNITS (mode) / 4;
copies = 1;
vec = operands[1];
mode = GET_MODE (dest);
- if (TARGET_VSX && zero_constant (vec, mode))
- return "xxlxor %x0,%x0,%x0";
+ if (TARGET_VSX)
+ {
+ if (zero_constant (vec, mode))
+ return "xxlxor %x0,%x0,%x0";
+
+ if (mode == V2DImode
+ && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
+ && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
+ return "vspltisw %0,-1";
+ }
if (TARGET_ALTIVEC)
{
}
else
{
- rtx op0 = copy_to_reg (XVECEXP (vals, 0, 0));
- rtx op1 = copy_to_reg (XVECEXP (vals, 0, 1));
if (mode == V2DFmode)
- emit_insn (gen_vsx_concat_v2df (target, op0, op1));
+ {
+ rtx op0 = copy_to_mode_reg (DFmode, XVECEXP (vals, 0, 0));
+ rtx op1 = copy_to_mode_reg (DFmode, XVECEXP (vals, 0, 1));
+ emit_insn (gen_vsx_concat_v2df (target, op0, op1));
+ }
else
- emit_insn (gen_vsx_concat_v2di (target, op0, op1));
+ {
+ rtx op0 = copy_to_mode_reg (DImode, XVECEXP (vals, 0, 0));
+ rtx op1 = copy_to_mode_reg (DImode, XVECEXP (vals, 0, 1));
+ emit_insn (gen_vsx_concat_v2di (target, op0, op1));
+ }
}
return;
}
{
enum machine_mode mode = GET_MODE (vec);
enum machine_mode inner_mode = GET_MODE_INNER (mode);
- rtx mem, x;
+ rtx mem;
- if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
+ if (VECTOR_MEM_VSX_P (mode))
{
- rtx (*extract_func) (rtx, rtx, rtx)
- = ((mode == V2DFmode) ? gen_vsx_extract_v2df : gen_vsx_extract_v2di);
- emit_insn (extract_func (target, vec, GEN_INT (elt)));
- return;
+ switch (mode)
+ {
+ default:
+ break;
+ case V2DFmode:
+ emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
+ return;
+ case V2DImode:
+ emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
+ return;
+ case V4SFmode:
+ emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
+ return;
+ }
}
/* Allocate mode-sized buffer. */
mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ emit_move_insn (mem, vec);
+
/* Add offset to field within buffer matching vector element. */
- mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));
+ mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
- /* Store single field into mode-sized buffer. */
- x = gen_rtx_UNSPEC (VOIDmode,
- gen_rtvec (1, const0_rtx), UNSPEC_STVE);
- emit_insn (gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (2,
- gen_rtx_SET (VOIDmode,
- mem, vec),
- x)));
emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
&& regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
+/* Return true if memory accesses to OP are known to never straddle
+ a 32k boundary.  OP is the address being accessed (only SYMBOL_REFs
+ are analyzable), OFFSET is the constant offset the caller applies to
+ it, and MODE is the mode of the access.  Used to validate medium
+ code-model toc-relative addressing, where the low 16 bits of the
+ address are an offset from a 32k-aligned high part. */
+
+static bool
+offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
+ enum machine_mode mode)
+{
+ tree decl, type;
+ unsigned HOST_WIDE_INT dsize, dalign;
+
+ /* Anything other than a SYMBOL_REF carries no size/alignment info
+ we can use; be conservative. */
+ if (GET_CODE (op) != SYMBOL_REF)
+ return false;
+
+ decl = SYMBOL_REF_DECL (op);
+ if (!decl)
+ {
+ /* A zero-size mode (e.g. BLKmode) gives no access size to check
+ against; be conservative. */
+ if (GET_MODE_SIZE (mode) == 0)
+ return false;
+
+ /* -fsection-anchors loses the original SYMBOL_REF_DECL when
+ replacing memory addresses with an anchor plus offset. We
+ could find the decl by rummaging around in the block->objects
+ VEC for the given offset but that seems like too much work. */
+ dalign = 1;
+ if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
+ && SYMBOL_REF_ANCHOR_P (op)
+ && SYMBOL_REF_BLOCK (op) != NULL)
+ {
+ struct object_block *block = SYMBOL_REF_BLOCK (op);
+ HOST_WIDE_INT lsb, mask;
+
+ /* Given the alignment of the block.. */
+ dalign = block->alignment;
+ mask = dalign / BITS_PER_UNIT - 1;
+
+ /* ..and the combined offset of the anchor and any offset
+ to this block object.. */
+ offset += SYMBOL_REF_BLOCK_OFFSET (op);
+ lsb = offset & -offset;
+
+ /* ..find how many bits of the alignment we know for the
+ object. */
+ mask &= lsb - 1;
+ dalign = mask + 1;
+ }
+ return dalign >= GET_MODE_SIZE (mode);
+ }
+
+ /* A declared object cannot cross a 32k boundary when it is no larger
+ than 32k and at least as aligned as it is large: every access then
+ stays inside one aligned chunk. */
+ if (DECL_P (decl))
+ {
+ /* Code labels/functions are presumed suitably placed. */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ return true;
+
+ if (!DECL_SIZE_UNIT (decl))
+ return false;
+
+ if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
+ return false;
+
+ dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
+ if (dsize > 32768)
+ return false;
+
+ dalign = DECL_ALIGN_UNIT (decl);
+ return dalign >= dsize;
+ }
+
+ /* Otherwise DECL is some constant (the CONSTANT_CLASS_P test below
+ distinguishes it); apply the same size-vs-alignment test using the
+ constant's type. */
+ type = TREE_TYPE (decl);
+
+ if (TREE_CODE (decl) == STRING_CST)
+ dsize = TREE_STRING_LENGTH (decl);
+ else if (TYPE_SIZE_UNIT (type)
+ && host_integerp (TYPE_SIZE_UNIT (type), 1))
+ dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
+ else
+ return false;
+ if (dsize > 32768)
+ return false;
+
+ dalign = TYPE_ALIGN (type);
+ if (CONSTANT_CLASS_P (decl))
+ dalign = CONSTANT_ALIGNMENT (decl, dalign);
+ else
+ dalign = DATA_ALIGNMENT (decl, dalign);
+ dalign /= BITS_PER_UNIT;
+ return dalign >= dsize;
+}
+
static bool
constant_pool_expr_p (rtx op)
{
&& XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}
+/* Return true if X is a constant pool address, and also for cmodel=medium
+ if X is a toc-relative address known to be offsettable within MODE. */
+
bool
-legitimate_constant_pool_address_p (const_rtx x, bool strict)
+legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
+ bool strict)
{
return (TARGET_TOC
&& (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
|| ((TARGET_MINIMAL_TOC
|| TARGET_CMODEL != CMODEL_SMALL)
&& INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict)))
- && toc_relative_expr_p (XEXP (x, 1)));
+ && toc_relative_expr_p (XEXP (x, 1))
+ && (TARGET_CMODEL != CMODEL_MEDIUM
+ || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
+ || mode == QImode
+ || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
+ INTVAL (tocrel_offset), mode)));
}
static bool
return false;
if (!reg_offset_addressing_ok_p (mode))
return virtual_stack_registers_memory_p (x);
- if (legitimate_constant_pool_address_p (x, strict))
+ if (legitimate_constant_pool_address_p (x, mode, strict))
return true;
if (GET_CODE (XEXP (x, 1)) != CONST_INT)
return false;
if (MEM_P (x))
x = XEXP (x, 0);
- if ((GET_CODE (x) == PLUS
- || GET_CODE (x) == LO_SUM)
- && GET_CODE (XEXP (x, 0)) == REG
- && (REGNO (XEXP (x, 0)) == TOC_REGISTER
- || TARGET_MINIMAL_TOC
- || TARGET_CMODEL != CMODEL_SMALL)
+ if (GET_CODE (x) == (TARGET_CMODEL != CMODEL_SMALL ? LO_SUM : PLUS)
&& GET_CODE (XEXP (x, 1)) == CONST)
{
+ rtx offset = NULL_RTX;
+
y = XEXP (XEXP (x, 1), 0);
+ if (GET_CODE (y) == PLUS
+ && GET_MODE (y) == Pmode
+ && CONST_INT_P (XEXP (y, 1)))
+ {
+ offset = XEXP (y, 1);
+ y = XEXP (y, 0);
+ }
if (GET_CODE (y) == UNSPEC
- && XINT (y, 1) == UNSPEC_TOCREL)
+ && XINT (y, 1) == UNSPEC_TOCREL
+ && ((GET_CODE (XEXP (x, 0)) == REG
+ && (REGNO (XEXP (x, 0)) == TOC_REGISTER
+ || TARGET_MINIMAL_TOC
+ || TARGET_CMODEL != CMODEL_SMALL))
+ || (TARGET_CMODEL != CMODEL_SMALL
+ && GET_CODE (XEXP (x, 0)) == CONST
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == REG
+ && REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0)) == TOC_REGISTER
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == HIGH
+ && rtx_equal_p (XEXP (x, 1),
+ XEXP (XEXP (XEXP (XEXP (x, 0), 0), 1), 0)))))
{
y = XVECEXP (y, 0, 0);
+ if (offset != NULL_RTX)
+ y = gen_rtx_PLUS (Pmode, y, offset);
if (!MEM_P (orig_x))
return y;
else
if (TARGET_MACHO
&& GET_CODE (orig_x) == LO_SUM
- && GET_CODE (XEXP (x, 1)) == CONST)
+ && GET_CODE (XEXP (orig_x, 1)) == CONST)
{
- y = XEXP (XEXP (x, 1), 0);
+ y = XEXP (XEXP (orig_x, 1), 0);
if (GET_CODE (y) == UNSPEC
&& XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
return XVECEXP (y, 0, 0);
if (model == TLS_MODEL_GLOBAL_DYNAMIC)
{
- r3 = gen_rtx_REG (Pmode, 3);
tga = rs6000_tls_get_addr ();
- emit_library_call_value (tga, dest, LCT_CONST, Pmode, 1, r3, Pmode);
+ emit_library_call_value (tga, dest, LCT_CONST, Pmode,
+ 1, const0_rtx, Pmode);
+ r3 = gen_rtx_REG (Pmode, 3);
if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
}
else if (model == TLS_MODEL_LOCAL_DYNAMIC)
{
- r3 = gen_rtx_REG (Pmode, 3);
tga = rs6000_tls_get_addr ();
tmp1 = gen_reg_rtx (Pmode);
- emit_library_call_value (tga, tmp1, LCT_CONST, Pmode, 1, r3, Pmode);
+ emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
+ 1, const0_rtx, Pmode);
+ r3 = gen_rtx_REG (Pmode, 3);
if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
}
+/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  Reject placing X in the
+ constant pool when it references a thread-local symbol: TLS
+ addresses are resolved per-thread at run time, so they cannot be
+ materialized as pool constants. */
+
+static bool
+rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+ return rs6000_tls_referenced_p (x);
+}
+
/* Return 1 if *X is a thread-local symbol. This is the same as
rs6000_tls_symbol_ref except for the type of the unused argument. */
{
bool reg_offset_p = reg_offset_addressing_ok_p (mode);
+ /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
+ DFmode/DImode MEM. */
+ if (reg_offset_p
+ && opnum == 1
+ && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
+ || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
+ reg_offset_p = false;
+
/* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
&& REGNO (XEXP (XEXP (x, 0), 0)) == TOC_REGISTER
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 0)) == HIGH
&& GET_CODE (XEXP (x, 1)) == CONST
&& GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
&& XINT (XEXP (XEXP (x, 1), 0), 1) == UNSPEC_TOCREL
- && rtx_equal_p (XEXP (XEXP (XEXP (x, 0), 1), 0), XEXP (x, 1)))
+ && rtx_equal_p (XEXP (XEXP (XEXP (XEXP (x, 0), 1), 0), 0), XEXP (x, 1)))
{
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
return 1;
if (reg_offset_p && legitimate_small_data_p (mode, x))
return 1;
- if (reg_offset_p && legitimate_constant_pool_address_p (x, reg_ok_strict))
+ if (reg_offset_p
+ && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
return 1;
/* If not REG_OK_STRICT (before reload) let pass any stack offset. */
if (! reg_ok_strict
case LO_SUM:
/* Anything in the constant pool is sufficiently aligned that
all bytes have the same high part address. */
- return !legitimate_constant_pool_address_p (addr, false);
+ return !legitimate_constant_pool_address_p (addr, QImode, false);
/* Auto-increment cases are now treated generically in recog.c. */
case PRE_MODIFY:
}
/* Change register usage conditional on target flags. */
-void
+static void
rs6000_conditional_register_usage (void)
{
int i;
if (GET_CODE (operands[0]) == MEM
&& GET_CODE (XEXP (operands[0], 0)) != REG
- && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0), false))
+ && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
+ GET_MODE (operands[0]), false))
operands[0]
= replace_equiv_address (operands[0],
copy_addr_to_reg (XEXP (operands[0], 0)));
- if (GET_CODE (operands[1]) == MEM
- && GET_CODE (XEXP (operands[1], 0)) != REG
- && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0), false))
- operands[1]
- = replace_equiv_address (operands[1],
- copy_addr_to_reg (XEXP (operands[1], 0)));
-}
-
-/* Return true if memory accesses to DECL are known to never straddle
- a 32k boundary. */
-
-static bool
-offsettable_ok_by_alignment (tree decl)
-{
- unsigned HOST_WIDE_INT dsize, dalign;
-
- /* Presume any compiler generated symbol_ref is suitably aligned. */
- if (!decl)
- return true;
-
- if (TREE_CODE (decl) != VAR_DECL
- && TREE_CODE (decl) != PARM_DECL
- && TREE_CODE (decl) != RESULT_DECL
- && TREE_CODE (decl) != FIELD_DECL)
- return true;
-
- if (!DECL_SIZE_UNIT (decl))
- return false;
-
- if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
- return false;
-
- dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
- if (dsize <= 1)
- return true;
- if (dsize > 32768)
- return false;
-
- dalign = DECL_ALIGN_UNIT (decl);
- return dalign >= dsize;
+ if (GET_CODE (operands[1]) == MEM
+ && GET_CODE (XEXP (operands[1], 0)) != REG
+ && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
+ GET_MODE (operands[1]), false))
+ operands[1]
+ = replace_equiv_address (operands[1],
+ copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Emit a move from SOURCE to DEST in mode MODE. */
|| (TARGET_CMODEL == CMODEL_MEDIUM
&& GET_CODE (operands[1]) == SYMBOL_REF
&& !CONSTANT_POOL_ADDRESS_P (operands[1])
- && SYMBOL_REF_LOCAL_P (operands[1])
- && offsettable_ok_by_alignment (SYMBOL_REF_DECL (operands[1]))))
+ && SYMBOL_REF_LOCAL_P (operands[1])))
{
rtx reg = NULL_RTX;
if (TARGET_CMODEL != CMODEL_SMALL)
}
else if (mode == Pmode
&& CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != HIGH
+ && !(TARGET_CMODEL != CMODEL_SMALL
+ && GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == HIGH)
&& ((GET_CODE (operands[1]) != CONST_INT
&& ! easy_fp_constant (operands[1], mode))
|| (GET_CODE (operands[1]) == CONST_INT
> (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
|| (GET_CODE (operands[0]) == REG
&& FP_REGNO_P (REGNO (operands[0]))))
- && GET_CODE (operands[1]) != HIGH
- && ! legitimate_constant_pool_address_p (operands[1], false)
+ && ! legitimate_constant_pool_address_p (operands[1], mode,
+ false)
&& ! toc_relative_expr_p (operands[1])
&& (TARGET_CMODEL == CMODEL_SMALL
|| can_create_pseudo_p ()
/* Nonzero if we can use an AltiVec register to pass this arg. */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
- ((ALTIVEC_VECTOR_MODE (MODE) || VSX_VECTOR_MODE (MODE)) \
+ (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
&& (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
&& TARGET_ALTIVEC_ABI \
&& (NAMED))
return false;
}
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+/* Return TRUE if a call to function FNDECL may be one that
+ potentially affects the function calling ABI of the object file.
+ Used to decide whether float/vector/struct passing should be
+ recorded in .gnu_attribute notes. */
+
+static bool
+call_ABI_of_interest (tree fndecl)
+{
+ if (cgraph_state == CGRAPH_STATE_EXPANSION)
+ {
+ struct cgraph_node *c_node;
+
+ /* Libcalls are always interesting. */
+ if (fndecl == NULL_TREE)
+ return true;
+
+ /* Any call to an external function is interesting. */
+ if (DECL_EXTERNAL (fndecl))
+ return true;
+
+ /* Interesting functions that we are emitting in this object file. */
+ c_node = cgraph_get_node (fndecl);
+ return !cgraph_only_called_directly_p (c_node);
+ }
+ /* NOTE(review): calls seen before the cgraph reaches the expansion
+ state are assumed not to contribute to the object file's ABI
+ notes. */
+ return false;
+}
+#endif
+
/* Initialize a variable CUM of type CUMULATIVE_ARGS
for a call to a function whose data type is FNTYPE.
- For a library call, FNTYPE is 0.
+ For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
For incoming args we set the number of arguments in the prototype large
so we never return a PARALLEL. */
void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
rtx libname ATTRIBUTE_UNUSED, int incoming,
- int libcall, int n_named_args)
+ int libcall, int n_named_args,
+ tree fndecl ATTRIBUTE_UNUSED,
+ enum machine_mode return_mode ATTRIBUTE_UNUSED)
{
static CUMULATIVE_ARGS zero_cumulative;
cum->words = 0;
cum->fregno = FP_ARG_MIN_REG;
cum->vregno = ALTIVEC_ARG_MIN_REG;
- cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
+ cum->prototype = (fntype && prototype_p (fntype));
cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
? CALL_LIBCALL : CALL_NORMAL);
cum->sysv_gregno = GP_ARG_MIN_REG;
cum->prototype, cum->nargs_prototype);
}
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+ if (DEFAULT_ABI == ABI_V4)
+ {
+ cum->escapes = call_ABI_of_interest (fndecl);
+ if (cum->escapes)
+ {
+ tree return_type;
+
+ if (fntype)
+ {
+ return_type = TREE_TYPE (fntype);
+ return_mode = TYPE_MODE (return_type);
+ }
+ else
+ return_type = lang_hooks.types.type_for_mode (return_mode, 0);
+
+ if (return_type != NULL)
+ {
+ if (TREE_CODE (return_type) == RECORD_TYPE
+ && TYPE_TRANSPARENT_AGGR (return_type))
+ {
+ return_type = TREE_TYPE (first_field (return_type));
+ return_mode = TYPE_MODE (return_type);
+ }
+ if (AGGREGATE_TYPE_P (return_type)
+ && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
+ <= 8))
+ rs6000_returns_struct = true;
+ }
+ if (SCALAR_FLOAT_MODE_P (return_mode))
+ rs6000_passes_float = true;
+ else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
+ || SPE_VECTOR_MODE (return_mode))
+ rs6000_passes_vector = true;
+ }
+ }
+#endif
+
if (fntype
&& !TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
existing library interfaces.
Doubleword align SPE vectors.
- Quadword align Altivec vectors.
+ Quadword align Altivec/VSX vectors.
Quadword align large synthetic vector types. */
static unsigned int
&& int_size_in_bytes (type) >= 8
&& int_size_in_bytes (type) < 16))
return 64;
- else if ((ALTIVEC_VECTOR_MODE (mode) || VSX_VECTOR_MODE (mode))
+ else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
|| (type && TREE_CODE (type) == VECTOR_TYPE
&& int_size_in_bytes (type) >= 16))
return 128;
rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
{
+ unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
- cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+ cum->fregno += n_fpregs;
/* Single-precision floats present a special problem for
us, because they are smaller than an 8-byte GPR, and so
the structure-packing rules combined with the standard
}
}
else
- cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
+ cum->words += n_fpregs;
}
else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
{
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
const_tree type, bool named, int depth)
{
-
/* Only tick off an argument if we're not recursing. */
if (depth == 0)
cum->nargs_prototype--;
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+ if (DEFAULT_ABI == ABI_V4
+ && cum->escapes)
+ {
+ if (SCALAR_FLOAT_MODE_P (mode))
+ rs6000_passes_float = true;
+ else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
+ rs6000_passes_vector = true;
+ else if (SPE_VECTOR_MODE (mode)
+ && !cum->stdarg
+ && cum->sysv_gregno <= GP_ARG_MAX_REG)
+ rs6000_passes_vector = true;
+ }
+#endif
+
if (TARGET_ALTIVEC_ABI
- && (ALTIVEC_VECTOR_MODE (mode)
- || VSX_VECTOR_MODE (mode)
+ && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
|| (type && TREE_CODE (type) == VECTOR_TYPE
&& int_size_in_bytes (type) == 16)))
{
}
static void
-rs6000_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
const_tree type, bool named)
{
- rs6000_function_arg_advance_1 (cum, mode, type, named, 0);
+ rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
+ 0);
}
static rtx
rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
{
+ unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
#if 0
switch (mode)
{
}
#endif
rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
+ {
+ gcc_assert (cum->fregno == FP_ARG_MAX_REG
+ && (mode == TFmode || mode == TDmode));
+ /* Long double or _Decimal128 split over regs and memory. */
+ mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
+ cum->use_stack=1;
+ }
rvec[(*k)++]
= gen_rtx_EXPR_LIST (VOIDmode,
gen_rtx_REG (mode, cum->fregno++),
for the chunks of memory that go in int regs. Note we start at
element 1; 0 is reserved for an indication of using memory, and
may or may not be filled in below. */
- rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
+ rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
/* If any part of the struct went on the stack put all of it there.
itself. */
static rtx
-rs6000_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
const_tree type, bool named)
{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
enum rs6000_abi abi = DEFAULT_ABI;
/* Return a marker to indicate whether CR1 needs to set or clear the
: CALL_V4_CLEAR_FP_ARGS));
}
- return GEN_INT (cum->call_cookie);
+ return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
}
if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
{
- rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
+ rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
if (rslt != NULL_RTX)
return rslt;
/* Else fall through to usual handling. */
else
return gen_rtx_REG (mode, cum->vregno);
else if (TARGET_ALTIVEC_ABI
- && (ALTIVEC_VECTOR_MODE (mode)
- || VSX_VECTOR_MODE (mode)
+ && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
|| (type && TREE_CODE (type) == VECTOR_TYPE
&& int_size_in_bytes (type) == 16)))
{
returns the number of bytes used by the first element of the PARALLEL. */
static int
-rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
tree type, bool named)
{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
int ret = 0;
int align_words;
reference. */
static bool
-rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
enum machine_mode mode, const_tree type,
bool named ATTRIBUTE_UNUSED)
{
stack and set PRETEND_SIZE to the length of the registers pushed. */
static void
-setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
tree type, int *pretend_size ATTRIBUTE_UNUSED,
int no_rtl)
{
alias_set_type set;
/* Skip the last named argument. */
- next_cum = *cum;
+ next_cum = *get_cumulative_args (cum);
rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
if (DEFAULT_ABI == ABI_V4)
build_int_cst (NULL_TREE, n_fpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+ if (call_ABI_of_interest (cfun->decl))
+ rs6000_passes_float = true;
+#endif
}
/* Find the overflow area. */
rtx op2 = expand_normal (arg2);
rtx pat, addr;
enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode smode = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = Pmode;
enum machine_mode mode2 = Pmode;
|| arg2 == error_mark_node)
return const0_rtx;
- if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
- op0 = copy_to_mode_reg (tmode, op0);
+ if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
+ op0 = copy_to_mode_reg (smode, op0);
op2 = copy_to_mode_reg (mode2, op2);
switch (fcode)
{
case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
- icode = CODE_FOR_vector_load_v16qi;
+ icode = CODE_FOR_vector_altivec_load_v16qi;
break;
case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
- icode = CODE_FOR_vector_load_v8hi;
+ icode = CODE_FOR_vector_altivec_load_v8hi;
break;
case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
- icode = CODE_FOR_vector_load_v4si;
+ icode = CODE_FOR_vector_altivec_load_v4si;
break;
case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
- icode = CODE_FOR_vector_load_v4sf;
+ icode = CODE_FOR_vector_altivec_load_v4sf;
+ break;
+ case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
+ icode = CODE_FOR_vector_altivec_load_v2df;
+ break;
+ case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
+ icode = CODE_FOR_vector_altivec_load_v2di;
break;
default:
*expandedp = false;
switch (fcode)
{
case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
- icode = CODE_FOR_vector_store_v16qi;
+ icode = CODE_FOR_vector_altivec_store_v16qi;
break;
case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
- icode = CODE_FOR_vector_store_v8hi;
+ icode = CODE_FOR_vector_altivec_store_v8hi;
break;
case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
- icode = CODE_FOR_vector_store_v4si;
+ icode = CODE_FOR_vector_altivec_store_v4si;
break;
case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
- icode = CODE_FOR_vector_store_v4sf;
+ icode = CODE_FOR_vector_altivec_store_v4sf;
+ break;
+ case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
+ icode = CODE_FOR_vector_altivec_store_v2df;
+ break;
+ case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
+ icode = CODE_FOR_vector_altivec_store_v2di;
break;
default:
*expandedp = false;
switch (fcode)
{
case ALTIVEC_BUILTIN_STVX:
- return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, exp);
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
case ALTIVEC_BUILTIN_STVEBX:
return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
case ALTIVEC_BUILTIN_STVEHX:
case ALTIVEC_BUILTIN_STVRXL:
return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
+ case VSX_BUILTIN_STXVD2X_V2DF:
+ return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
+ case VSX_BUILTIN_STXVD2X_V2DI:
+ return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
+ case VSX_BUILTIN_STXVW4X_V4SF:
+ return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
+ case VSX_BUILTIN_STXVW4X_V4SI:
+ return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
+ case VSX_BUILTIN_STXVW4X_V8HI:
+ return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
+ case VSX_BUILTIN_STXVW4X_V16QI:
+ return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
+
case ALTIVEC_BUILTIN_MFVSCR:
icode = CODE_FOR_altivec_mfvscr;
tmode = insn_data[icode].operand[0].mode;
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
exp, target, false);
case ALTIVEC_BUILTIN_LVX:
- return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx,
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
exp, target, false);
case ALTIVEC_BUILTIN_LVLX:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
case ALTIVEC_BUILTIN_LVRXL:
return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
exp, target, true);
+ case VSX_BUILTIN_LXVD2X_V2DF:
+ return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
+ exp, target, false);
+ case VSX_BUILTIN_LXVD2X_V2DI:
+ return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
+ exp, target, false);
+ case VSX_BUILTIN_LXVW4X_V4SF:
+ return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
+ exp, target, false);
+ case VSX_BUILTIN_LXVW4X_V4SI:
+ return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
+ exp, target, false);
+ case VSX_BUILTIN_LXVW4X_V8HI:
+ return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
+ exp, target, false);
+ case VSX_BUILTIN_LXVW4X_V16QI:
+ return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
+ exp, target, false);
+ break;
default:
break;
/* Fall through. */
long_integer_type_internal_node = long_integer_type_node;
long_unsigned_type_internal_node = long_unsigned_type_node;
+ long_long_integer_type_internal_node = long_long_integer_type_node;
+ long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
intQI_type_internal_node = intQI_type_node;
uintQI_type_internal_node = unsigned_intQI_type_node;
intHI_type_internal_node = intHI_type_node;
intDI_type_internal_node = intDI_type_node;
uintDI_type_internal_node = unsigned_intDI_type_node;
float_type_internal_node = float_type_node;
- double_type_internal_node = float_type_node;
+ double_type_internal_node = double_type_node;
void_type_internal_node = void_type_node;
/* Initialize the modes for builtin_function_type, mapping a machine mode to
static void
spe_init_builtins (void)
{
- tree endlink = void_list_node;
tree puint_type_node = build_pointer_type (unsigned_type_node);
tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
struct builtin_description *d;
size_t i;
tree v2si_ftype_4_v2si
- = build_function_type
- (opaque_V2SI_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- endlink)))));
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ NULL_TREE);
tree v2sf_ftype_4_v2sf
- = build_function_type
- (opaque_V2SF_type_node,
- tree_cons (NULL_TREE, opaque_V2SF_type_node,
- tree_cons (NULL_TREE, opaque_V2SF_type_node,
- tree_cons (NULL_TREE, opaque_V2SF_type_node,
- tree_cons (NULL_TREE, opaque_V2SF_type_node,
- endlink)))));
+ = build_function_type_list (opaque_V2SF_type_node,
+ opaque_V2SF_type_node,
+ opaque_V2SF_type_node,
+ opaque_V2SF_type_node,
+ opaque_V2SF_type_node,
+ NULL_TREE);
tree int_ftype_int_v2si_v2si
- = build_function_type
- (integer_type_node,
- tree_cons (NULL_TREE, integer_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- endlink))));
+ = build_function_type_list (integer_type_node,
+ integer_type_node,
+ opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ NULL_TREE);
tree int_ftype_int_v2sf_v2sf
- = build_function_type
- (integer_type_node,
- tree_cons (NULL_TREE, integer_type_node,
- tree_cons (NULL_TREE, opaque_V2SF_type_node,
- tree_cons (NULL_TREE, opaque_V2SF_type_node,
- endlink))));
+ = build_function_type_list (integer_type_node,
+ integer_type_node,
+ opaque_V2SF_type_node,
+ opaque_V2SF_type_node,
+ NULL_TREE);
tree void_ftype_v2si_puint_int
- = build_function_type (void_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- tree_cons (NULL_TREE, puint_type_node,
- tree_cons (NULL_TREE,
- integer_type_node,
- endlink))));
+ = build_function_type_list (void_type_node,
+ opaque_V2SI_type_node,
+ puint_type_node,
+ integer_type_node,
+ NULL_TREE);
tree void_ftype_v2si_puint_char
- = build_function_type (void_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- tree_cons (NULL_TREE, puint_type_node,
- tree_cons (NULL_TREE,
- char_type_node,
- endlink))));
+ = build_function_type_list (void_type_node,
+ opaque_V2SI_type_node,
+ puint_type_node,
+ char_type_node,
+ NULL_TREE);
tree void_ftype_v2si_pv2si_int
- = build_function_type (void_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
- tree_cons (NULL_TREE,
- integer_type_node,
- endlink))));
+ = build_function_type_list (void_type_node,
+ opaque_V2SI_type_node,
+ opaque_p_V2SI_type_node,
+ integer_type_node,
+ NULL_TREE);
tree void_ftype_v2si_pv2si_char
- = build_function_type (void_type_node,
- tree_cons (NULL_TREE, opaque_V2SI_type_node,
- tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
- tree_cons (NULL_TREE,
- char_type_node,
- endlink))));
+ = build_function_type_list (void_type_node,
+ opaque_V2SI_type_node,
+ opaque_p_V2SI_type_node,
+ char_type_node,
+ NULL_TREE);
tree void_ftype_int
- = build_function_type (void_type_node,
- tree_cons (NULL_TREE, integer_type_node, endlink));
+ = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
tree int_ftype_void
- = build_function_type (integer_type_node, endlink);
+ = build_function_type_list (integer_type_node, NULL_TREE);
tree v2si_ftype_pv2si_int
- = build_function_type (opaque_V2SI_type_node,
- tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
- tree_cons (NULL_TREE, integer_type_node,
- endlink)));
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_p_V2SI_type_node,
+ integer_type_node,
+ NULL_TREE);
tree v2si_ftype_puint_int
- = build_function_type (opaque_V2SI_type_node,
- tree_cons (NULL_TREE, puint_type_node,
- tree_cons (NULL_TREE, integer_type_node,
- endlink)));
+ = build_function_type_list (opaque_V2SI_type_node,
+ puint_type_node,
+ integer_type_node,
+ NULL_TREE);
tree v2si_ftype_pushort_int
- = build_function_type (opaque_V2SI_type_node,
- tree_cons (NULL_TREE, pushort_type_node,
- tree_cons (NULL_TREE, integer_type_node,
- endlink)));
+ = build_function_type_list (opaque_V2SI_type_node,
+ pushort_type_node,
+ integer_type_node,
+ NULL_TREE);
tree v2si_ftype_signed_char
- = build_function_type (opaque_V2SI_type_node,
- tree_cons (NULL_TREE, signed_char_type_node,
- endlink));
+ = build_function_type_list (opaque_V2SI_type_node,
+ signed_char_type_node,
+ NULL_TREE);
/* The initialization of the simple binary and unary builtins is
done in rs6000_common_init_builtins, but we have to enable the
{
const struct builtin_description *d;
size_t i;
- tree endlink = void_list_node;
tree int_ftype_int_v2sf_v2sf
- = build_function_type
- (integer_type_node,
- tree_cons (NULL_TREE, integer_type_node,
- tree_cons (NULL_TREE, V2SF_type_node,
- tree_cons (NULL_TREE, V2SF_type_node,
- endlink))));
+ = build_function_type_list (integer_type_node,
+ integer_type_node,
+ V2SF_type_node,
+ V2SF_type_node,
+ NULL_TREE);
tree pcfloat_type_node =
build_pointer_type (build_qualified_type
(float_type_node, TYPE_QUAL_CONST));
size_t i;
tree ftype;
- tree pfloat_type_node = build_pointer_type (float_type_node);
- tree pint_type_node = build_pointer_type (integer_type_node);
- tree pshort_type_node = build_pointer_type (short_integer_type_node);
- tree pchar_type_node = build_pointer_type (char_type_node);
-
tree pvoid_type_node = build_pointer_type (void_type_node);
- tree pcfloat_type_node = build_pointer_type (build_qualified_type (float_type_node, TYPE_QUAL_CONST));
- tree pcint_type_node = build_pointer_type (build_qualified_type (integer_type_node, TYPE_QUAL_CONST));
- tree pcshort_type_node = build_pointer_type (build_qualified_type (short_integer_type_node, TYPE_QUAL_CONST));
- tree pcchar_type_node = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));
-
- tree pcvoid_type_node = build_pointer_type (build_qualified_type (void_type_node, TYPE_QUAL_CONST));
+ tree pcvoid_type_node
+ = build_pointer_type (build_qualified_type (void_type_node,
+ TYPE_QUAL_CONST));
tree int_ftype_opaque
= build_function_type_list (integer_type_node,
opaque_V4SI_type_node, NULL_TREE);
tree opaque_ftype_opaque
- = build_function_type (integer_type_node,
- NULL_TREE);
+ = build_function_type_list (integer_type_node, NULL_TREE);
tree opaque_ftype_opaque_int
= build_function_type_list (opaque_V4SI_type_node,
opaque_V4SI_type_node, integer_type_node, NULL_TREE);
= build_function_type_list (integer_type_node,
integer_type_node, V4SI_type_node,
V4SI_type_node, NULL_TREE);
- tree v4sf_ftype_pcfloat
- = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
- tree void_ftype_pfloat_v4sf
- = build_function_type_list (void_type_node,
- pfloat_type_node, V4SF_type_node, NULL_TREE);
- tree v4si_ftype_pcint
- = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
- tree void_ftype_pint_v4si
- = build_function_type_list (void_type_node,
- pint_type_node, V4SI_type_node, NULL_TREE);
- tree v8hi_ftype_pcshort
- = build_function_type_list (V8HI_type_node, pcshort_type_node, NULL_TREE);
- tree void_ftype_pshort_v8hi
- = build_function_type_list (void_type_node,
- pshort_type_node, V8HI_type_node, NULL_TREE);
- tree v16qi_ftype_pcchar
- = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
- tree void_ftype_pchar_v16qi
- = build_function_type_list (void_type_node,
- pchar_type_node, V16QI_type_node, NULL_TREE);
tree void_ftype_v4si
= build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
tree v8hi_ftype_void
- = build_function_type (V8HI_type_node, void_list_node);
+ = build_function_type_list (V8HI_type_node, NULL_TREE);
tree void_ftype_void
- = build_function_type (void_type_node, void_list_node);
+ = build_function_type_list (void_type_node, NULL_TREE);
tree void_ftype_int
= build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
tree opaque_ftype_long_pcvoid
= build_function_type_list (opaque_V4SI_type_node,
- long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ long_integer_type_node, pcvoid_type_node,
+ NULL_TREE);
tree v16qi_ftype_long_pcvoid
= build_function_type_list (V16QI_type_node,
- long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ long_integer_type_node, pcvoid_type_node,
+ NULL_TREE);
tree v8hi_ftype_long_pcvoid
= build_function_type_list (V8HI_type_node,
- long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ long_integer_type_node, pcvoid_type_node,
+ NULL_TREE);
tree v4si_ftype_long_pcvoid
= build_function_type_list (V4SI_type_node,
- long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ long_integer_type_node, pcvoid_type_node,
+ NULL_TREE);
+ tree v4sf_ftype_long_pcvoid
+ = build_function_type_list (V4SF_type_node,
+ long_integer_type_node, pcvoid_type_node,
+ NULL_TREE);
+ tree v2df_ftype_long_pcvoid
+ = build_function_type_list (V2DF_type_node,
+ long_integer_type_node, pcvoid_type_node,
+ NULL_TREE);
+ tree v2di_ftype_long_pcvoid
+ = build_function_type_list (V2DI_type_node,
+ long_integer_type_node, pcvoid_type_node,
+ NULL_TREE);
tree void_ftype_opaque_long_pvoid
= build_function_type_list (void_type_node,
= build_function_type_list (void_type_node,
V8HI_type_node, long_integer_type_node,
pvoid_type_node, NULL_TREE);
+ tree void_ftype_v4sf_long_pvoid
+ = build_function_type_list (void_type_node,
+ V4SF_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree void_ftype_v2df_long_pvoid
+ = build_function_type_list (void_type_node,
+ V2DF_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree void_ftype_v2di_long_pvoid
+ = build_function_type_list (void_type_node,
+ V2DI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
tree int_ftype_int_v8hi_v8hi
= build_function_type_list (integer_type_node,
integer_type_node, V8HI_type_node,
pcvoid_type_node, integer_type_node,
integer_type_node, NULL_TREE);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
- ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf,
- ALTIVEC_BUILTIN_ST_INTERNAL_4sf);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4si", v4si_ftype_pcint,
- ALTIVEC_BUILTIN_LD_INTERNAL_4si);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si,
- ALTIVEC_BUILTIN_ST_INTERNAL_4si);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pcshort,
- ALTIVEC_BUILTIN_LD_INTERNAL_8hi);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi,
- ALTIVEC_BUILTIN_ST_INTERNAL_8hi);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pcchar,
- ALTIVEC_BUILTIN_LD_INTERNAL_16qi);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi,
- ALTIVEC_BUILTIN_ST_INTERNAL_16qi);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
+ def_builtin (MASK_VSX, "__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
+ VSX_BUILTIN_LXVD2X_V2DF);
+ def_builtin (MASK_VSX, "__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
+ VSX_BUILTIN_LXVD2X_V2DI);
+ def_builtin (MASK_VSX, "__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
+ VSX_BUILTIN_LXVW4X_V4SF);
+ def_builtin (MASK_VSX, "__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
+ VSX_BUILTIN_LXVW4X_V4SI);
+ def_builtin (MASK_VSX, "__builtin_vsx_lxvw4x_v8hi",
+ v8hi_ftype_long_pcvoid, VSX_BUILTIN_LXVW4X_V8HI);
+ def_builtin (MASK_VSX, "__builtin_vsx_lxvw4x_v16qi",
+ v16qi_ftype_long_pcvoid, VSX_BUILTIN_LXVW4X_V16QI);
+ def_builtin (MASK_VSX, "__builtin_vsx_stxvd2x_v2df",
+ void_ftype_v2df_long_pvoid, VSX_BUILTIN_STXVD2X_V2DF);
+ def_builtin (MASK_VSX, "__builtin_vsx_stxvd2x_v2di",
+ void_ftype_v2di_long_pvoid, VSX_BUILTIN_STXVD2X_V2DI);
+ def_builtin (MASK_VSX, "__builtin_vsx_stxvw4x_v4sf",
+ void_ftype_v4sf_long_pvoid, VSX_BUILTIN_STXVW4X_V4SF);
+ def_builtin (MASK_VSX, "__builtin_vsx_stxvw4x_v4si",
+ void_ftype_v4si_long_pvoid, VSX_BUILTIN_STXVW4X_V4SI);
+ def_builtin (MASK_VSX, "__builtin_vsx_stxvw4x_v8hi",
+ void_ftype_v8hi_long_pvoid, VSX_BUILTIN_STXVW4X_V8HI);
+ def_builtin (MASK_VSX, "__builtin_vsx_stxvw4x_v16qi",
+ void_ftype_v16qi_long_pvoid, VSX_BUILTIN_STXVW4X_V16QI);
+ def_builtin (MASK_VSX, "__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
+ VSX_BUILTIN_VEC_LD);
+ def_builtin (MASK_VSX, "__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
+ VSX_BUILTIN_VEC_ST);
+
if (rs6000_cpu == PROCESSOR_CELL)
{
def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
int i;
tree ret_type = NULL_TREE;
tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
- tree args;
/* Create builtin_hash_table. */
if (builtin_hash_table == NULL)
fatal_error ("internal error: builtin function %s had an unexpected "
"return type %s", name, GET_MODE_NAME (h.mode[0]));
+ for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
+ arg_type[i] = NULL_TREE;
+
for (i = 0; i < num_args; i++)
{
int m = (int) h.mode[i+1];
h2 = ggc_alloc_builtin_hash_struct ();
*h2 = h;
*found = (void *)h2;
- args = void_list_node;
- for (i = num_args - 1; i >= 0; i--)
- args = tree_cons (NULL_TREE, arg_type[i], args);
-
- h2->type = build_function_type (ret_type, args);
+ h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
+ arg_type[2], NULL_TREE);
}
return ((struct builtin_hash_struct *)(*found))->type;
needed for the immediate register.
For VSX and Altivec, we may need a register to convert sp+offset into
- reg+sp. */
+ reg+sp.
+
+ For misaligned 64-bit gpr loads and stores we need a register to
+ convert an offset address to indirect. */
static reg_class_t
rs6000_secondary_reload (bool in_p,
else
default_p = true;
}
+ else if (TARGET_POWERPC64
+ && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
+ && MEM_P (x)
+ && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
+ {
+ rtx addr = XEXP (x, 0);
+
+ if (GET_CODE (addr) == PRE_MODIFY)
+ addr = XEXP (addr, 1);
+ else if (GET_CODE (addr) == LO_SUM
+ && GET_CODE (XEXP (addr, 0)) == REG
+ && GET_CODE (XEXP (addr, 1)) == CONST)
+ addr = XEXP (XEXP (addr, 1), 0);
+
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && (INTVAL (XEXP (addr, 1)) & 3) != 0)
+ {
+ if (in_p)
+ sri->icode = CODE_FOR_reload_di_load;
+ else
+ sri->icode = CODE_FOR_reload_di_store;
+ sri->extra_cost = 2;
+ ret = NO_REGS;
+ }
+ else
+ default_p = true;
+ }
else
default_p = true;
return;
}
-/* Target hook to return the cover classes for Integrated Register Allocator.
- Cover classes is a set of non-intersected register classes covering all hard
- registers used for register allocation purpose. Any move between two
- registers of a cover class should be cheaper than load or store of the
- registers. The value is array of register classes with LIM_REG_CLASSES used
- as the end marker.
+/* Convert reloads involving 64-bit gprs and misaligned offset
+ addressing to use indirect addressing. */
- We need two IRA_COVER_CLASSES, one for pre-VSX, and the other for VSX to
- account for the Altivec and Floating registers being subsets of the VSX
- register set under VSX, but distinct register sets on pre-VSX machines. */
-
-static const reg_class_t *
-rs6000_ira_cover_classes (void)
+void
+rs6000_secondary_reload_ppc64 (rtx reg, rtx mem, rtx scratch, bool store_p)
 {
-  static const reg_class_t cover_pre_vsx[] = IRA_COVER_CLASSES_PRE_VSX;
-  static const reg_class_t cover_vsx[] = IRA_COVER_CLASSES_VSX;
+  int regno = true_regnum (reg);
+  enum reg_class rclass;
+  rtx addr;
+  rtx scratch_or_premodify = scratch;
+
+  if (TARGET_DEBUG_ADDR)
+    {
+      fprintf (stderr, "\nrs6000_secondary_reload_ppc64, type = %s\n",
+	       store_p ? "store" : "load");
+      fprintf (stderr, "reg:\n");
+      debug_rtx (reg);
+      fprintf (stderr, "mem:\n");
+      debug_rtx (mem);
+      fprintf (stderr, "scratch:\n");
+      debug_rtx (scratch);
+    }
+
+  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
+  gcc_assert (GET_CODE (mem) == MEM);
+  rclass = REGNO_REG_CLASS (regno);
+  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
+  addr = XEXP (mem, 0);
+
+  /* For a PRE_MODIFY address, reuse the pre-modify base register as the
+     temporary, so that the required base-register update still occurs.  */
+  if (GET_CODE (addr) == PRE_MODIFY)
+    {
+      scratch_or_premodify = XEXP (addr, 0);
+      gcc_assert (REG_P (scratch_or_premodify));
+      addr = XEXP (addr, 1);
+    }
+  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
+
+  /* Force the offset address into the temporary register and address the
+     memory indirectly through it.  */
+  rs6000_emit_move (scratch_or_premodify, addr, Pmode);
-  return (TARGET_VSX) ? cover_vsx : cover_pre_vsx;
+  mem = replace_equiv_address_nv (mem, scratch_or_premodify);
+
+  /* Now create the move.  */
+  if (store_p)
+    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
+  else
+    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
+
+  return;
 }
/* Allocate a 64-bit stack slot to be used for copying SDmode
output_address (XEXP (x, 0));
}
else
- output_addr_const (file, x);
+ {
+ if (toc_relative_expr_p (x))
+ /* This hack along with a corresponding hack in
+ rs6000_output_addr_const_extra arranges to output addends
+ where the assembler expects to find them. eg.
+ (const (plus (unspec [symbol_ref ("x") tocrel]) 4))
+ without this hack would be output as "x@toc+4". We
+ want "x+4@toc". */
+ output_addr_const (file, tocrel_base);
+ else
+ output_addr_const (file, x);
+ }
return;
case '&':
fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
}
#endif
- else if (legitimate_constant_pool_address_p (x, true))
+ else if (legitimate_constant_pool_address_p (x, QImode, true))
{
/* This hack along with a corresponding hack in
rs6000_output_addr_const_extra arranges to output addends
return default_assemble_integer (x, size, aligned_p);
}
-#ifdef HAVE_GAS_HIDDEN
+#if defined (HAVE_GAS_HIDDEN) && !defined (TARGET_MACHO)
/* Emit an assembler directive to set symbol visibility for DECL to
VISIBILITY_TYPE. */
static rs6000_stack_t *
rs6000_stack_info (void)
{
-#ifdef ENABLE_CHECKING
- static rs6000_stack_t info_save;
-#endif
rs6000_stack_t *info_ptr = &stack_info;
int reg_size = TARGET_32BIT ? 4 : 8;
int ehrd_size;
HOST_WIDE_INT non_fixed_size;
bool using_static_chain_p;
-#ifdef ENABLE_CHECKING
- memcpy (&info_save, &stack_info, sizeof stack_info);
-#else
if (reload_completed && info_ptr->reload_completed)
return info_ptr;
-#endif
- memset (&stack_info, 0, sizeof (stack_info));
+ memset (info_ptr, 0, sizeof (*info_ptr));
info_ptr->reload_completed = reload_completed;
if (TARGET_SPE)
if (! info_ptr->cr_save_p)
info_ptr->cr_save_offset = 0;
-#ifdef ENABLE_CHECKING
- gcc_assert (!(reload_completed && info_save.reload_completed)
- || memcmp (&info_save, &stack_info, sizeof stack_info) == 0);
-#endif
return info_ptr;
}
return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
-/* Say whether a function is a candidate for sibcall handling or not.
- We do not allow indirect calls to be optimized into sibling calls.
- Also, we can't do it if there are any vector parameters; there's
- nowhere to put the VRsave code so it works; note that functions with
- vector parameters are required to have a prototype, so the argument
- type info must be available here. (The tail recursion case can work
- with vector parameters, but there's no way to distinguish here.) */
+/* Say whether a function is a candidate for sibcall handling or not. */
+
static bool
-rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
- tree type;
+ tree fntype;
+
if (decl)
+ fntype = TREE_TYPE (decl);
+ else
+ fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
+
+ /* We can't do it if the called function has more vector parameters
+ than the current function; there's nowhere to put the VRsave code. */
+ if (TARGET_ALTIVEC_ABI
+ && TARGET_ALTIVEC_VRSAVE
+ && !(decl && decl == current_function_decl))
{
- if (TARGET_ALTIVEC_VRSAVE)
- {
- for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
- type; type = TREE_CHAIN (type))
- {
- if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
- return false;
- }
- }
- if (DEFAULT_ABI == ABI_DARWIN
- || ((*targetm.binds_local_p) (decl)
- && (DEFAULT_ABI != ABI_AIX || !DECL_EXTERNAL (decl))))
- {
- tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+ function_args_iterator args_iter;
+ tree type;
+ int nvreg = 0;
+
+ /* Functions with vector parameters are required to have a
+ prototype, so the argument type info must be available
+ here. */
+ FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
+ nvreg++;
+
+ FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
+ nvreg--;
+
+ if (nvreg > 0)
+ return false;
+ }
- if (!lookup_attribute ("longcall", attr_list)
- || lookup_attribute ("shortcall", attr_list))
- return true;
- }
+ /* Under the AIX ABI we can't allow calls to non-local functions,
+ because the callee may have a different TOC pointer to the
+ caller and there's no way to ensure we restore the TOC when we
+ return. With the secure-plt SYSV ABI we can't make non-local
+ calls when -fpic/PIC because the plt call stubs use r30. */
+ if (DEFAULT_ABI == ABI_DARWIN
+ || (DEFAULT_ABI == ABI_AIX
+ && decl
+ && !DECL_EXTERNAL (decl)
+ && (*targetm.binds_local_p) (decl))
+ || (DEFAULT_ABI == ABI_V4
+ && (!TARGET_SECURE_PLT
+ || !flag_pic
+ || (decl
+ && (*targetm.binds_local_p) (decl)))))
+ {
+ tree attr_list = TYPE_ATTRIBUTES (fntype);
+
+ if (!lookup_attribute ("longcall", attr_list)
+ || lookup_attribute ("shortcall", attr_list))
+ return true;
}
+
return false;
}
tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
if (TARGET_CMODEL != CMODEL_SMALL)
{
- rtx hi = gen_rtx_PLUS (Pmode, tocreg, gen_rtx_HIGH (Pmode, tocrel));
+ rtx hi = gen_rtx_CONST (Pmode,
+ gen_rtx_PLUS (Pmode, tocreg,
+ gen_rtx_HIGH (Pmode, tocrel)));
if (largetoc_reg != NULL)
{
emit_move_insn (largetoc_reg, hi);
/* Some cases that need register indexed addressing. */
if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
- || (TARGET_VSX && VSX_VECTOR_MODE (mode))
+ || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
|| (TARGET_E500_DOUBLE && mode == DFmode)
|| (TARGET_SPE_ABI
&& SPE_VECTOR_MODE (mode)
p = rtvec_alloc ((lr ? 4 : 3) + n_regs);
if (!savep && lr)
- RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, offset++) = ret_rtx;
RTVEC_ELT (p, offset++)
= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65));
return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
&& (!call_used_regs[reg]
|| (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && !TARGET_SINGLE_PIC_BASE
&& TARGET_TOC && TARGET_MINIMAL_TOC)))
|| (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && !TARGET_SINGLE_PIC_BASE
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
&& call_used_regs[STATIC_CHAIN_REGNUM]);
HOST_WIDE_INT sp_offset = 0;
- if (flag_stack_usage)
+ if (flag_stack_usage_info)
current_function_static_stack_size = info->total_size;
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
insn = emit_insn (generate_set_vrsave (reg, info, 0));
}
+ if (TARGET_SINGLE_PIC_BASE)
+ return; /* Do not set PIC register */
+
/* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
|| (DEFAULT_ABI == ABI_V4
alloc_rname = ggc_strdup (rname);
j = 0;
- RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, j++) = ret_rtx;
RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
gen_rtx_REG (Pmode,
LR_REGNO));
else
p = rtvec_alloc (2);
- RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, 0) = ret_rtx;
RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65))
: gen_rtx_CLOBBER (VOIDmode,
use language_string.
C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
Java is 13. Objective-C is 14. Objective-C++ isn't assigned
- a number, so for now use 9. LTO isn't assigned a number either,
- so for now use 0. */
+ a number, so for now use 9. LTO and Go aren't assigned numbers
+ either, so for now use 0. */
if (! strcmp (language_string, "GNU C")
- || ! strcmp (language_string, "GNU GIMPLE"))
+ || ! strcmp (language_string, "GNU GIMPLE")
+ || ! strcmp (language_string, "GNU Go"))
i = 0;
else if (! strcmp (language_string, "GNU F77")
|| ! strcmp (language_string, "GNU Fortran"))
gen_rtx_USE (VOIDmode,
gen_rtx_REG (SImode,
LR_REGNO)),
- gen_rtx_RETURN (VOIDmode))));
+ ret_rtx)));
SIBLING_CALL_P (insn) = 1;
emit_barrier ();
rtx fun;
ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
- label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
+ label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
emit_library_call (init_one_libfunc (RS6000_MCOUNT),
darwin_file_start ();
/* Determine the argument to -mcpu=. Default to G3 if not specified. */
- for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
- if (rs6000_select[i].set_arch_p && rs6000_select[i].string
- && rs6000_select[i].string[0] != '\0')
- cpu_id = rs6000_select[i].string;
+
+ if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
+ cpu_id = rs6000_default_cpu;
+
+ if (global_options_set.x_rs6000_cpu_index)
+ cpu_id = processor_target_table[rs6000_cpu_index].name;
/* Look through the mapping array. Pick the first name that either
matches the argument, has a bit set in IF_SET that is also set
}
 static void
-rs6000_elf_end_indicate_exec_stack (void)
+rs6000_elf_file_end (void)
 {
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+  /* For 32-bit SVR4, record which floating point (tag 4), vector (tag 8)
+     and small-structure return (tag 12) conventions this compilation
+     actually used, as tracked by the rs6000_passes_* / rs6000_returns_*
+     flags.  */
+  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
+    {
+      if (rs6000_passes_float)
+	fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
+		 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
+		  : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
+		  : 2));
+      if (rs6000_passes_vector)
+	fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
+		 (TARGET_ALTIVEC_ABI ? 2
+		  : TARGET_SPE_ABI ? 3
+		  : 1));
+      if (rs6000_returns_struct)
+	fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
+		 aix_struct_return ? 2 : 1);
+    }
+#endif
+#ifdef POWERPC_LINUX
   if (TARGET_32BIT)
     file_end_indicate_exec_stack ();
+#endif
 }
#endif
return true;
case PLUS:
- if (mode == DFmode)
- {
- if (GET_CODE (XEXP (x, 0)) == MULT)
- {
- /* FNMA accounted in outer NEG. */
- if (outer_code == NEG)
- *total = rs6000_cost->dmul - rs6000_cost->fp;
- else
- *total = rs6000_cost->dmul;
- }
- else
- *total = rs6000_cost->fp;
- }
- else if (mode == SFmode)
- {
- /* FNMA accounted in outer NEG. */
- if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
- *total = 0;
- else
- *total = rs6000_cost->fp;
- }
- else
- *total = COSTS_N_INSNS (1);
- return false;
-
case MINUS:
- if (mode == DFmode)
- {
- if (GET_CODE (XEXP (x, 0)) == MULT
- || GET_CODE (XEXP (x, 1)) == MULT)
- {
- /* FNMA accounted in outer NEG. */
- if (outer_code == NEG)
- *total = rs6000_cost->dmul - rs6000_cost->fp;
- else
- *total = rs6000_cost->dmul;
- }
- else
- *total = rs6000_cost->fp;
- }
- else if (mode == SFmode)
- {
- /* FNMA accounted in outer NEG. */
- if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
- *total = 0;
- else
- *total = rs6000_cost->fp;
- }
+ if (FLOAT_MODE_P (mode))
+ *total = rs6000_cost->fp;
else
*total = COSTS_N_INSNS (1);
return false;
else
*total = rs6000_cost->mulsi_const;
}
- /* FMA accounted in outer PLUS/MINUS. */
- else if ((mode == DFmode || mode == SFmode)
- && (outer_code == PLUS || outer_code == MINUS))
- *total = 0;
- else if (mode == DFmode)
- *total = rs6000_cost->dmul;
else if (mode == SFmode)
*total = rs6000_cost->fp;
+ else if (FLOAT_MODE_P (mode))
+ *total = rs6000_cost->dmul;
else if (mode == DImode)
*total = rs6000_cost->muldi;
else
*total = rs6000_cost->mulsi;
return false;
+ case FMA:
+ if (mode == SFmode)
+ *total = rs6000_cost->fp;
+ else
+ *total = rs6000_cost->dmul;
+ break;
+
case DIV:
case MOD:
if (FLOAT_MODE_P (mode))
{
int ret;
+ if (TARGET_DEBUG_COST)
+ dbg_cost_ctrl++;
+
/* Moves from/to GENERAL_REGS. */
if (reg_classes_intersect_p (to, GENERAL_REGS)
|| reg_classes_intersect_p (from, GENERAL_REGS))
{
+ reg_class_t rclass = from;
+
if (! reg_classes_intersect_p (to, GENERAL_REGS))
- from = to;
+ rclass = to;
- if (from == FLOAT_REGS || from == ALTIVEC_REGS || from == VSX_REGS)
- ret = (rs6000_memory_move_cost (mode, from, false)
+ if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
+ ret = (rs6000_memory_move_cost (mode, rclass, false)
+ rs6000_memory_move_cost (mode, GENERAL_REGS, false));
/* It's more expensive to move CR_REGS than CR0_REGS because of the
shift. */
- else if (from == CR_REGS)
+ else if (rclass == CR_REGS)
ret = 4;
- /* Power6 has slower LR/CTR moves so make them more expensive than
- memory in order to bias spills to memory .*/
- else if (rs6000_cpu == PROCESSOR_POWER6
- && reg_classes_intersect_p (from, LINK_OR_CTR_REGS))
+  /* For those processors that have slow LR/CTR moves, make them more
+     expensive than memory in order to bias spills to memory.  */
+ else if ((rs6000_cpu == PROCESSOR_POWER6
+ || rs6000_cpu == PROCESSOR_POWER7)
+ && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
ret = 6 * hard_regno_nregs[0][mode];
else
+ rs6000_register_move_cost (mode, from, GENERAL_REGS));
if (TARGET_DEBUG_COST)
- fprintf (stderr,
- "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
- ret, GET_MODE_NAME (mode), reg_class_names[from],
- reg_class_names[to]);
+ {
+ if (dbg_cost_ctrl == 1)
+ fprintf (stderr,
+ "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
+ ret, GET_MODE_NAME (mode), reg_class_names[from],
+ reg_class_names[to]);
+ dbg_cost_ctrl--;
+ }
return ret;
}
{
int ret;
+ if (TARGET_DEBUG_COST)
+ dbg_cost_ctrl++;
+
if (reg_classes_intersect_p (rclass, GENERAL_REGS))
ret = 4 * hard_regno_nregs[0][mode];
else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
if (TARGET_DEBUG_COST)
- fprintf (stderr,
- "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
- ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
+ {
+ if (dbg_cost_ctrl == 1)
+ fprintf (stderr,
+ "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
+ ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
+ dbg_cost_ctrl--;
+ }
return ret;
}
valcum.vregno = ALTIVEC_ARG_MIN_REG;
/* Do a trial code generation as if this were going to be passed as
an argument; if any part goes in memory, we return NULL. */
- valret = rs6000_darwin64_record_arg (&valcum, valtype, true, true);
+ valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
if (valret)
return valret;
/* Otherwise fall through to standard ABI rules. */
else if (TREE_CODE (valtype) == COMPLEX_TYPE
&& targetm.calls.split_complex_arg)
return rs6000_complex_function_value (mode);
+ /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
+ return register is used in both cases, and we won't see V2DImode/V2DFmode
+ for pure altivec, combine the two cases. */
else if (TREE_CODE (valtype) == VECTOR_TYPE
&& TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
- && ALTIVEC_VECTOR_MODE (mode))
- regno = ALTIVEC_ARG_RETURN;
- else if (TREE_CODE (valtype) == VECTOR_TYPE
- && TARGET_VSX && TARGET_ALTIVEC_ABI
- && VSX_VECTOR_MODE (mode))
+ && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
regno = ALTIVEC_ARG_RETURN;
else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
&& (mode == DFmode || mode == DCmode
&& TARGET_HARD_FLOAT && TARGET_FPRS
&& ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
regno = FP_ARG_RETURN;
- else if (ALTIVEC_VECTOR_MODE (mode)
+ /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
+ return register is used in both cases, and we won't see V2DImode/V2DFmode
+ for pure altivec, combine the two cases. */
+ else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
&& TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
regno = ALTIVEC_ARG_RETURN;
- else if (VSX_VECTOR_MODE (mode)
- && TARGET_VSX && TARGET_ALTIVEC_ABI)
- regno = ALTIVEC_ARG_RETURN;
else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
return rs6000_complex_function_value (mode);
else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
calling __stack_chk_fail directly. Otherwise it is better to call
__stack_chk_fail directly. */
-static tree
+static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
return x;
}
+/* Given a memory reference X, if it is not already in a form that the
+ AltiVec memory reference instructions accept (i.e. reg or reg+reg
+ addressing, with the low four address bits masked off by an AND of -16,
+ matching the 16-byte alignment that lvx/stvx enforce), rewrite it into
+ that form and return the new MEM; otherwise return X unchanged. */
+
+rtx
+rs6000_address_for_altivec (rtx x)
+{
+ gcc_assert (MEM_P (x));
+ if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
+ {
+ rtx addr = XEXP (x, 0);
+ /* Strict checking is required once reload has started or finished,
+ since pseudos may no longer be created at that point. */
+ int strict_p = (reload_in_progress || reload_completed);
+
+ /* Force addresses that are neither reg+reg nor plain reg into a
+ base register first, so the AND below wraps a valid base. */
+ if (!legitimate_indexed_address_p (addr, strict_p)
+ && !legitimate_indirect_address_p (addr, strict_p))
+ addr = copy_to_mode_reg (Pmode, addr);
+
+ /* Mask off the low 4 bits, the canonical AltiVec address form. */
+ addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
+ x = change_address (x, GET_MODE (x), addr);
+ }
+
+ return x;
+}
+
+/* Implement TARGET_LEGITIMATE_CONSTANT_P.
+
+ On the RS/6000, all integer constants are acceptable (though most will
+ not be valid for particular insns). Only easy FP and vector constants
+ are acceptable. */
+
+static bool
+rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
+{
+ /* TLS symbol references must go through the TLS legitimization
+ sequence; they are never legitimate as bare constants. */
+ if (rs6000_tls_referenced_p (x))
+ return false;
+
+ /* Anything that is not a CONST_DOUBLE/CONST_VECTOR is fine; so is a
+ VOIDmode CONST_DOUBLE (an integer), a DImode constant on 64-bit,
+ or an FP/vector constant that can be synthesized cheaply. */
+ return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
+ || GET_MODE (x) == VOIDmode
+ || (TARGET_POWERPC64 && mode == DImode)
+ || easy_fp_constant (x, mode)
+ || easy_vector_constant (x, mode));
+}
+
#include "gt-rs6000.h"