#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
+#include "opts.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
static void rs6000_option_override (void);
static void rs6000_option_init_struct (struct gcc_options *);
static void rs6000_option_default_params (void);
-static bool rs6000_handle_option (size_t, const char *, int);
+static bool rs6000_handle_option (struct gcc_options *, struct gcc_options *,
+ const struct cl_decoded_option *,
+ location_t);
static int rs6000_loop_align_max_skip (rtx);
-static void rs6000_parse_yes_no_option (const char *, const char *, int *);
static int first_altivec_reg_to_save (void);
static unsigned int compute_vrsave_mask (void);
static void compute_save_world_info (rs6000_stack_t *info_ptr);
enum machine_mode,
struct secondary_reload_info *);
-static const reg_class_t *rs6000_ira_cover_classes (void);
-
const int INSN_NOT_AVAILABLE = -1;
static enum machine_mode rs6000_eh_return_filter_mode (void);
static bool rs6000_can_eliminate (const int, const int);
static const struct attribute_spec rs6000_attribute_table[] =
{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
- { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
- { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
- { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
- { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+ affects_type_identity }
+ The trailing affects_type_identity field is new in this version of
+ struct attribute_spec; it is false for every entry here because none
+ of these attributes changes type identity.  */
+ { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute,
+ false },
+ { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute,
+ false },
+ { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute,
+ false },
+ { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
+ false },
+ { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
+ false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
SUBTARGET_ATTRIBUTE_TABLE,
#endif
- { NULL, 0, 0, false, false, false, NULL }
+ /* Sentinel entry terminating the table.  */
+ { NULL, 0, 0, false, false, false, NULL, false }
};
/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
-#undef TARGET_IRA_COVER_CLASSES
-#define TARGET_IRA_COVER_CLASSES rs6000_ira_cover_classes
-
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
return word_mode;
}
-/* Handle generic options of the form -mfoo=yes/no.
- NAME is the option name.
- VALUE is the option value.
- FLAG is the pointer to the flag where to store a 1 or 0, depending on
- whether the option value is 'yes' or 'no' respectively. */
-static void
-rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
-{
- if (value == 0)
- return;
- else if (!strcmp (value, "yes"))
- *flag = 1;
- else if (!strcmp (value, "no"))
- *flag = 0;
- else
- error ("unknown -m%s= option specified: '%s'", name, value);
-}
-
/* Implement TARGET_OPTION_INIT_STRUCT. */
static void
/* Implement TARGET_HANDLE_OPTION. */
static bool
-rs6000_handle_option (size_t code, const char *arg, int value)
+rs6000_handle_option (struct gcc_options *opts, struct gcc_options *opts_set,
+ const struct cl_decoded_option *decoded,
+ location_t loc ATTRIBUTE_UNUSED)
{
enum fpu_type_t fpu_type = FPU_NONE;
- int isel;
char *p, *q;
+ size_t code = decoded->opt_index;
+ const char *arg = decoded->arg;
+ int value = decoded->value;
+
+ gcc_assert (opts == &global_options);
+ gcc_assert (opts_set == &global_options_set);
switch (code)
{
TARGET_ALTIVEC_VRSAVE = value;
break;
- case OPT_mvrsave_:
- rs6000_explicit_options.vrsave = true;
- rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
- break;
-
- case OPT_misel_:
- target_flags_explicit |= MASK_ISEL;
- isel = 0;
- rs6000_parse_yes_no_option ("isel", arg, &isel);
- if (isel)
- target_flags |= MASK_ISEL;
- else
- target_flags &= ~MASK_ISEL;
- break;
-
case OPT_mspe:
rs6000_explicit_options.spe = true;
rs6000_spe = value;
break;
- case OPT_mspe_:
- rs6000_explicit_options.spe = true;
- rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
- break;
-
case OPT_mdebug_:
p = ASTRDUP (arg);
rs6000_debug = 0;
&& regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
+/* Return true if memory accesses to OP are known to never straddle
+   a 32k boundary.  OP is the symbolic part of an address, OFFSET any
+   constant offset applied to it, and MODE the mode of the access.  */
+
+static bool
+offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
+			     enum machine_mode mode)
+{
+  tree decl, type;
+  unsigned HOST_WIDE_INT dsize, dalign;
+
+  /* Only SYMBOL_REFs carry the decl/block information we need.  */
+  if (GET_CODE (op) != SYMBOL_REF)
+    return false;
+
+  decl = SYMBOL_REF_DECL (op);
+  if (!decl)
+    {
+      /* NOTE(review): a zero-size mode gives us no access width to
+	 check against, so be conservative.  */
+      if (GET_MODE_SIZE (mode) == 0)
+	return false;
+
+      /* -fsection-anchors loses the original SYMBOL_REF_DECL when
+	 replacing memory addresses with an anchor plus offset.  We
+	 could find the decl by rummaging around in the block->objects
+	 VEC for the given offset but that seems like too much work.  */
+      dalign = 1;
+      if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
+	  && SYMBOL_REF_ANCHOR_P (op)
+	  && SYMBOL_REF_BLOCK (op) != NULL)
+	{
+	  struct object_block *block = SYMBOL_REF_BLOCK (op);
+	  HOST_WIDE_INT lsb, mask;
+
+	  /* Given the alignment of the block..  */
+	  dalign = block->alignment;
+	  mask = dalign / BITS_PER_UNIT - 1;
+
+	  /* ..and the combined offset of the anchor and any offset
+	     to this block object..  */
+	  offset += SYMBOL_REF_BLOCK_OFFSET (op);
+	  lsb = offset & -offset;
+
+	  /* ..find how many bits of the alignment we know for the
+	     object.  */
+	  mask &= lsb - 1;
+	  dalign = mask + 1;
+	}
+      /* An access of MODE size at an address aligned to at least that
+	 size cannot cross a larger power-of-two boundary.  */
+      return dalign >= GET_MODE_SIZE (mode);
+    }
+
+  if (DECL_P (decl))
+    {
+      /* Function symbols are code addresses; the data straddling
+	 concern does not apply.  */
+      if (TREE_CODE (decl) == FUNCTION_DECL)
+	return true;
+
+      /* Without a compile-time constant size we cannot prove
+	 anything, so be conservative.  */
+      if (!DECL_SIZE_UNIT (decl))
+	return false;
+
+      if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
+	return false;
+
+      dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
+      if (dsize > 32768)
+	return false;
+
+      /* The whole object fits below its own alignment, so no access
+	 within it can straddle a 32k boundary.  */
+      dalign = DECL_ALIGN_UNIT (decl);
+      return dalign >= dsize;
+    }
+
+  /* Not a decl: SYMBOL_REF_DECL holds a constant (e.g. a STRING_CST
+     placed in memory).  Derive size and alignment from the constant,
+     applying the target's constant/data alignment adjustments.  */
+  type = TREE_TYPE (decl);
+
+  if (TREE_CODE (decl) == STRING_CST)
+    dsize = TREE_STRING_LENGTH (decl);
+  else if (TYPE_SIZE_UNIT (type)
+	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
+    dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
+  else
+    return false;
+  if (dsize > 32768)
+    return false;
+
+  dalign = TYPE_ALIGN (type);
+  if (CONSTANT_CLASS_P (decl))
+    dalign = CONSTANT_ALIGNMENT (decl, dalign);
+  else
+    dalign = DATA_ALIGNMENT (decl, dalign);
+  /* TYPE_ALIGN and friends are in bits; convert to bytes for the
+     comparison against the byte size.  */
+  dalign /= BITS_PER_UNIT;
+  return dalign >= dsize;
+}
+
static bool
constant_pool_expr_p (rtx op)
{
&& XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}
+/* Return true if X is a constant pool address, and also for cmodel=medium
+ if X is a toc-relative address known to be offsettable within MODE. */
+
bool
-legitimate_constant_pool_address_p (const_rtx x, bool strict)
+legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
+ bool strict)
{
return (TARGET_TOC
&& (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
|| ((TARGET_MINIMAL_TOC
|| TARGET_CMODEL != CMODEL_SMALL)
&& INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict)))
- && toc_relative_expr_p (XEXP (x, 1)));
+ && toc_relative_expr_p (XEXP (x, 1))
+ && (TARGET_CMODEL != CMODEL_MEDIUM
+ || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
+ || mode == QImode
+ || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
+ INTVAL (tocrel_offset), mode)));
}
static bool
return false;
if (!reg_offset_addressing_ok_p (mode))
return virtual_stack_registers_memory_p (x);
- if (legitimate_constant_pool_address_p (x, strict))
+ if (legitimate_constant_pool_address_p (x, mode, strict))
return true;
if (GET_CODE (XEXP (x, 1)) != CONST_INT)
return false;
{
bool reg_offset_p = reg_offset_addressing_ok_p (mode);
+ /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
+ DFmode/DImode MEM. */
+ if (reg_offset_p
+ && opnum == 1
+ && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
+ || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
+ reg_offset_p = false;
+
/* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
return 1;
if (reg_offset_p && legitimate_small_data_p (mode, x))
return 1;
- if (reg_offset_p && legitimate_constant_pool_address_p (x, reg_ok_strict))
+ if (reg_offset_p
+ && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
return 1;
/* If not REG_OK_STRICT (before reload) let pass any stack offset. */
if (! reg_ok_strict
case LO_SUM:
/* Anything in the constant pool is sufficiently aligned that
all bytes have the same high part address. */
- return !legitimate_constant_pool_address_p (addr, false);
+ return !legitimate_constant_pool_address_p (addr, QImode, false);
/* Auto-increment cases are now treated generically in recog.c. */
case PRE_MODIFY:
if (GET_CODE (operands[0]) == MEM
&& GET_CODE (XEXP (operands[0], 0)) != REG
- && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0), false))
+ && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
+ GET_MODE (operands[0]), false))
operands[0]
= replace_equiv_address (operands[0],
copy_addr_to_reg (XEXP (operands[0], 0)));
if (GET_CODE (operands[1]) == MEM
&& GET_CODE (XEXP (operands[1], 0)) != REG
- && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0), false))
+ && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
+ GET_MODE (operands[1]), false))
operands[1]
= replace_equiv_address (operands[1],
copy_addr_to_reg (XEXP (operands[1], 0)));
}
-/* Return true if memory accesses to DECL are known to never straddle
- a 32k boundary. */
-
-static bool
-offsettable_ok_by_alignment (tree decl)
-{
- unsigned HOST_WIDE_INT dsize, dalign;
-
- /* Presume any compiler generated symbol_ref is suitably aligned. */
- if (!decl)
- return true;
-
- if (TREE_CODE (decl) != VAR_DECL
- && TREE_CODE (decl) != PARM_DECL
- && TREE_CODE (decl) != RESULT_DECL
- && TREE_CODE (decl) != FIELD_DECL)
- return true;
-
- if (!DECL_SIZE_UNIT (decl))
- return false;
-
- if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
- return false;
-
- dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
- if (dsize <= 1)
- return true;
- if (dsize > 32768)
- return false;
-
- dalign = DECL_ALIGN_UNIT (decl);
- return dalign >= dsize;
-}
-
/* Emit a move from SOURCE to DEST in mode MODE. */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
|| (TARGET_CMODEL == CMODEL_MEDIUM
&& GET_CODE (operands[1]) == SYMBOL_REF
&& !CONSTANT_POOL_ADDRESS_P (operands[1])
- && SYMBOL_REF_LOCAL_P (operands[1])
- && offsettable_ok_by_alignment (SYMBOL_REF_DECL (operands[1]))))
+ && SYMBOL_REF_LOCAL_P (operands[1])))
{
rtx reg = NULL_RTX;
if (TARGET_CMODEL != CMODEL_SMALL)
|| (GET_CODE (operands[0]) == REG
&& FP_REGNO_P (REGNO (operands[0]))))
&& GET_CODE (operands[1]) != HIGH
- && ! legitimate_constant_pool_address_p (operands[1], false)
+ && ! legitimate_constant_pool_address_p (operands[1], mode,
+ false)
&& ! toc_relative_expr_p (operands[1])
&& (TARGET_CMODEL == CMODEL_SMALL
|| can_create_pseudo_p ()
needed for the immediate register.
For VSX and Altivec, we may need a register to convert sp+offset into
- reg+sp. */
+ reg+sp.
+
+ For misaligned 64-bit gpr loads and stores we need a register to
+ convert an offset address to indirect. */
static reg_class_t
rs6000_secondary_reload (bool in_p,
else
default_p = true;
}
+ else if (TARGET_POWERPC64
+ && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
+ && MEM_P (x)
+ && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
+ {
+ rtx addr = XEXP (x, 0);
+
+ if (GET_CODE (addr) == PRE_MODIFY)
+ addr = XEXP (addr, 1);
+ else if (GET_CODE (addr) == LO_SUM
+ && GET_CODE (XEXP (addr, 0)) == REG
+ && GET_CODE (XEXP (addr, 1)) == CONST)
+ addr = XEXP (XEXP (addr, 1), 0);
+
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && (INTVAL (XEXP (addr, 1)) & 3) != 0)
+ {
+ if (in_p)
+ sri->icode = CODE_FOR_reload_di_load;
+ else
+ sri->icode = CODE_FOR_reload_di_store;
+ sri->extra_cost = 2;
+ ret = NO_REGS;
+ }
+ else
+ default_p = true;
+ }
else
default_p = true;
return;
}
-/* Target hook to return the cover classes for Integrated Register Allocator.
- Cover classes is a set of non-intersected register classes covering all hard
- registers used for register allocation purpose. Any move between two
- registers of a cover class should be cheaper than load or store of the
- registers. The value is array of register classes with LIM_REG_CLASSES used
- as the end marker.
+/* Convert reloads involving 64-bit gprs and misaligned offset
+ addressing to use indirect addressing. */
- We need two IRA_COVER_CLASSES, one for pre-VSX, and the other for VSX to
- account for the Altivec and Floating registers being subsets of the VSX
- register set under VSX, but distinct register sets on pre-VSX machines. */
-
-static const reg_class_t *
-rs6000_ira_cover_classes (void)
+void
+rs6000_secondary_reload_ppc64 (rtx reg, rtx mem, rtx scratch, bool store_p)
{
-  static const reg_class_t cover_pre_vsx[] = IRA_COVER_CLASSES_PRE_VSX;
-  static const reg_class_t cover_vsx[] = IRA_COVER_CLASSES_VSX;
+  int regno = true_regnum (reg);
+  enum reg_class rclass;
+  rtx addr;
+  rtx scratch_or_premodify = scratch;
+
+  /* Trace the reload when address debugging is enabled.  */
+  if (TARGET_DEBUG_ADDR)
+    {
+      fprintf (stderr, "\nrs6000_secondary_reload_ppc64, type = %s\n",
+	       store_p ? "store" : "load");
+      fprintf (stderr, "reg:\n");
+      debug_rtx (reg);
+      fprintf (stderr, "mem:\n");
+      debug_rtx (mem);
+      fprintf (stderr, "scratch:\n");
+      debug_rtx (scratch);
+    }
+
+  /* This helper only handles hard GPRs (GENERAL_REGS/BASE_REGS)
+     moving to or from a MEM.  */
+  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
+  gcc_assert (GET_CODE (mem) == MEM);
+  rclass = REGNO_REG_CLASS (regno);
+  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
+  addr = XEXP (mem, 0);
+
+  /* For PRE_MODIFY the base register is updated as part of the access,
+     so use that register (rather than SCRATCH) to hold the address.  */
+  if (GET_CODE (addr) == PRE_MODIFY)
+    {
+      scratch_or_premodify = XEXP (addr, 0);
+      gcc_assert (REG_P (scratch_or_premodify));
+      addr = XEXP (addr, 1);
+    }
+  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
+
+  /* Materialize the offset address in the scratch (or premodify base)
+     register, then rewrite the MEM to plain register-indirect form.  */
+  rs6000_emit_move (scratch_or_premodify, addr, Pmode);
-  return (TARGET_VSX) ? cover_vsx : cover_pre_vsx;
+  mem = replace_equiv_address_nv (mem, scratch_or_premodify);
+
+  /* Now create the move.  */
+  if (store_p)
+    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
+  else
+    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
+
+  return;
}
/* Allocate a 64-bit stack slot to be used for copying SDmode
fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
}
#endif
- else if (legitimate_constant_pool_address_p (x, true))
+ else if (legitimate_constant_pool_address_p (x, QImode, true))
{
/* This hack along with a corresponding hack in
rs6000_output_addr_const_extra arranges to output addends
static rs6000_stack_t *
rs6000_stack_info (void)
{
-#ifdef ENABLE_CHECKING
- static rs6000_stack_t info_save;
-#endif
rs6000_stack_t *info_ptr = &stack_info;
int reg_size = TARGET_32BIT ? 4 : 8;
int ehrd_size;
HOST_WIDE_INT non_fixed_size;
bool using_static_chain_p;
-#ifdef ENABLE_CHECKING
- memcpy (&info_save, &stack_info, sizeof stack_info);
-#else
if (reload_completed && info_ptr->reload_completed)
return info_ptr;
-#endif
- memset (&stack_info, 0, sizeof (stack_info));
+ memset (info_ptr, 0, sizeof (*info_ptr));
info_ptr->reload_completed = reload_completed;
if (TARGET_SPE)
if (! info_ptr->cr_save_p)
info_ptr->cr_save_offset = 0;
-#ifdef ENABLE_CHECKING
- gcc_assert (!(reload_completed && info_save.reload_completed)
- || memcmp (&info_save, &stack_info, sizeof stack_info) == 0);
-#endif
return info_ptr;
}
use language_string.
C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
Java is 13. Objective-C is 14. Objective-C++ isn't assigned
- a number, so for now use 9. LTO isn't assigned a number either,
- so for now use 0. */
+ a number, so for now use 9. LTO and Go aren't assigned numbers
+ either, so for now use 0. */
if (! strcmp (language_string, "GNU C")
- || ! strcmp (language_string, "GNU GIMPLE"))
+ || ! strcmp (language_string, "GNU GIMPLE")
+ || ! strcmp (language_string, "GNU Go"))
i = 0;
else if (! strcmp (language_string, "GNU F77")
|| ! strcmp (language_string, "GNU Fortran"))