int toc_save_p; /* true if the TOC needs to be saved */
int push_p; /* true if we need to allocate stack space */
int calls_p; /* true if the function makes any calls */
+ int world_save_p; /* true if we're saving *everything*:
+ r13-r31, cr, f14-f31, vrsave, v20-v31 */
enum rs6000_abi abi; /* which ABI to use */
int gp_save_offset; /* offset to save GP regs from initial SP */
int fp_save_offset; /* offset to save FP regs from initial SP */
/* ABI string from -mabi= option. */
const char *rs6000_abi_string;
+/* Whether to use variant of AIX ABI for PowerPC64 Linux. */
+int dot_symbols;
+
/* Debug flags */
const char *rs6000_debug_name;
int rs6000_debug_stack; /* debug stack applications */
static void rs6000_parse_yes_no_option (const char *, const char *, int *);
static int first_altivec_reg_to_save (void);
static unsigned int compute_vrsave_mask (void);
+static void compute_save_world_info(rs6000_stack_t *info_ptr);
static void is_altivec_return_reg (rtx, void *);
static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
int easy_vector_constant (rtx, enum machine_mode);
static tree rs6000_gimplify_va_arg (tree, tree, tree *, tree *);
static bool rs6000_must_pass_in_stack (enum machine_mode, tree);
+static enum machine_mode rs6000_eh_return_filter_mode (void);
+
/* Hash table stuff for keeping track of TOC entries. */
struct toc_hash_struct GTY(())
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
+#undef TARGET_EH_RETURN_FILTER_MODE
+#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
rs6000_hard_regno_mode_ok_p[m][r] = true;
}
+/* If not otherwise specified by a target, make 'long double' equivalent to
+ 'double'. */
+
+#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
+#endif
+
/* Override command line options. Mostly we process the processor
type and sometimes adjust other TARGET_ options. */
}
/* Set size of long double */
- rs6000_long_double_type_size = 64;
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
if (rs6000_long_double_size_string)
{
char *tail;
if (rs6000_isel_string == 0)
rs6000_isel = 0;
if (rs6000_long_double_size_string == 0)
- rs6000_long_double_type_size = 64;
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
}
rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
return 1;
}
+/* Returns 1 always.  A deliberately permissive predicate: presumably
+   paired with match_parallel patterns whose real validation is done
+   elsewhere (e.g. by save_world_operation / restore_world_operation
+   below) -- TODO confirm against the machine description. */
+
+int
+any_parallel_operand (rtx op ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return 1;
+}
+
/* Returns 1 if op is the count register. */
+
int
count_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
}
/* Returns 1 if op is an altivec register. */
+
int
altivec_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
return false;
if (GET_MODE_NUNITS (mode) != 1)
return false;
- if (GET_MODE_BITSIZE (mode) > 32
- && !(TARGET_HARD_FLOAT && TARGET_FPRS && mode == DFmode))
+ if (GET_MODE_BITSIZE (mode) > 64)
return false;
return CONSTANT_P (x);
tmp1 = gen_reg_rtx (Pmode);
tmp2 = gen_reg_rtx (Pmode);
tmp3 = gen_reg_rtx (Pmode);
- mem = gen_rtx_MEM (Pmode, tmp1);
- RTX_UNCHANGING_P (mem) = 1;
+ mem = gen_const_mem (Pmode, tmp1);
first = emit_insn (gen_load_toc_v4_PIC_1b (tempLR, lab,
gsym));
return;
}
#endif
- emit_insn (gen_macho_high (target, operands[1]));
- emit_insn (gen_macho_low (operands[0], target, operands[1]));
+ if (mode == DImode)
+ {
+ emit_insn (gen_macho_high_di (target, operands[1]));
+ emit_insn (gen_macho_low_di (operands[0], target, operands[1]));
+ }
+ else
+ {
+ emit_insn (gen_macho_high (target, operands[1]));
+ emit_insn (gen_macho_low (operands[0], target, operands[1]));
+ }
return;
}
get_pool_mode (XEXP (operands[1], 0))))
{
operands[1]
- = gen_rtx_MEM (mode,
- create_TOC_reference (XEXP (operands[1], 0)));
+ = gen_const_mem (mode,
+ create_TOC_reference (XEXP (operands[1], 0)));
set_mem_alias_set (operands[1], get_TOC_alias_set ());
- RTX_UNCHANGING_P (operands[1]) = 1;
}
}
break;
return upward;
}
- /* SFmode parameters are padded upwards. */
- if (mode == SFmode)
- return upward;
-
/* Fall back to the default. */
return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
words, n_gpr, n_fpr);
- t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr, build_int_2 (n_gpr, 0));
+ t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
+ build_int_cst (NULL_TREE, n_gpr, 0));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
- t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr, build_int_2 (n_fpr, 0));
+ t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
+ build_int_cst (NULL_TREE, n_fpr, 0));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
if (words != 0)
t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
- build_int_2 (words * UNITS_PER_WORD, 0));
+ build_int_cst (NULL_TREE, words * UNITS_PER_WORD, 0));
t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Find the register save area. */
t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
t = build (PLUS_EXPR, TREE_TYPE (sav), t,
- build_int_2 (-RS6000_VARARGS_SIZE, -1));
+ build_int_cst (NULL_TREE, -RS6000_VARARGS_SIZE, -1));
t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
if (align != 1)
{
t = build2 (PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
- t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, build_int_2 (-align, -1));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (NULL_TREE, -align, -1));
}
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
= build_function_type (V8HI_type_node, void_list_node);
tree void_ftype_void
= build_function_type (void_type_node, void_list_node);
- tree void_ftype_qi
- = build_function_type_list (void_type_node, char_type_node, NULL_TREE);
+ tree void_ftype_int
+ = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
tree v16qi_ftype_long_pcvoid
= build_function_type_list (V16QI_type_node,
def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
- def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_qi, ALTIVEC_BUILTIN_DSS);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
}
}
+
+\f
+/* Expand a block clear operation, and return 1 if successful. Return 0
+ if we should let the compiler generate normal code (a memset
+ library call).
+
+ operands[0] is the destination
+ operands[1] is the length
+ operands[2] is the alignment (in bytes) */
+
+int
+expand_block_clear (rtx operands[])
+{
+ rtx orig_dest = operands[0];
+ rtx bytes_rtx = operands[1];
+ rtx align_rtx = operands[2];
+ int constp = (GET_CODE (bytes_rtx) == CONST_INT);
+ int align;
+ int bytes;
+ int offset;
+ int clear_bytes;
+
+ /* If this is not a fixed size clear, punt; the caller emits a
+ memset call instead. */
+ if (! constp)
+ return 0;
+
+ /* If this is not a fixed size alignment, abort */
+ if (GET_CODE (align_rtx) != CONST_INT)
+ abort ();
+ /* Convert the byte alignment into bits to match the align >= N
+ tests below. */
+ align = INTVAL (align_rtx) * BITS_PER_UNIT;
+
+ /* Anything to clear? */
+ bytes = INTVAL (bytes_rtx);
+ if (bytes <= 0)
+ return 1;
+
+ /* Cap the size handled inline: 64 bytes when 8-byte stores are
+ usable (64-bit target, word alignment), 32 bytes otherwise. */
+ if (bytes > (TARGET_POWERPC64 && align >= 32 ? 64 : 32))
+ return 0;
+
+ /* When optimizing for size, prefer the library call beyond 16
+ bytes. */
+ if (optimize_size && bytes > 16)
+ return 0;
+
+ /* Emit the widest store of zero we can for each chunk, from the
+ front of the block to the back. */
+ for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
+ {
+ rtx (*mov) (rtx, rtx);
+ enum machine_mode mode = BLKmode;
+ rtx dest;
+
+ if (bytes >= 8 && TARGET_POWERPC64
+ /* 64-bit loads and stores require word-aligned
+ displacements. */
+ && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
+ {
+ clear_bytes = 8;
+ mode = DImode;
+ mov = gen_movdi;
+ }
+ else if (bytes >= 4 && !STRICT_ALIGNMENT)
+ { /* move 4 bytes */
+ clear_bytes = 4;
+ mode = SImode;
+ mov = gen_movsi;
+ }
+ else if (bytes == 2 && !STRICT_ALIGNMENT)
+ { /* move 2 bytes */
+ clear_bytes = 2;
+ mode = HImode;
+ mov = gen_movhi;
+ }
+ else /* move 1 byte at a time */
+ {
+ clear_bytes = 1;
+ mode = QImode;
+ mov = gen_movqi;
+ }
+
+ dest = adjust_address (orig_dest, mode, offset);
+
+ emit_insn ((*mov) (dest, const0_rtx));
+ }
+
+ return 1;
+}
+
\f
/* Expand a block move operation, and return 1 if successful. Return 0
if we should let the compiler generate normal code.
/* If this is not a fixed size alignment, abort */
if (GET_CODE (align_rtx) != CONST_INT)
abort ();
- align = INTVAL (align_rtx);
+ align = INTVAL (align_rtx) * BITS_PER_UNIT;
/* Anything to move? */
bytes = INTVAL (bytes_rtx);
else if (bytes >= 8 && TARGET_POWERPC64
/* 64-bit loads and stores require word-aligned
displacements. */
- && (align >= 8 || (! STRICT_ALIGNMENT && align >= 4)))
+ && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
{
move_bytes = 8;
mode = DImode;
move_bytes = (bytes > 8) ? 8 : bytes;
gen_func.movmemsi = gen_movmemsi_2reg;
}
- else if (bytes >= 4 && (align >= 4 || ! STRICT_ALIGNMENT))
+ else if (bytes >= 4 && !STRICT_ALIGNMENT)
{ /* move 4 bytes */
move_bytes = 4;
mode = SImode;
gen_func.mov = gen_movsi;
}
- else if (bytes == 2 && (align >= 2 || ! STRICT_ALIGNMENT))
+ else if (bytes == 2 && !STRICT_ALIGNMENT)
{ /* move 2 bytes */
move_bytes = 2;
mode = HImode;
}
\f
+/* Return 1 if OP is suitable for a save_world call in prologue. It is
+ known to be a PARALLEL.  The expected layout, 55 elements in all
+ (2 + 18 + 12 + 19 + 1 + 3), is: a CLOBBER and a USE, 18 FPR saves
+ (f14..f31), 12 AltiVec saves (v20..v31), 19 GPR saves (r13..r31),
+ one CR2 save, then two USEs and a final CLOBBER. */
+int
+save_world_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ int index;
+ int i;
+ rtx elt;
+ int count = XVECLEN (op, 0);
+
+ if (count != 55)
+ return 0;
+
+ index = 0;
+ if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+ return 0;
+
+ /* 18 floating-point register stores. */
+ for (i=1; i <= 18; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), DFmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != DFmode)
+ return 0;
+ }
+
+ /* 12 AltiVec register stores. */
+ for (i=1; i <= 12; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != V4SImode)
+ return 0;
+ }
+
+ /* 19 general-purpose register stores. */
+ for (i=1; i <= 19; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), Pmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != Pmode)
+ return 0;
+ }
+
+ /* The CR save, which must come from CR2. */
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), Pmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || REGNO (SET_SRC (elt)) != CR2_REGNO
+ || GET_MODE (SET_SRC (elt)) != Pmode)
+ return 0;
+
+ /* Trailing USE/USE/CLOBBER. */
+ if (GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER)
+ return 0;
+ return 1;
+}
+
+/* Return 1 if OP is suitable for a restore_world call in epilogue. It is
+ known to be a PARALLEL.  The expected layout, 59 elements in all
+ (4 + 1 + 19 + 12 + 18 + 5), mirrors save_world_operation above:
+ a RETURN, two USEs and a CLOBBER, the CR2 restore, 19 GPR loads,
+ 12 AltiVec loads, 18 FPR loads, then four CLOBBERs and a USE. */
+int
+restore_world_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ int index;
+ int i;
+ rtx elt;
+ int count = XVECLEN (op, 0);
+
+ if (count != 59)
+ return 0;
+
+ index = 0;
+ if (GET_CODE (XVECEXP (op, 0, index++)) != RETURN
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER)
+ return 0;
+
+ /* The CR restore, which must target CR2. */
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), Pmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || REGNO (SET_DEST (elt)) != CR2_REGNO
+ || GET_MODE (SET_DEST (elt)) != Pmode)
+ return 0;
+
+ /* 19 general-purpose register loads. */
+ for (i=1; i <= 19; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), Pmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != Pmode)
+ return 0;
+ }
+
+ /* 12 AltiVec register loads. */
+ for (i=1; i <= 12; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != V4SImode)
+ return 0;
+ }
+
+ /* 18 floating-point register loads. */
+ for (i=1; i <= 18; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), DFmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != DFmode)
+ return 0;
+ }
+
+ /* Trailing CLOBBERs and USE. */
+ if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+ return 0;
+ return 1;
+}
+
+\f
/* Return 1 if OP is a load multiple operation. It is known to be a
PARALLEL and the first section will be tested. */
return 0;
}
+/* Write out a function code label.  FNAME is the user-level symbol
+ name.  Under the AIX ABI the code entry point is distinct from the
+ function descriptor: when dot symbols are in use it is the '.'-
+ prefixed name, otherwise a local "L." label is emitted instead.
+ V.4 and Darwin use the plain name unchanged. */
+
+void
+rs6000_output_function_entry (FILE *file, const char *fname)
+{
+ if (fname[0] != '.')
+ {
+ switch (DEFAULT_ABI)
+ {
+ default:
+ abort ();
+
+ case ABI_AIX:
+ if (DOT_SYMBOLS)
+ putc ('.', file);
+ else
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
+ break;
+
+ case ABI_V4:
+ case ABI_DARWIN:
+ break;
+ }
+ }
+ /* RS6000_OUTPUT_BASENAME strips any encoding prefix on AIX;
+ elsewhere emit the name as-is. */
+ if (TARGET_AIX)
+ RS6000_OUTPUT_BASENAME (file, fname);
+ else
+ assemble_name (file, fname);
+}
+
/* Print an operand. Recognize special options, documented below. */
#if TARGET_ELF
if (SYMBOL_REF_DECL (x))
mark_decl_referenced (SYMBOL_REF_DECL (x));
- if (XSTR (x, 0)[0] != '.')
- {
- switch (DEFAULT_ABI)
- {
- default:
- abort ();
-
- case ABI_AIX:
- putc ('.', file);
- break;
-
- case ABI_V4:
- case ABI_DARWIN:
- break;
- }
- }
- /* For macho, we need to check it see if we need a stub. */
+ /* For macho, check to see if we need a stub. */
if (TARGET_MACHO)
{
const char *name = XSTR (x, 0);
#endif
assemble_name (file, name);
}
- else if (TARGET_AIX)
- RS6000_OUTPUT_BASENAME (file, XSTR (x, 0));
- else
+ else if (!DOT_SYMBOLS)
assemble_name (file, XSTR (x, 0));
+ else
+ rs6000_output_function_entry (file, XSTR (x, 0));
return;
case 'Z':
{
/* Functions need to have their entry point symbol visibility set as
well as their descriptor symbol visibility. */
- if (DEFAULT_ABI == ABI_AIX && TREE_CODE (decl) == FUNCTION_DECL)
+ if (DEFAULT_ABI == ABI_AIX
+ && DOT_SYMBOLS
+ && TREE_CODE (decl) == FUNCTION_DECL)
{
static const char * const visibility_types[] = {
NULL, "internal", "hidden", "protected"
int j = -1;
bool used_update = false;
- if (GET_CODE (src) == MEM && INT_REGNO_P (reg))
+ if (MEM_P (src) && INT_REGNO_P (reg))
{
rtx breg;
: gen_adddi3 (breg, breg, delta_rtx));
src = gen_rtx_MEM (mode, breg);
}
+ else if (! offsettable_memref_p (src))
+ {
+ rtx newsrc, basereg;
+ basereg = gen_rtx_REG (Pmode, reg);
+ emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
+ newsrc = gen_rtx_MEM (GET_MODE (src), basereg);
+ MEM_COPY_ATTRIBUTES (newsrc, src);
+ src = newsrc;
+ }
/* We have now address involving an base register only.
If we use one of the registers to address memory,
: gen_adddi3 (breg, breg, delta_rtx));
dst = gen_rtx_MEM (mode, breg);
}
+ else if (! offsettable_memref_p (dst))
+ abort ();
}
for (i = 0; i < nregs; i++)
return mask;
}
+/* For a very restricted set of circumstances, we can cut down the
+ size of prologs/epilogs by calling our own save/restore-the-world
+ routines.  Sets INFO_PTR->world_save_p and adjusts the vrsave
+ fields accordingly. */
+
+static void
+compute_save_world_info(rs6000_stack_t *info_ptr)
+{
+ /* Only on Darwin, never with setjmp+EH, and only when every
+ callee-saved FPR, GPR and AltiVec register from the ABI's first
+ saved register onward is being saved along with CR. */
+ info_ptr->world_save_p =
+ (DEFAULT_ABI == ABI_DARWIN)
+ && ! (current_function_calls_setjmp && flag_exceptions)
+ && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
+ && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
+ && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
+ && info_ptr->cr_save_p;
+
+ /* This will not work in conjunction with sibcalls. Make sure there
+ are none. (This check is expensive, but seldom executed.) */
+ if ( info_ptr->world_save_p )
+ {
+ rtx insn;
+ for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
+ if ( GET_CODE (insn) == CALL_INSN
+ && SIBLING_CALL_P (insn))
+ {
+ info_ptr->world_save_p = 0;
+ break;
+ }
+ }
+
+ if (info_ptr->world_save_p)
+ {
+ /* Even if we're not touching VRsave, make sure there's room on the
+ stack for it, if it looks like we're calling SAVE_WORLD, which
+ will attempt to save it. */
+ info_ptr->vrsave_size = 4;
+
+ /* "Save" the VRsave register too if we're saving the world. */
+ if (info_ptr->vrsave_mask == 0)
+ info_ptr->vrsave_mask = compute_vrsave_mask ();
+
+ /* Because the Darwin register save/restore routines only handle
+ F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
+ check and abort if there's something wrong. */
+ if (info_ptr->first_fp_reg_save < FIRST_SAVED_FP_REGNO
+ || info_ptr->first_altivec_reg_save < FIRST_SAVED_ALTIVEC_REGNO)
+ abort ();
+ }
+ return;
+}
+
+
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
else
info_ptr->vrsave_size = 0;
+ compute_save_world_info (info_ptr);
+
/* Calculate the offsets. */
switch (DEFAULT_ABI)
{
return gen_rtx_MEM (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
+#ifndef TARGET_FIX_AND_CONTINUE
+#define TARGET_FIX_AND_CONTINUE 0
+#endif
+
/* Emit function prologue as insns. */
void
int using_store_multiple;
HOST_WIDE_INT sp_offset = 0;
- if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
- {
- reg_mode = V2SImode;
- reg_size = 8;
- }
+ if (TARGET_FIX_AND_CONTINUE)
+ {
+ /* gdb on darwin arranges to forward a function from the old
+ address by modifying the first 4 instructions of the function
+ to branch to the overriding function. This is necessary to
+ permit function pointers that point to the old function to
+ actually forward to the new function. */
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ }
+
+ if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
+ {
+ reg_mode = V2SImode;
+ reg_size = 8;
+ }
using_store_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
&& (!TARGET_SPE_ABI
rs6000_emit_stack_tie ();
}
+ /* Handle world saves specially here. */
+ if (info->world_save_p)
+ {
+ int i, j, sz;
+ rtx treg;
+ rtvec p;
+
+ /* save_world expects lr in r0. */
+ if (info->lr_save_p)
+ {
+ insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
+ assumptions about the offsets of various bits of the stack
+ frame. Abort if things aren't what they should be. */
+ if (info->gp_save_offset != -220
+ || info->fp_save_offset != -144
+ || info->lr_save_offset != 8
+ || info->cr_save_offset != 4
+ || !info->push_p
+ || !info->lr_save_p
+ || (current_function_calls_eh_return && info->ehrd_offset != -432)
+ || (info->vrsave_save_offset != -224
+ || info->altivec_save_offset != (-224 -16 -192)))
+ abort ();
+
+ treg = gen_rtx_REG (SImode, 11);
+ emit_move_insn (treg, GEN_INT (-info->total_size));
+
+ /* SAVE_WORLD takes the caller's LR in R0 and the frame size
+ in R11. It also clobbers R12, so beware! */
+
+ /* Preserve CR2 for save_world prologues */
+ sz = 6;
+ sz += 32 - info->first_gp_reg_save;
+ sz += 64 - info->first_fp_reg_save;
+ sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
+ p = rtvec_alloc (sz);
+ j = 0;
+ RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_SYMBOL_REF (Pmode,
+ "*save_world"));
+ /* We do floats first so that the instruction pattern matches
+ properly. */
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset + 8 * i));
+ rtx mem = gen_rtx_MEM (DFmode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
+ {
+ rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->altivec_save_offset
+ + sp_offset + 16 * i));
+ rtx mem = gen_rtx_MEM (V4SImode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset + reg_size * i));
+ rtx mem = gen_rtx_MEM (reg_mode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+
+ {
+ /* CR register traditionally saved as CR2. */
+ rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset
+ + sp_offset));
+ rtx mem = gen_rtx_MEM (reg_mode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ /* Prevent any attempt to delete the setting of r0 and treg! */
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode, treg);
+ RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode, sp_reg_rtx);
+
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+
+ if (current_function_calls_eh_return)
+ {
+ unsigned int i;
+ for (i = 0; ; ++i)
+ {
+ unsigned int regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+ emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, regno,
+ info->ehrd_offset + sp_offset
+ + reg_size * (int) i,
+ info->total_size);
+ }
+ }
+ }
+
/* Save AltiVec registers if needed. */
- if (TARGET_ALTIVEC_ABI && info->altivec_size != 0)
+ if (! info->world_save_p && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
{
int i;
epilogue. */
if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
- && info->vrsave_mask != 0)
+ && ! info->world_save_p && info->vrsave_mask != 0)
{
rtx reg, mem, vrsave;
int offset;
}
/* If we use the link register, get it into r0. */
- if (info->lr_save_p)
+ if (! info->world_save_p && info->lr_save_p)
{
insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
}
/* If we need to save CR, put it into r12. */
- if (info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
+ if (! info->world_save_p && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
{
rtx set;
/* Do any required saving of fpr's. If only one or two to save, do
it ourselves. Otherwise, call function. */
- if (saving_FPRs_inline)
+ if (! info->world_save_p && saving_FPRs_inline)
{
int i;
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
info->fp_save_offset + sp_offset + 8 * i,
info->total_size);
}
- else if (info->first_fp_reg_save != 64)
+ else if (! info->world_save_p && info->first_fp_reg_save != 64)
{
int i;
char rname[30];
/* Save GPRs. This is done as a PARALLEL if we are using
the store-multiple instructions. */
- if (using_store_multiple)
+ if (! info->world_save_p && using_store_multiple)
{
rtvec p;
int i;
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
- else
+ else if (! info->world_save_p)
{
int i;
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
/* ??? There's no need to emit actual instructions here, but it's the
easiest way to get the frame unwind information emitted. */
- if (current_function_calls_eh_return)
+ if (! info->world_save_p && current_function_calls_eh_return)
{
unsigned int i, regno;
}
/* Save lr if we used it. */
- if (info->lr_save_p)
+ if (! info->world_save_p && info->lr_save_p)
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->lr_save_offset + sp_offset));
}
/* Save CR if we use any that must be preserved. */
- if (info->cr_save_p)
+ if (! info->world_save_p && info->cr_save_p)
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->cr_save_offset + sp_offset));
/* Update stack and set back pointer unless this is V.4,
for which it was done previously. */
- if (info->push_p
+ if (! info->world_save_p && info->push_p
&& !(DEFAULT_ABI == ABI_V4 || current_function_calls_eh_return))
rs6000_emit_allocate_stack (info->total_size, FALSE);
|| rs6000_cpu == PROCESSOR_PPC750
|| optimize_size);
+ if (info->world_save_p)
+ {
+ int i, j;
+ char rname[30];
+ const char *alloc_rname;
+ rtvec p;
+
+ /* eh_rest_world_r10 will return to the location saved in the LR
+ stack slot (which is not likely to be our caller.)
+ Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
+ rest_world is similar, except any R10 parameter is ignored.
+ The exception-handling stuff that was here in 2.95 is no
+ longer necessary. */
+
+ p = rtvec_alloc (9
+ + 1
+ + 32 - info->first_gp_reg_save
+ + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
+ + 63 + 1 - info->first_fp_reg_save);
+
+ strcpy (rname, (current_function_calls_eh_return) ?
+ "*eh_rest_world_r10" : "*rest_world");
+ alloc_rname = ggc_strdup (rname);
+
+ j = 0;
+ RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
+ /* The instruction pattern requires a clobber here;
+ it is shared with the restVEC helper. */
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
+
+ {
+ /* CR register traditionally saved as CR2. */
+ rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset));
+ rtx mem = gen_rtx_MEM (reg_mode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + reg_size * i));
+ rtx mem = gen_rtx_MEM (reg_mode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
+ {
+ rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->altivec_save_offset
+ + 16 * i));
+ rtx mem = gen_rtx_MEM (V4SImode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ for (i = 0; info->first_fp_reg_save + i <= 63; i++)
+ {
+ rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + 8 * i));
+ rtx mem = gen_rtx_MEM (DFmode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
+ emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
+
+ return;
+ }
+
/* If we have a frame pointer, a call to alloca, or a large stack
frame, restore the old stack pointer using the backchain. Otherwise,
we know what size to update it with. */
/* Offset from start of code to tb table. */
fputs ("\t.long ", file);
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
-#if TARGET_AIX
- RS6000_OUTPUT_BASENAME (file, fname);
-#else
- assemble_name (file, fname);
-#endif
- fputs ("-.", file);
-#if TARGET_AIX
- RS6000_OUTPUT_BASENAME (file, fname);
-#else
- assemble_name (file, fname);
-#endif
+ if (TARGET_AIX)
+ RS6000_OUTPUT_BASENAME (file, fname);
+ else
+ assemble_name (file, fname);
+ putc ('-', file);
+ rs6000_output_function_entry (file, fname);
putc ('\n', file);
/* Interrupt handler mask. */
add_compiler_branch_island (tree label_name, tree function_name, int line_number)
{
tree branch_island = build_tree_list (function_name, label_name);
- TREE_TYPE (branch_island) = build_int_2 (line_number, 0);
+ TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number, 0);
TREE_CHAIN (branch_island) = branch_island_list;
branch_island_list = branch_island;
}
machopic_picsymbol_stub1_section ();
else
machopic_symbol_stub1_section ();
- fprintf (file, "\t.align 2\n");
-
- fprintf (file, "%s:\n", stub);
- fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
if (flag_pic == 2)
{
+ fprintf (file, "\t.align 5\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
label++;
local_label_0 = alloca (sizeof("\"L0000000000$spb\""));
sprintf (local_label_0, "\"L%011d$spb\"", label);
fprintf (file, "\tbctr\n");
}
else
- {
- fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
- fprintf (file, "\tlwzu r12,lo16(%s)(r11)\n", lazy_ptr_name);
- fprintf (file, "\tmtctr r12\n");
- fprintf (file, "\tbctr\n");
- }
+ {
+ fprintf (file, "\t.align 4\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
+ fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
+ fprintf (file, "\tlwzu r12,lo16(%s)(r11)\n", lazy_ptr_name);
+ fprintf (file, "\tmtctr r12\n");
+ fprintf (file, "\tbctr\n");
+ }
machopic_lazy_symbol_ptr_section ();
fprintf (file, "%s:\n", lazy_ptr_name);
fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
- fprintf (file, "\t.long dyld_stub_binding_helper\n");
+ fprintf (file, "%sdyld_stub_binding_helper\n",
+ (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses. If the address is already
fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
ASM_OUTPUT_LABEL (file, name);
fputs (DOUBLE_INT_ASM_OP, file);
- putc ('.', file);
- assemble_name (file, name);
- fputs (",.TOC.@tocbase,0\n\t.previous\n\t.size\t", file);
- assemble_name (file, name);
- fputs (",24\n\t.type\t.", file);
- assemble_name (file, name);
- fputs (",@function\n", file);
- if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
+ rs6000_output_function_entry (file, name);
+ fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
+ if (DOT_SYMBOLS)
{
- fputs ("\t.globl\t.", file);
+ fputs ("\t.size\t", file);
+ assemble_name (file, name);
+ fputs (",24\n\t.type\t.", file);
assemble_name (file, name);
- putc ('\n', file);
+ fputs (",@function\n", file);
+ if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
+ {
+ fputs ("\t.globl\t.", file);
+ assemble_name (file, name);
+ putc ('\n', file);
+ }
}
+ else
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
- putc ('.', file);
- ASM_OUTPUT_LABEL (file, name);
+ rs6000_output_function_entry (file, name);
+ fputs (":\n", file);
return;
}
switch (code)
{
- /* On the RS/6000, if it is valid in the insn, it is free.
- So this always returns 0. */
+ /* On the RS/6000, if it is valid in the insn, it is free. */
case CONST_INT:
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
+ if (((outer_code == SET
+ || outer_code == PLUS
+ || outer_code == MINUS)
+ && (CONST_OK_FOR_LETTER_P (INTVAL (x), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (x), 'L')))
+ || ((outer_code == IOR || outer_code == XOR)
+ && (CONST_OK_FOR_LETTER_P (INTVAL (x), 'K')
+ || CONST_OK_FOR_LETTER_P (INTVAL (x), 'L')))
+ || ((outer_code == DIV || outer_code == UDIV
+ || outer_code == MOD || outer_code == UMOD)
+ && exact_log2 (INTVAL (x)) >= 0)
+ || (outer_code == AND
+ && (CONST_OK_FOR_LETTER_P (INTVAL (x), 'K')
+ || CONST_OK_FOR_LETTER_P (INTVAL (x), 'L')
+ || mask_operand (x, VOIDmode)))
+ || outer_code == ASHIFT
+ || outer_code == ASHIFTRT
+ || outer_code == LSHIFTRT
+ || outer_code == ROTATE
+ || outer_code == ROTATERT
+ || outer_code == ZERO_EXTRACT
+ || (outer_code == MULT
+ && CONST_OK_FOR_LETTER_P (INTVAL (x), 'I'))
+ || (outer_code == COMPARE
+ && (CONST_OK_FOR_LETTER_P (INTVAL (x), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (x), 'K'))))
+ {
+ *total = 0;
+ return true;
+ }
+ else if ((outer_code == PLUS
+ && reg_or_add_cint64_operand (x, VOIDmode))
+ || (outer_code == MINUS
+ && reg_or_sub_cint64_operand (x, VOIDmode))
+ || ((outer_code == SET
+ || outer_code == IOR
+ || outer_code == XOR)
+ && (INTVAL (x)
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ /* FALLTHRU */
+
case CONST_DOUBLE:
+ if (mode == DImode
+ && ((outer_code == AND
+ && (CONST_OK_FOR_LETTER_P (INTVAL (x), 'K')
+ || CONST_OK_FOR_LETTER_P (INTVAL (x), 'L')
+ || mask64_operand (x, DImode)))
+ || ((outer_code == IOR || outer_code == XOR)
+ && CONST_DOUBLE_HIGH (x) == 0
+ && (CONST_DOUBLE_LOW (x)
+ & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)))
+ {
+ *total = 0;
+ return true;
+ }
+ else if (mode == DImode
+ && (outer_code == SET
+ || outer_code == IOR
+ || outer_code == XOR)
+ && CONST_DOUBLE_HIGH (x) == 0)
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ /* FALLTHRU */
+
+ case CONST:
case HIGH:
+ case SYMBOL_REF:
+ case MEM:
+ /* When optimizing for size, MEM should be slightly more expensive
+ than generating address, e.g., (plus (reg) (const)).
+ L1 cache latency is about two instructions. */
+ *total = optimize_size ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
+ return true;
+
+ case LABEL_REF:
*total = 0;
return true;
case PLUS:
if (mode == DFmode)
- *total = GET_CODE (XEXP (x, 0)) == MULT
- ? rs6000_cost->dmul
- : rs6000_cost->fp;
+ {
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG)
+ *total = rs6000_cost->dmul - rs6000_cost->fp;
+ else
+ *total = rs6000_cost->dmul;
+ }
+ else
+ *total = rs6000_cost->fp;
+ }
else if (mode == SFmode)
- *total = rs6000_cost->fp;
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ }
else if (GET_CODE (XEXP (x, 0)) == MULT)
{
/* The rs6000 doesn't have shift-and-add instructions. */
*total += COSTS_N_INSNS (1);
}
else
- *total = ((GET_CODE (XEXP (x, 1)) == CONST_INT
- && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1))
- + 0x8000) >= 0x10000)
- && ((INTVAL (XEXP (x, 1)) & 0xffff) != 0))
- ? COSTS_N_INSNS (2)
- : COSTS_N_INSNS (1));
- return true;
+ *total = COSTS_N_INSNS (1);
+ return false;
case MINUS:
if (mode == DFmode)
- *total = GET_CODE (XEXP (x, 0)) == MULT
- ? rs6000_cost->dmul
- : rs6000_cost->fp;
+ {
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG)
+ *total = 0;
+ else
+ *total = rs6000_cost->dmul;
+ }
+ else
+ *total = rs6000_cost->fp;
+ }
else if (mode == SFmode)
- *total = rs6000_cost->fp;
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ }
else if (GET_CODE (XEXP (x, 0)) == MULT)
{
/* The rs6000 doesn't have shift-and-sub instructions. */
}
else
*total = COSTS_N_INSNS (1);
- return true;
-
- case AND:
- case IOR:
- case XOR:
- *total = ((GET_CODE (XEXP (x, 1)) == CONST_INT
- && (INTVAL (XEXP (x, 1)) & (~ (HOST_WIDE_INT) 0xffff)) != 0
- && ((INTVAL (XEXP (x, 1)) & 0xffff) != 0))
- ? COSTS_N_INSNS (2)
- : COSTS_N_INSNS (1));
- return true;
+ return false;
case MULT:
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
else
*total = rs6000_cost->mulsi_const;
}
+ /* FMA accounted in outer PLUS/MINUS. */
+ else if ((mode == DFmode || mode == SFmode)
+ && (outer_code == PLUS || outer_code == MINUS))
+ *total = 0;
else if (mode == DFmode)
*total = rs6000_cost->dmul;
else if (mode == SFmode)
*total = rs6000_cost->muldi;
else
*total = rs6000_cost->mulsi;
- return true;
+ return false;
case DIV:
case MOD:
{
*total = mode == DFmode ? rs6000_cost->ddiv
: rs6000_cost->sdiv;
- return true;
- }
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
- && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
- {
- *total = COSTS_N_INSNS (2);
- return true;
+ return false;
}
/* FALLTHRU */
case UDIV:
case UMOD:
- if (GET_MODE (XEXP (x, 1)) == DImode)
- *total = rs6000_cost->divdi;
- else
- *total = rs6000_cost->divsi;
- return true;
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
+ {
+ if (code == DIV || code == MOD)
+ /* Shift, addze */
+ *total = COSTS_N_INSNS (2);
+ else
+ /* Shift */
+ *total = COSTS_N_INSNS (1);
+ }
+ else
+ {
+ if (GET_MODE (XEXP (x, 1)) == DImode)
+ *total = rs6000_cost->divdi;
+ else
+ *total = rs6000_cost->divsi;
+ }
+ /* Add in shift and subtract for MOD. */
+ if (code == MOD || code == UMOD)
+ *total += COSTS_N_INSNS (2);
+ return false;
case FFS:
*total = COSTS_N_INSNS (4);
- return true;
+ return false;
- case NEG:
- case ABS:
- if (FLOAT_MODE_P (mode))
- *total = rs6000_cost->fp;
+ case NOT:
+ if (outer_code == AND || outer_code == IOR || outer_code == XOR)
+ {
+ *total = 0;
+ return false;
+ }
+ /* FALLTHRU */
+
+ case AND:
+ case IOR:
+ case XOR:
+ case ZERO_EXTRACT:
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATE:
+ case ROTATERT:
+ /* Handle mul_highpart. */
+ if (outer_code == TRUNCATE
+ && GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ if (mode == DImode)
+ *total = rs6000_cost->muldi;
+ else
+ *total = rs6000_cost->mulsi;
+ return true;
+ }
+ else if (outer_code == AND)
+ *total = 0;
else
*total = COSTS_N_INSNS (1);
- return true;
-
- case MEM:
- /* MEM should be slightly more expensive than (plus (reg) (const)). */
- *total = COSTS_N_INSNS (1) + 1;
- return true;
+ return false;
- case NOT:
case SIGN_EXTEND:
case ZERO_EXTEND:
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
case COMPARE:
- *total = COSTS_N_INSNS (1);
- break;
+ case NEG:
+ case ABS:
+ if (!FLOAT_MODE_P (mode))
+ {
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+ /* FALLTHRU */
+ case FLOAT:
+ case UNSIGNED_FLOAT:
+ case FIX:
+ case UNSIGNED_FIX:
+ case FLOAT_EXTEND:
case FLOAT_TRUNCATE:
*total = rs6000_cost->fp;
- return true;
+ return false;
case UNSPEC:
switch (XINT (x, 1))
*total = COSTS_N_INSNS (1);
return true;
}
+ else if (FLOAT_MODE_P (mode)
+ && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
+ {
+ *total = rs6000_cost->fp;
+ return false;
+ }
+
break;
default:
&& targetm.calls.split_complex_arg)
return rs6000_complex_function_value (mode);
else if (TREE_CODE (valtype) == VECTOR_TYPE
- && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
+ && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE(mode))
regno = ALTIVEC_ARG_RETURN;
else
regno = GP_ARG_RETURN;
abort ();
}
+/* Implement the TARGET_EH_RETURN_FILTER_MODE target hook: return the
+   machine mode used to hold the exception-handler filter value.
+   Uses SImode on 32-bit targets and the native word mode otherwise
+   (presumably so the filter stays a 32-bit quantity on 32-bit ABIs
+   while matching the word size on 64-bit ones -- TODO confirm against
+   the unwinder's expectations).  */
+static enum machine_mode
+rs6000_eh_return_filter_mode (void)
+{
+  return TARGET_32BIT ? SImode : word_mode;
+}
+
#include "gt-rs6000.h"