static int toc_hash_eq (const void *, const void *);
static int constant_pool_expr_1 (rtx, int *, int *);
static bool constant_pool_expr_p (rtx);
-static bool legitimate_small_data_p (enum machine_mode, rtx);
static bool legitimate_indexed_address_p (rtx, int);
static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
static struct machine_function * rs6000_init_machine_status (void);
const char *, tree, rtx);
static rtx altivec_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_stv_builtin (enum insn_code, tree);
+static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
+static rtx altivec_expand_vec_set_builtin (tree);
+static rtx altivec_expand_vec_ext_builtin (tree, rtx);
+static int get_element_number (tree, tree);
static bool rs6000_handle_option (size_t, const char *, int);
static void rs6000_parse_tls_size_option (void);
static void rs6000_parse_yes_no_option (const char *, const char *, int *);
num_insns_constant (rtx op, enum machine_mode mode)
{
HOST_WIDE_INT low, high;
-
+
switch (GET_CODE (op))
{
case CONST_INT:
#if HOST_BITS_PER_WIDE_INT == 64
if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
- && mask_operand (op, mode))
+ && mask64_operand (op, mode))
return 2;
else
#endif
{
long l;
REAL_VALUE_TYPE rv;
-
+
REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
REAL_VALUE_TO_TARGET_SINGLE (rv, l);
return num_insns_constant_wide ((HOST_WIDE_INT) l);
{
long l[2];
REAL_VALUE_TYPE rv;
-
+
REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
high = l[WORDS_BIG_ENDIAN == 0];
if ((high == 0 && low >= 0)
|| (high == -1 && low < 0))
return num_insns_constant_wide (low);
-
- else if (mask_operand (op, mode))
+
+ else if (mask64_operand (op, mode))
return 2;
-
+
else if (low == 0)
return num_insns_constant_wide (high) + 1;
-
+
else
return (num_insns_constant_wide (high)
+ num_insns_constant_wide (low) + 1);
}
-
+
default:
gcc_unreachable ();
}
return "vxor %0,%0,%0";
gcc_assert (easy_vector_constant (vec, mode));
-
+
operands[1] = GEN_INT (cst);
switch (mode)
{
}
else if (EASY_VECTOR_15_ADD_SELF (cst))
return "#";
-
+
default:
gcc_unreachable ();
}
}
gcc_assert (TARGET_SPE);
-
+
/* Vector constant 0 is handled as a splitter of V2SI, and in the
pattern of V1DI, V4HI, and V2SF.
return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
}
-int
-mask64_1or2_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED,
- bool allow_one)
-{
- if (GET_CODE (op) == CONST_INT)
+/* Initialize vector TARGET to VALS, a PARALLEL holding one rtx per
+   vector element.  Chooses the cheapest available strategy: zeroing,
+   immediate splat, constant-pool load, single-element splat, patching
+   one varying element into a constant vector, or building the whole
+   vector in a stack temporary.  */
+
+void
+rs6000_expand_vector_init (rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ int n_elts = GET_MODE_NUNITS (mode);
+ int n_var = 0, one_var = -1;
+ bool all_same = true, all_const_zero = true;
+ rtx x, mem;
+ int i;
+
+ /* Scan the elements: count the non-constants (remembering the index
+    of the last one) and note whether the vector is all-zero or has
+    all-identical elements.  */
+ for (i = 0; i < n_elts; ++i)
{
- HOST_WIDE_INT c, lsb;
- bool one_ok;
-
- c = INTVAL (op);
+ x = XVECEXP (vals, 0, i);
+ if (!CONSTANT_P (x))
+ ++n_var, one_var = i;
+ else if (x != CONST0_RTX (inner_mode))
+ all_const_zero = false;
- /* Disallow all zeros. */
- if (c == 0)
- return 0;
+ if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
+ all_same = false;
+ }
- /* We can use a single rlwinm insn if no upper bits of C are set
- AND there are zero, one or two transitions in the _whole_ of
- C. */
- one_ok = !(c & ~(HOST_WIDE_INT)0xffffffff);
-
- /* We don't change the number of transitions by inverting,
- so make sure we start with the LS bit zero. */
- if (c & 1)
- c = ~c;
-
- /* Find the first transition. */
- lsb = c & -c;
+ /* Entirely constant vector: zero it, splat an easy immediate, or
+    fall back to a constant-pool load.  (V4SFmode is excluded from the
+    integer-style tricks.)  */
+ if (n_var == 0)
+ {
+ if (mode != V4SFmode && all_const_zero)
+ {
+ /* Zero register. */
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_XOR (mode, target, target)));
+ return;
+ }
+ else if (mode != V4SFmode && easy_vector_same (vals, mode))
+ {
+ /* Splat immediate.  NOTE(review): assumes easy_vector_same
+    accepts only splat-able constants — confirm its definition.  */
+ x = gen_rtx_VEC_DUPLICATE (mode, CONST_VECTOR_ELT (vals, 0));
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+ return;
+ }
+ else if (all_same)
+ ; /* Splat vector element: handled by the all_same path below. */
+ else
+ {
+ /* Load from constant pool. */
+ emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
+ return;
+ }
+ }
- /* Invert to look for a second transition. */
- c = ~c;
+ /* Store value to stack temp. Load vector element. Splat. */
+ if (all_same)
+ {
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
+ XVECEXP (vals, 0, 0));
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_LVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ target, mem),
+ x)));
+ x = gen_rtx_VEC_SELECT (inner_mode, target,
+ gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (1, const0_rtx)));
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_VEC_DUPLICATE (mode, x)));
+ return;
+ }
- /* Erase first transition. */
- c &= -lsb;
+ /* One field is non-constant. Load constant then overwrite
+ varying field. */
+ if (n_var == 1)
+ {
+ rtx copy = copy_rtx (vals);
- /* Find the second transition. */
- lsb = c & -c;
+ /* Load constant part of vector, substitute neighboring value for
+ varying element. */
+ XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
+ rs6000_expand_vector_init (target, copy);
- /* Invert to look for a third transition. */
- c = ~c;
+ /* Insert variable. */
+ rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
+ return;
+ }
- /* Erase second transition. */
- c &= -lsb;
+ /* General case: construct the vector in memory one field at a time
+ and load the whole vector. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ for (i = 0; i < n_elts; i++)
+ emit_move_insn (adjust_address_nv (mem, inner_mode,
+ i * GET_MODE_SIZE (inner_mode)),
+ XVECEXP (vals, 0, i));
+ emit_move_insn (target, mem);
+}
- if (one_ok && !(allow_one || c))
- return 0;
+/* Set field ELT of vector register TARGET to VAL: load VAL into a
+   vector register via a stack temp, then merge it into TARGET with a
+   permute whose mask selects TARGET bytes everywhere except the
+   destination element.  */
- /* Find the third transition (if any). */
- lsb = c & -c;
+void
+rs6000_expand_vector_set (rtx target, rtx val, int elt)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ rtx reg = gen_reg_rtx (mode);
+ rtx mask, mem, x;
+ int width = GET_MODE_SIZE (inner_mode);
+ int i;
- /* Match if all the bits above are 1's (or c is zero). */
- return c == -lsb;
- }
- return 0;
+ /* Load single variable value. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_LVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ reg, mem),
+ x)));
+
+ /* Linear sequence: identity mask selecting bytes 0..15 of TARGET. */
+ mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
+ for (i = 0; i < 16; ++i)
+ XVECEXP (mask, 0, i) = GEN_INT (i);
+
+ /* Set permute mask to insert element into target.  Indices 0x10+i
+    select byte i of the second source (REG) — vperm convention.  */
+ for (i = 0; i < width; ++i)
+ XVECEXP (mask, 0, elt*width + i)
+ = GEN_INT (i + 0x10);
+ x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
+ x = gen_rtx_UNSPEC (mode,
+ gen_rtvec (3, target, reg,
+ force_reg (V16QImode, x)),
+ UNSPEC_VPERM);
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+}
+
+/* Extract field ELT from vector register VEC into scalar TARGET, by
+   storing the selected element through a stack temporary and loading
+   it back in the inner (element) mode.  */
+
+void
+rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
+{
+ enum machine_mode mode = GET_MODE (vec);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ rtx mem, x;
+
+ /* Allocate mode-sized buffer. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+
+ /* Add offset to field within buffer matching vector element. */
+ mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));
+
+ /* Store single field into mode-sized buffer.  NOTE(review): the
+    UNSPEC_STVE pairing presumably makes this a single-element store
+    (stvewx-style) despite the vector-mode SET — confirm against the
+    corresponding .md pattern.  */
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_STVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ mem, vec),
+ x)));
+ emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
{
tree field = TYPE_FIELDS (type);
- /* Skip all non field decls */
+ /* Skip all non field decls */
while (field != NULL && TREE_CODE (field) != FIELD_DECL)
field = TREE_CHAIN (field);
&& constant_pool_expr_p (XEXP (x, 1)));
}
-static bool
-legitimate_small_data_p (enum machine_mode mode, rtx x)
+bool
+rs6000_legitimate_small_data_p (enum machine_mode mode, rtx x)
{
return (DEFAULT_ABI == ABI_V4
&& !flag_pic && !TARGET_TOC
case V8HImode:
case V4SFmode:
case V4SImode:
- /* AltiVec vector modes. Only reg+reg addressing is valid here,
- which leaves the only valid constant offset of zero, which by
- canonicalization rules is also invalid. */
- return false;
+ /* AltiVec vector modes. Only reg+reg addressing is valid and
+ constant offset zero should not occur due to canonicalization.
+ Allow any offset when not strict before reload. */
+ return !strict;
case V4HImode:
case V2SImode:
{
rtx r3, got, tga, tmp1, tmp2, eqv;
+ /* We currently use relocations like @got@tlsgd for tls, which
+ means the linker will handle allocation of tls entries, placing
+ them in the .got section. So use a pointer to the .got section,
+ not one to secondary TOC sections used by 64-bit -mminimal-toc,
+ or to secondary GOT sections used by 32-bit -fPIC. */
if (TARGET_64BIT)
- got = gen_rtx_REG (Pmode, TOC_REGISTER);
+ got = gen_rtx_REG (Pmode, 2);
else
{
if (flag_pic == 1)
return x;
}
-#if TARGET_MACHO
if (GET_CODE (x) == SYMBOL_REF
- && DEFAULT_ABI == ABI_DARWIN
&& !ALTIVEC_VECTOR_MODE (mode)
+#if TARGET_MACHO
+ && DEFAULT_ABI == ABI_DARWIN
&& (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+#else
+ && DEFAULT_ABI == ABI_V4
+ && !flag_pic
+#endif
/* Don't do this for TFmode, since the result isn't offsettable.
The same goes for DImode without 64-bit gprs. */
&& mode != TFmode
&& (mode != DImode || TARGET_POWERPC64))
{
+#if TARGET_MACHO
if (flag_pic)
{
rtx offset = gen_rtx_CONST (Pmode,
gen_rtx_HIGH (Pmode, offset)), offset);
}
else
+#endif
x = gen_rtx_LO_SUM (GET_MODE (x),
gen_rtx_HIGH (Pmode, x), x);
*win = 1;
return x;
}
-#endif
+
+ /* Reload an offset address wrapped by an AND that represents the
+ masking of the lower bits. Strip the outer AND and let reload
+ convert the offset address into an indirect address. */
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (x) == AND
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == -16)
+ {
+ x = XEXP (x, 0);
+ *win = 1;
+ return x;
+ }
if (TARGET_TOC
&& constant_pool_expr_p (x)
&& TARGET_UPDATE
&& legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
return 1;
- if (legitimate_small_data_p (mode, x))
+ if (rs6000_legitimate_small_data_p (mode, x))
return 1;
if (legitimate_constant_pool_address_p (x))
return 1;
dest = gen_reg_rtx (mode);
emit_insn (gen_rtx_SET (VOIDmode, dest, source));
return dest;
-
+
case SImode:
result = no_new_pseudos ? dest : gen_reg_rtx (SImode);
-
+
emit_insn (gen_rtx_SET (VOIDmode, result,
GEN_INT (INTVAL (source)
& (~ (HOST_WIDE_INT) 0xffff))));
c0 = INTVAL (source);
c1 = -(c0 < 0);
break;
-
+
case CONST_DOUBLE:
#if HOST_BITS_PER_WIDE_INT >= 64
c0 = CONST_DOUBLE_LOW (source);
|| CONST_DOUBLE_LOW (operands[1]) < 0)
&& (CONST_DOUBLE_HIGH (operands[1]) != -1
|| CONST_DOUBLE_LOW (operands[1]) >= 0)));
-
+
/* Check if GCC is setting up a block move that will end up using FP
registers as temporaries. We must make sure this is acceptable. */
if (GET_CODE (operands[0]) == MEM
grovel through the fields for these too. */
cum->intoffset = 0;
rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
- rs6000_darwin64_record_arg_advance_flush (cum,
+ rs6000_darwin64_record_arg_advance_flush (cum,
size * BITS_PER_UNIT);
}
}
structure between cum->intoffset and bitpos to integer registers. */
static void
-rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
+rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
enum machine_mode mode;
if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
cum->use_stack = 1;
-
+
intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
if (intregs <= 0)
return;
/* Recursive workhorse for the following. */
static void
-rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, tree type,
+rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, tree type,
HOST_WIDE_INT startbitpos, rtx rvec[],
int *k)
{
#endif
rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
rvec[(*k)++]
- = gen_rtx_EXPR_LIST (VOIDmode,
+ = gen_rtx_EXPR_LIST (VOIDmode,
gen_rtx_REG (mode, cum->fregno++),
GEN_INT (bitpos / BITS_PER_UNIT));
if (mode == TFmode)
{
rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
rvec[(*k)++]
- = gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (mode, cum->vregno++),
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->vregno++),
GEN_INT (bitpos / BITS_PER_UNIT));
}
else if (cum->intoffset == -1)
being passed by value, along with the offset of where the
register's value may be found in the block. FP fields go in FP
register, vector fields go in vector registers, and everything
- else goes in int registers, packed as in memory.
+ else goes in int registers, packed as in memory.
This code is also used for function return values. RETVAL indicates
whether this is the case.
- Much of this is taken from the Sparc V9 port, which has a similar
+ Much of this is taken from the SPARC V9 port, which has a similar
calling convention. */
static rtx
if (mode == VOIDmode)
{
if (abi == ABI_V4
- && cum->nargs_prototype < 0
&& (cum->call_cookie & CALL_LIBCALL) == 0
- && (cum->prototype || TARGET_NO_PROTOTYPE))
+ && (cum->stdarg
+ || (cum->nargs_prototype < 0
+ && (cum->prototype || TARGET_NO_PROTOTYPE))))
{
/* For the SPE, we need to crxor CR6 always. */
if (TARGET_SPE_ABI)
/* If we are passing this arg in the fixed parameter save area
(gprs or memory) as well as fprs, then this function should
return the number of bytes passed in the parameter save area
- rather than bytes passed in fprs. */
+ rather than bytes passed in fprs. */
&& !(type
&& (cum->nargs_prototype <= 0
|| (DEFAULT_ABI == ABI_AIX
mem = gen_rtx_MEM (BLKmode,
plus_constant (save_area,
- first_reg_offset * reg_size)),
+ first_reg_offset * reg_size));
+ MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
set_mem_align (mem, BITS_PER_WORD);
fregno++, off += UNITS_PER_FP_WORD, nregs++)
{
mem = gen_rtx_MEM (DFmode, plus_constant (save_area, off));
+ MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
set_mem_align (mem, GET_MODE_ALIGNMENT (DFmode));
emit_move_insn (mem, gen_rtx_REG (DFmode, fregno));
return NULL_RTX;
}
+/* Expand a vec_init builtin: TYPE is the vector result type, ARGLIST
+   supplies exactly one scalar expression per vector element, and
+   TARGET is a suggested result register (may be NULL or unusable).
+   Returns the register holding the initialized vector.  */
+static rtx
+altivec_expand_vec_init_builtin (tree type, tree arglist, rtx target)
+{
+ enum machine_mode tmode = TYPE_MODE (type);
+ enum machine_mode inner_mode = GET_MODE_INNER (tmode);
+ int i, n_elt = GET_MODE_NUNITS (tmode);
+ rtvec v = rtvec_alloc (n_elt);
+
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ /* Expand each argument and narrow it to the element mode.  */
+ for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
+ {
+ rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
+ RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
+ }
+
+ /* The builtin prototype must supply exactly n_elt arguments.  */
+ gcc_assert (arglist == NULL);
+
+ if (!target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
+ return target;
+}
+
+/* Return the integer constant in ARG. Constrain it to be in the range
+ of the subparts of VEC_TYPE; issue an error if not. */
+
+static int
+get_element_number (tree vec_type, tree arg)
+{
+ unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
+
+ if (!host_integerp (arg, 1)
+ || (elt = tree_low_cst (arg, 1), elt > max))
+ {
+ /* Diagnose and recover by selecting element 0.  */
+ error ("selector must be an integer constant in the range 0..%wi", max);
+ return 0;
+ }
+
+ return elt;
+}
+
+/* Expand a vec_set builtin: arguments are (vector, new value,
+   constant selector).  Returns the register holding the vector with
+   the selected element replaced.  */
+static rtx
+altivec_expand_vec_set_builtin (tree arglist)
+{
+ enum machine_mode tmode, mode1;
+ tree arg0, arg1, arg2;
+ int elt;
+ rtx op0, op1;
+
+ /* arg0 = vector, arg1 = replacement value, arg2 = element index.  */
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+
+ tmode = TYPE_MODE (TREE_TYPE (arg0));
+ mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
+ elt = get_element_number (TREE_TYPE (arg0), arg2);
+
+ /* Coerce the value to the element mode if it came out wider.  */
+ if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
+ op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
+
+ op0 = force_reg (tmode, op0);
+ op1 = force_reg (mode1, op1);
+
+ rs6000_expand_vector_set (op0, op1, elt);
+
+ return op0;
+}
+
+/* Expand a vec_ext builtin: arguments are (vector, constant selector).
+   TARGET is a suggested result register.  Returns the register holding
+   the extracted scalar element.  */
+static rtx
+altivec_expand_vec_ext_builtin (tree arglist, rtx target)
+{
+ enum machine_mode tmode, mode0;
+ tree arg0, arg1;
+ int elt;
+ rtx op0;
+
+ /* arg0 = vector, arg1 = element index.  */
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ elt = get_element_number (TREE_TYPE (arg0), arg1);
+
+ tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ mode0 = TYPE_MODE (TREE_TYPE (arg0));
+ gcc_assert (VECTOR_MODE_P (mode0));
+
+ op0 = force_reg (mode0, op0);
+
+ if (optimize || !target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ rs6000_expand_vector_extract (target, op0, elt);
+
+ return target;
+}
+
/* Expand the builtin in EXP and store the result in TARGET. Store
true in *EXPANDEDP if we found a builtin to expand. */
static rtx
emit_insn (gen_altivec_dss (op0));
return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
+ return altivec_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
+
+ case ALTIVEC_BUILTIN_VEC_SET_V4SI:
+ case ALTIVEC_BUILTIN_VEC_SET_V8HI:
+ case ALTIVEC_BUILTIN_VEC_SET_V16QI:
+ case ALTIVEC_BUILTIN_VEC_SET_V4SF:
+ return altivec_expand_vec_set_builtin (arglist);
+
+ case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
+ return altivec_expand_vec_ext_builtin (arglist, target);
+
+ default:
+ break;
+ /* Fall through. */
}
/* Expand abs* operations. */
}
gcc_assert (TARGET_ALTIVEC || TARGET_SPE);
-
+
/* Handle simple unary operations. */
d = (struct builtin_description *) bdesc_1arg;
for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
if (d->code == fcode)
return rs6000_expand_unop_builtin (d->icode, arglist, target);
-
+
/* Handle simple binary operations. */
d = (struct builtin_description *) bdesc_2arg;
for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
if (d->code == fcode)
return rs6000_expand_ternop_builtin (d->icode, arglist, target);
-
+
gcc_unreachable ();
}
struct builtin_description *d;
struct builtin_description_predicates *dp;
size_t i;
+ tree ftype;
+
tree pfloat_type_node = build_pointer_type (float_type_node);
tree pint_type_node = build_pointer_type (integer_type_node);
tree pshort_type_node = build_pointer_type (short_integer_type_node);
/* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
altivec_builtin_mask_for_load = decl;
}
+
+ /* Access to the vec_init patterns. */
+ ftype = build_function_type_list (V4SI_type_node, integer_type_node,
+ integer_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SI);
+
+ ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V8HI);
+
+ ftype = build_function_type_list (V16QI_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V16QI);
+
+ ftype = build_function_type_list (V4SF_type_node, float_type_node,
+ float_type_node, float_type_node,
+ float_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SF);
+
+ /* Access to the vec_set patterns. */
+ ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
+ intSI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V4SI);
+
+ ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
+ intHI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V8HI);
+
+ ftype = build_function_type_list (V8HI_type_node, V16QI_type_node,
+ intQI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V16QI);
+
+ ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
+ float_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V4SF);
+
+ /* Access to the vec_extract patterns. */
+ ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SI);
+
+ ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V8HI);
+
+ ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V16QI);
+
+ ftype = build_function_type_list (float_type_node, V4SF_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SF);
}
static void
mode2 = insn_data[d->icode].operand[2].mode;
mode3 = insn_data[d->icode].operand[3].mode;
}
-
+
/* When all four are of the same mode. */
if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
{
mode2 = VOIDmode;
}
else
- {
+ {
if (d->name == 0 || d->icode == CODE_FOR_nothing)
continue;
{
if (d->name == 0 || d->icode == CODE_FOR_nothing)
continue;
-
+
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
}
clear_bytes = 4;
mode = SImode;
}
- else if (bytes == 2 && (align >= 16 || !STRICT_ALIGNMENT))
+ else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
{ /* move 2 bytes */
clear_bytes = 2;
mode = HImode;
mode = SImode;
gen_func.mov = gen_movsi;
}
- else if (bytes == 2 && (align >= 16 || !STRICT_ALIGNMENT))
+ else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
{ /* move 2 bytes */
move_bytes = 2;
mode = HImode;
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
rtx addr1, addr2;
- unsigned int reg1;
- int offset1;
+ unsigned int reg1, reg2;
+ int offset1, offset2;
/* The mems cannot be volatile. */
if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
offset1 = 0;
}
- /* Make sure the second address is a (mem (plus (reg) (const_int)))
- or if it is (mem (reg)) then make sure that offset1 is -8 and the same
- register as addr1. */
- if (offset1 == -8 && GET_CODE (addr2) == REG && reg1 == REGNO (addr2))
- return 1;
- if (GET_CODE (addr2) != PLUS)
- return 0;
-
- if (GET_CODE (XEXP (addr2, 0)) != REG
- || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ /* And now for the second addr. */
+ if (GET_CODE (addr2) == PLUS)
+ {
+ /* If not a REG, return zero. */
+ if (GET_CODE (XEXP (addr2, 0)) != REG)
+ return 0;
+ else
+ {
+ reg2 = REGNO (XEXP (addr2, 0));
+ /* The offset must be constant. */
+ if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ return 0;
+ offset2 = INTVAL (XEXP (addr2, 1));
+ }
+ }
+ else if (GET_CODE (addr2) != REG)
return 0;
+ else
+ {
+ reg2 = REGNO (addr2);
+ /* This was a simple (mem (reg)) expression. Offset is 0. */
+ offset2 = 0;
+ }
- if (reg1 != REGNO (XEXP (addr2, 0)))
+ /* Both of these must have the same base register. */
+ if (reg1 != reg2)
return 0;
/* The offset for the second addr must be 8 more than the first addr. */
- if (INTVAL (XEXP (addr2, 1)) != offset1 + 8)
+ if (offset2 != offset1 + 8)
return 0;
/* All the tests passed. addr1 and addr2 are valid for lfq or stfq
/* PowerPC64 mask position. All 0's is excluded.
CONST_INT 32-bit mask is considered sign-extended so any
transition must occur within the CONST_INT, not on the boundary. */
- if (! mask_operand (x, DImode))
+ if (! mask64_operand (x, DImode))
output_operand_lossage ("invalid %%S value");
uval = INT_LOWPART (x);
{
gcc_assert (GET_CODE (tmp) == PLUS
&& GET_CODE (XEXP (tmp, 1)) == REG);
-
+
if (REGNO (XEXP (tmp, 0)) == 0)
fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
reg_names[ REGNO (XEXP (tmp, 0)) ]);
gcc_unreachable ();
}
break;
-
+
case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
switch (op_mode)
{
: gen_cmpsfgt_gpr (compare_result, rs6000_compare_op0,
rs6000_compare_op1);
break;
-
+
case DFmode:
cmp = flag_unsafe_math_optimizations
? gen_tstdfgt_gpr (compare_result, rs6000_compare_op0,
gcc_unreachable ();
}
break;
-
+
case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
switch (op_mode)
{
: gen_cmpsflt_gpr (compare_result, rs6000_compare_op0,
rs6000_compare_op1);
break;
-
+
case DFmode:
cmp = flag_unsafe_math_optimizations
? gen_tstdflt_gpr (compare_result, rs6000_compare_op0,
temp = gen_reg_rtx (dest_mode);
- /* For each vector element, select op1 when mask is 1 otherwise
+ /* For each vector element, select op1 when mask is 1 otherwise
select op2. */
t = gen_rtx_SET (VOIDmode, temp,
gen_rtx_UNSPEC (dest_mode,
(set M (CODE:MODE M OP))
If not NULL, BEFORE is atomically set to M before the operation, and
AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
- If SYNC_P then a memory barrier is emitted before the operation.
+ If SYNC_P then a memory barrier is emitted before the operation.
Either OP or M may be wrapped in a NOT operation. */
void
rtvec vec;
HOST_WIDE_INT imask = GET_MODE_MASK (mode);
rtx shift = NULL_RTX;
-
+
if (sync_p)
emit_insn (gen_memory_barrier ());
-
+
if (GET_CODE (m) == NOT)
used_m = XEXP (m, 0);
else
int ishift = 0;
if (BYTES_BIG_ENDIAN)
ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);
-
+
shift = GEN_INT (ishift);
}
else
{
rtx addrSI, aligned_addr;
int shift_mask = mode == QImode ? 0x18 : 0x10;
-
+
addrSI = force_reg (SImode, gen_lowpart_common (SImode,
XEXP (used_m, 0)));
shift = gen_reg_rtx (SImode);
}
else
oldop = lowpart_subreg (SImode, op, mode);
+
switch (code)
{
case IOR:
break;
case PLUS:
+ case MINUS:
{
rtx mask;
-
+
newop = expand_binop (SImode, and_optab,
oldop, GEN_INT (imask), NULL_RTX,
1, OPTAB_LIB_WIDEN);
emit_move_insn (mask, GEN_INT (imask));
emit_insn (gen_ashlsi3 (mask, mask, shift));
- newop = gen_rtx_AND (SImode, gen_rtx_PLUS (SImode, m, newop),
- mask);
+ if (code == PLUS)
+ newop = gen_rtx_PLUS (SImode, m, newop);
+ else
+ newop = gen_rtx_MINUS (SImode, m, newop);
+ newop = gen_rtx_AND (SImode, newop, mask);
newop = gen_rtx_IOR (SImode, newop,
gen_rtx_AND (SImode,
gen_rtx_NOT (SImode, mask),
if (after == NULL_RTX)
after = gen_reg_rtx (used_mode);
}
-
- if ((code == PLUS || GET_CODE (m) == NOT) && used_mode != mode)
+
+ if ((code == PLUS || code == MINUS || GET_CODE (m) == NOT)
+ && used_mode != mode)
the_op = op; /* Computed above. */
else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
set_after = gen_rtx_SET (VOIDmode, after, the_op);
set_before = gen_rtx_SET (VOIDmode, before, used_m);
set_atomic = gen_rtx_SET (VOIDmode, used_m,
- gen_rtx_UNSPEC_VOLATILE (used_mode,
- gen_rtvec (1, the_op),
- UNSPECV_SYNC_OP));
+ gen_rtx_UNSPEC (used_mode,
+ gen_rtvec (1, the_op),
+ UNSPEC_SYNC_OP));
cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
- if (code == PLUS && used_mode != mode)
+ if ((code == PLUS || code == MINUS) && used_mode != mode)
vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
else
else if (mode == DImode)
fn = gen_store_conditional_di;
+ /* Emit sync before stwcx. to address PPC405 Erratum. */
if (PPC405_ERRATUM77)
emit_insn (gen_memory_barrier ());
emit_insn (fn (res, mem, val));
}
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
+ to perform. MEM is the memory on which to operate. VAL is the second
+ operand of the binary operator. BEFORE and AFTER are optional locations to
+ return the value of MEM either before or after the operation. SCRATCH is
+ a scratch register. */
+
+void
+rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
+ rtx before, rtx after, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ /* Full barrier before entering the atomic sequence.  */
+ emit_insn (gen_memory_barrier ());
+
+ /* Top of the load-reserved/store-conditional retry loop.  */
+ label = gen_label_rtx ();
+ emit_label (label);
+ label = gen_rtx_LABEL_REF (VOIDmode, label);
+
+ /* If the caller does not need the old value, reuse SCRATCH for it.  */
+ if (before == NULL_RTX)
+ before = scratch;
+ emit_load_locked (mode, before, mem);
+
+ /* NOT encodes and-with-complement; AND goes through an UNSPEC
+    (presumably to keep combine from rewriting it — confirm against
+    the sync.md patterns); everything else is the plain rtx code.  */
+ if (code == NOT)
+ x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
+ else if (code == AND)
+ x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
+ else
+ x = gen_rtx_fmt_ee (code, mode, before, val);
+
+ if (after != NULL_RTX)
+ emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
+
+ /* Store-conditional; on failure (CR0 != 0) retry from the label.  */
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+
+ /* Acquire barrier after a successful update.  */
+ emit_insn (gen_isync ());
+}
+
/* Expand an atomic compare and swap operation. MEM is the memory on which
to operate. OLDVAL is the old value to be compared. NEWVAL is the new
value to be stored. SCRATCH is a scratch GPR. */
emit_insn (TARGET_32BIT
? gen_addsi3 (breg, breg, delta_rtx)
: gen_adddi3 (breg, breg, delta_rtx));
- src = gen_rtx_MEM (mode, breg);
+ src = replace_equiv_address (src, breg);
}
else if (! offsettable_memref_p (src))
{
- rtx newsrc, basereg;
+ rtx basereg;
basereg = gen_rtx_REG (Pmode, reg);
emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
- newsrc = gen_rtx_MEM (GET_MODE (src), basereg);
- MEM_COPY_ATTRIBUTES (newsrc, src);
- src = newsrc;
+ src = replace_equiv_address (src, basereg);
}
breg = XEXP (src, 0);
emit_insn (TARGET_32BIT
? gen_addsi3 (breg, breg, delta_rtx)
: gen_adddi3 (breg, breg, delta_rtx));
- dst = gen_rtx_MEM (mode, breg);
+ dst = replace_equiv_address (dst, breg);
}
else
gcc_assert (offsettable_memref_p (dst));
else
{
gcc_assert (DEFAULT_ABI == ABI_AIX);
-
+
if (TARGET_32BIT)
insn = emit_insn (gen_load_toc_aix_si (dest));
else
|| current_function_calls_alloca
|| info->total_size > 32767)
{
- emit_move_insn (operands[1], gen_rtx_MEM (Pmode, frame_rtx));
+ tmp = gen_rtx_MEM (Pmode, frame_rtx);
+ MEM_NOTRAP_P (tmp) = 1;
+ set_mem_alias_set (tmp, rs6000_sr_alias_set);
+ emit_move_insn (operands[1], tmp);
frame_rtx = operands[1];
}
else if (info->push_p)
tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
tmp = gen_rtx_MEM (Pmode, tmp);
+ MEM_NOTRAP_P (tmp) = 1;
+ set_mem_alias_set (tmp, rs6000_sr_alias_set);
emit_move_insn (tmp, operands[0]);
}
else
rtx tocompare = gen_reg_rtx (SImode);
rtx no_toc_save_needed = gen_label_rtx ();
- mem = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
+ mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
emit_move_insn (stack_top, mem);
- mem = gen_rtx_MEM (Pmode,
- gen_rtx_PLUS (Pmode, stack_top,
- GEN_INT (2 * GET_MODE_SIZE (Pmode))));
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (2 * GET_MODE_SIZE (Pmode))));
emit_move_insn (opcode_addr, mem);
emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
SImode, NULL_RTX, NULL_RTX,
no_toc_save_needed);
- mem = gen_rtx_MEM (Pmode,
- gen_rtx_PLUS (Pmode, stack_top,
- GEN_INT (5 * GET_MODE_SIZE (Pmode))));
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (5 * GET_MODE_SIZE (Pmode))));
emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
emit_label (no_toc_save_needed);
}
vrsave,
gen_rtx_UNSPEC_VOLATILE (SImode,
gen_rtvec (2, reg, vrsave),
- 30));
+ UNSPECV_SET_VRSAVE));
nclobs = 1;
const char *name = buf;
const char *real_name;
rtx base = x;
- int offset = 0;
+ HOST_WIDE_INT offset = 0;
gcc_assert (!TARGET_NO_TOC);
fprintf (file, "\t.tc %s", real_name);
if (offset < 0)
- fprintf (file, ".N%d", - offset);
+ fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
else if (offset)
- fprintf (file, ".P%d", offset);
+ fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
fputs ("[TC],", file);
}
{
RS6000_OUTPUT_BASENAME (file, name);
if (offset < 0)
- fprintf (file, "%d", offset);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
else if (offset > 0)
- fprintf (file, "+%d", offset);
+ fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
}
else
output_addr_const (file, x);
static rtx
get_next_active_insn (rtx insn, rtx tail)
{
+ /* Scan forward from INSN, stopping at TAIL, and return the next
+ "active" insn: a CALL_INSN, a JUMP_INSN, or a nonjump insn whose
+ pattern is not a USE, a CLOBBER, or a stack tie. Return NULL_RTX
+ if TAIL (or the end of the insn chain) is reached first. */
- rtx next_insn;
-
- if (!insn || insn == tail)
+ if (insn == NULL_RTX || insn == tail)
return NULL_RTX;
- next_insn = NEXT_INSN (insn);
-
- while (next_insn
- && next_insn != tail
- && (GET_CODE (next_insn) == NOTE
- || GET_CODE (PATTERN (next_insn)) == USE
- || GET_CODE (PATTERN (next_insn)) == CLOBBER))
+ while (1)
{
- next_insn = NEXT_INSN (next_insn);
- }
-
- if (!next_insn || next_insn == tail)
- return NULL_RTX;
+ insn = NEXT_INSN (insn);
+ if (insn == NULL_RTX || insn == tail)
+ return NULL_RTX;
- return next_insn;
+ if (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && INSN_CODE (insn) != CODE_FOR_stack_tie))
+ break;
+ }
+ return insn;
}
/* Return whether the presence of INSN causes a dispatch group termination
return orig;
gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
-
+
/* Use a different reg for the intermediate value, as
it will be marked UNCHANGING. */
reg_temp = no_new_pseudos ? reg : gen_reg_rtx (Pmode);
offset =
rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
Pmode, reg);
-
+
if (GET_CODE (offset) == CONST_INT)
{
if (SMALL_INT (offset))
&& (CONST_OK_FOR_LETTER_P (INTVAL (x), 'K')
|| (CONST_OK_FOR_LETTER_P (INTVAL (x),
mode == SImode ? 'L' : 'J'))
- || mask_operand (x, VOIDmode)))
+ || mask_operand (x, mode)
+ || (mode == DImode
+ && mask64_operand (x, DImode))))
|| ((outer_code == IOR || outer_code == XOR)
&& (CONST_OK_FOR_LETTER_P (INTVAL (x), 'K')
|| (CONST_OK_FOR_LETTER_P (INTVAL (x),
&& ((outer_code == AND
&& (CONST_OK_FOR_LETTER_P (INTVAL (x), 'K')
|| CONST_OK_FOR_LETTER_P (INTVAL (x), 'L')
- || mask_operand (x, DImode)))
+ || mask_operand (x, DImode)
+ || mask64_operand (x, DImode)))
|| ((outer_code == IOR || outer_code == XOR)
&& CONST_DOUBLE_HIGH (x) == 0
&& (CONST_DOUBLE_LOW (x)
else
*total = rs6000_cost->fp;
}
- else if (GET_CODE (XEXP (x, 0)) == MULT)
- {
- /* The rs6000 doesn't have shift-and-add instructions. */
- rs6000_rtx_costs (XEXP (x, 0), MULT, PLUS, total);
- *total += COSTS_N_INSNS (1);
- }
else
*total = COSTS_N_INSNS (1);
return false;
else
*total = rs6000_cost->fp;
}
- else if (GET_CODE (XEXP (x, 0)) == MULT)
- {
- /* The rs6000 doesn't have shift-and-sub instructions. */
- rs6000_rtx_costs (XEXP (x, 0), MULT, MINUS, total);
- *total += COSTS_N_INSNS (1);
- }
else
*total = COSTS_N_INSNS (1);
return false;
}
/* FALLTHRU */
- case FLOAT_EXTEND:
- if (mode == DFmode)
- *total = 0;
- else
- *total = rs6000_cost->fp;
- return false;
-
case FLOAT:
case UNSIGNED_FLOAT:
case FIX:
*total = rs6000_cost->fp;
return false;
+ case FLOAT_EXTEND:
+ if (mode == DFmode)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ return false;
+
case UNSPEC:
switch (XINT (x, 1))
{
GP_ARG_RETURN + 1),
GEN_INT (4))));
}
-
+ if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
+ {
+ return gen_rtx_PARALLEL (DCmode,
+ gen_rtvec (4,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 2),
+ GEN_INT (8)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 3),
+ GEN_INT (12))));
+ }
if ((INTEGRAL_TYPE_P (valtype)
&& TYPE_PRECISION (valtype) < BITS_PER_WORD)
|| POINTER_TYPE_P (valtype))
return false;
}
-/* Target hook for invalid_arg_for_unprototyped_fn. */
-static const char *
+/* Target hook for invalid_arg_for_unprototyped_fn. */
+static const char *
invalid_arg_for_unprototyped_fn (tree typelist, tree funcdecl, tree val)
{
return (!rs6000_darwin64_abi