#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
+#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
+#define m_NOCONA (1<<PROCESSOR_NOCONA)
+#define m_CORE2 (1<<PROCESSOR_CORE2)
+
#define m_GEODE (1<<PROCESSOR_GEODE)
-#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
-#define m_ATHLON (1<<PROCESSOR_ATHLON)
-#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
+#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
+#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
-#define m_NOCONA (1<<PROCESSOR_NOCONA)
-#define m_CORE2 (1<<PROCESSOR_CORE2)
+#define m_ATHLON_K8_AMDFAM10 (m_K8 | m_ATHLON | m_AMDFAM10)
+
#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
-#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
-#define m_ATHLON_K8_AMDFAM10 (m_K8 | m_ATHLON | m_AMDFAM10)
/* Generic instruction choice should be common subset of supported CPUs
(PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
+#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
/* Leave is not affecting Nocona SPEC2000 results negatively, so enabling for
Generic64 seems like good code size tradeoff. We can't enable it for 32bit
| m_CORE2 | m_GENERIC);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
+/* Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
+const int x86_use_xchgb = m_PENT4;
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
/* Which cpu are we scheduling for. */
enum processor_type ix86_tune;
+int ix86_tune_mask;
+
/* Which instruction set architecture to use. */
enum processor_type ix86_arch;
+int ix86_arch_mask;
/* true if sse prefetch instruction is not NOOP. */
int x86_prefetch_sse;
if (i == pta_size)
error ("bad value (%s) for -mtune= switch", ix86_tune_string);
+ ix86_arch_mask = 1 << ix86_arch;
+ ix86_tune_mask = 1 << ix86_tune;
+
if (optimize_size)
ix86_cost = &size_cost;
else
/* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
since the insns won't need emulation. */
- if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
+ if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
target_flags &= ~MASK_NO_FANCY_MATH_387;
/* Likewise, if the target doesn't have a 387, or we've specified
if (!TARGET_80387)
target_flags &= ~MASK_FLOAT_RETURNS;
- if ((x86_accumulate_outgoing_args & TUNEMASK)
+ if ((x86_accumulate_outgoing_args & ix86_tune_mask)
&& !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
&& !optimize_size)
target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
func = decl;
else
{
- func = TREE_TYPE (TREE_OPERAND (exp, 0));
+ func = TREE_TYPE (CALL_EXPR_FN (exp));
if (POINTER_TYPE_P (func))
func = TREE_TYPE (func);
}
tree type;
/* We're looking at the CALL_EXPR, we need the type of the function. */
- type = TREE_OPERAND (exp, 0); /* pointer expression */
+ type = CALL_EXPR_FN (exp); /* pointer expression */
type = TREE_TYPE (type); /* pointer type */
type = TREE_TYPE (type); /* function type */
src_offset = REGNO (reg) * 8;
}
src_addr = fold_convert (addr_type, src_addr);
- src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
- size_int (src_offset)));
+ src_addr = fold_build2 (PLUS_EXPR, addr_type, src_addr,
+ size_int (src_offset));
src = build_va_arg_indirect_ref (src_addr);
dest_addr = fold_convert (addr_type, addr);
- dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
- size_int (INTVAL (XEXP (slot, 1)))));
+ dest_addr = fold_build2 (PLUS_EXPR, addr_type, dest_addr,
+ size_int (INTVAL (XEXP (slot, 1))));
dest = build_va_arg_indirect_ref (dest_addr);
t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
/* For XFmode constants, try to find a special 80387 instruction when
optimizing for size or on those CPUs that benefit from them. */
if (GET_MODE (x) == XFmode
- && (optimize_size || x86_ext_80387_constants & TUNEMASK))
+ && (optimize_size || x86_ext_80387_constants & ix86_tune_mask))
{
int i;
break;
case SYMBOL_REF:
- output_addr_const (file, x);
+ if (! TARGET_MACHO || TARGET_64BIT)
+ output_addr_const (file, x);
+ else
+ {
+ const char *name = XSTR (x, 0);
+
+ /* Mark the decl as referenced so that cgraph will output the function. */
+ if (SYMBOL_REF_DECL (x))
+ mark_decl_referenced (SYMBOL_REF_DECL (x));
+
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT
+ && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
+ name = machopic_indirection_name (x, /*stub_p=*/true);
+#endif
+ assemble_name (file, name);
+ }
if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
fputs ("@PLT", file);
break;
return sc;
}
+/* Return mode for the memcpy/memset loop counter.  Prefer SImode over DImode
+   for constant loop counts, since 32-bit induction variables are cheaper.  */
+
+static enum machine_mode
+counter_mode (rtx count_exp)
+{
+  /* If the count expression already carries a mode, use it directly.  */
+  if (GET_MODE (count_exp) != VOIDmode)
+    return GET_MODE (count_exp);
+  /* Non-constant VOIDmode counts fall back to the pointer-sized mode.  */
+  if (GET_CODE (count_exp) != CONST_INT)
+    return Pmode;
+  /* A constant count that does not fit in 32 bits needs DImode on 64-bit
+     targets; otherwise SImode suffices.  */
+  if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
+    return DImode;
+  return SImode;
+}
+
/* When SRCPTR is non-NULL, output simple loop to move memory
pointer to SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times,
overall size is COUNT specified in bytes. When SRCPTR is NULL, output the
int expected_size)
{
rtx out_label, top_label, iter, tmp;
- enum machine_mode iter_mode;
+ enum machine_mode iter_mode = counter_mode (count);
rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
rtx size;
rtx y_addr;
int i;
- iter_mode = GET_MODE (count);
- if (iter_mode == VOIDmode)
- iter_mode = word_mode;
-
top_label = gen_label_rtx ();
out_label = gen_label_rtx ();
iter = gen_reg_rtx (iter_mode);
HOST_WIDE_INT countval = INTVAL (count);
int offset = 0;
- if ((countval & 0x16) && max_size > 16)
+ if ((countval & 0x10) && max_size > 16)
{
if (TARGET_64BIT)
{
emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
else
{
- emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
- emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 4);
+ emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
+ emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
}
offset += 8;
}
rtx count, int max_size)
{
count =
- expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
- count, 1, OPTAB_DIRECT);
+ expand_simple_binop (counter_mode (count), AND, count,
+ GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
gen_lowpart (QImode, value), count, QImode,
1, max_size / 2);
HOST_WIDE_INT countval = INTVAL (count);
int offset = 0;
- if ((countval & 0x16) && max_size > 16)
+ if ((countval & 0x10) && max_size > 16)
{
if (TARGET_64BIT)
{
gcc_assert (desired_align >= 1 && align >= 1);
/* Ensure that alignment prologue won't copy past end of block. */
- if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
- && !count)
+ if (size_needed > 1 || (desired_align > 1 && desired_align > align))
{
epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
-
/* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
Make sure it is power of 2. */
epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
label = gen_label_rtx ();
emit_cmp_and_jump_insns (count_exp,
GEN_INT (epilogue_size_needed),
- LTU, 0, GET_MODE (count_exp), 1, label);
- if (expected_size == -1 || expected_size < epilogue_size_needed)
+ LTU, 0, counter_mode (count_exp), 1, label);
+ if (GET_CODE (count_exp) == CONST_INT)
+ ;
+ else if (expected_size == -1 || expected_size < epilogue_size_needed)
predict_jump (REG_BR_PROB_BASE * 60 / 100);
else
predict_jump (REG_BR_PROB_BASE * 20 / 100);
if (size_needed < epilogue_size_needed)
{
tmp =
- expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
+ expand_simple_binop (counter_mode (count_exp), AND, count_exp,
GEN_INT (size_needed - 1), count_exp, 1,
OPTAB_DIRECT);
if (tmp != count_exp)
return 0;
gcc_assert (alg != no_stringop);
if (!count)
- count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
+ count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
switch (alg)
{
promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
desired_align, align);
/* Ensure that alignment prologue won't copy past end of block. */
- if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
- && !count)
+ if (size_needed > 1 || (desired_align > 1 && desired_align > align))
{
epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
-
/* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
Make sure it is power of 2. */
epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
label = gen_label_rtx ();
emit_cmp_and_jump_insns (count_exp,
GEN_INT (epilogue_size_needed),
- LTU, 0, GET_MODE (count_exp), 1, label);
- if (expected_size == -1 || expected_size <= epilogue_size_needed)
+ LTU, 0, counter_mode (count_exp), 1, label);
+ if (GET_CODE (count_exp) == CONST_INT)
+ ;
+ else if (expected_size == -1 || expected_size <= epilogue_size_needed)
predict_jump (REG_BR_PROB_BASE * 60 / 100);
else
predict_jump (REG_BR_PROB_BASE * 20 / 100);
rtx hot_label = gen_label_rtx ();
jump_around_label = gen_label_rtx ();
emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
- LEU, 0, GET_MODE (count_exp), 1, hot_label);
+ LEU, 0, counter_mode (count_exp), 1, hot_label);
predict_jump (REG_BR_PROB_BASE * 90 / 100);
set_storage_via_libcall (dst, count_exp, val_exp, false);
emit_jump (jump_around_label);
if (size_needed < desired_align - align)
{
tmp =
- expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
+ expand_simple_binop (counter_mode (count_exp), AND, count_exp,
GEN_INT (size_needed - 1), count_exp, 1,
OPTAB_DIRECT);
size_needed = desired_align - align + 1;
int
ix86_data_alignment (tree type, int align)
{
- int max_align = optimize_size ? BITS_PER_WORD : 256;
+ int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
if (AGGREGATE_TYPE_P (type)
&& TYPE_SIZE (type)
{ MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
/* SSE3 */
- { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
- { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
+ { MASK_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, 0, 0 },
+ { MASK_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, 0, 0 },
/* SSSE3 */
{ MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
void_ftype_unsigned_unsigned,
IX86_BUILTIN_MWAIT);
- def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
- v4sf_ftype_v4sf,
- IX86_BUILTIN_MOVSHDUP);
- def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
- v4sf_ftype_v4sf,
- IX86_BUILTIN_MOVSLDUP);
def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
/* Subroutine of ix86_expand_builtin to take care of binop insns. */
static rtx
-ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
+ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
rtx pat, xops[3];
- tree arg0 = TREE_VALUE (arglist);
- tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
rtx op0 = expand_normal (arg0);
rtx op1 = expand_normal (arg1);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
/* Subroutine of ix86_expand_builtin to take care of stores. */
static rtx
-ix86_expand_store_builtin (enum insn_code icode, tree arglist)
+ix86_expand_store_builtin (enum insn_code icode, tree exp)
{
rtx pat;
- tree arg0 = TREE_VALUE (arglist);
- tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
rtx op0 = expand_normal (arg0);
rtx op1 = expand_normal (arg1);
enum machine_mode mode0 = insn_data[icode].operand[0].mode;
/* Subroutine of ix86_expand_builtin to take care of unop insns. */
static rtx
-ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
+ix86_expand_unop_builtin (enum insn_code icode, tree exp,
rtx target, int do_load)
{
rtx pat;
- tree arg0 = TREE_VALUE (arglist);
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
rtx op0 = expand_normal (arg0);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
sqrtss, rsqrtss, rcpss. */
static rtx
-ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
+ix86_expand_unop1_builtin (enum insn_code icode, tree exp, rtx target)
{
rtx pat;
- tree arg0 = TREE_VALUE (arglist);
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
rtx op1, op0 = expand_normal (arg0);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
/* Subroutine of ix86_expand_builtin to take care of comparison insns. */
static rtx
-ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
+ix86_expand_sse_compare (const struct builtin_description *d, tree exp,
rtx target)
{
rtx pat;
- tree arg0 = TREE_VALUE (arglist);
- tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
rtx op0 = expand_normal (arg0);
rtx op1 = expand_normal (arg1);
rtx op2;
/* Subroutine of ix86_expand_builtin to take care of comi insns. */
static rtx
-ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
+ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
rtx target)
{
rtx pat;
- tree arg0 = TREE_VALUE (arglist);
- tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
rtx op0 = expand_normal (arg0);
rtx op1 = expand_normal (arg1);
rtx op2;
these sorts of instructions. */
static rtx
-ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
+ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
enum machine_mode tmode = TYPE_MODE (type);
enum machine_mode inner_mode = GET_MODE_INNER (tmode);
rtvec v = rtvec_alloc (n_elt);
gcc_assert (VECTOR_MODE_P (tmode));
+ gcc_assert (call_expr_nargs (exp) == n_elt);
- for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
+ for (i = 0; i < n_elt; ++i)
{
- rtx x = expand_normal (TREE_VALUE (arglist));
+ rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
}
- gcc_assert (arglist == NULL);
-
if (!target || !register_operand (target, tmode))
target = gen_reg_rtx (tmode);
had a language-level syntax for referencing vector elements. */
static rtx
-ix86_expand_vec_ext_builtin (tree arglist, rtx target)
+ix86_expand_vec_ext_builtin (tree exp, rtx target)
{
enum machine_mode tmode, mode0;
tree arg0, arg1;
int elt;
rtx op0;
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
op0 = expand_normal (arg0);
elt = get_element_number (TREE_TYPE (arg0), arg1);
a language-level syntax for referencing vector elements. */
static rtx
-ix86_expand_vec_set_builtin (tree arglist)
+ix86_expand_vec_set_builtin (tree exp)
{
enum machine_mode tmode, mode1;
tree arg0, arg1, arg2;
int elt;
rtx op0, op1;
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
tmode = TYPE_MODE (TREE_TYPE (arg0));
mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
const struct builtin_description *d;
size_t i;
enum insn_code icode;
- tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
- tree arglist = TREE_OPERAND (exp, 1);
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
tree arg0, arg1, arg2, arg3;
rtx op0, op1, op2, op3, pat;
enum machine_mode tmode, mode0, mode1, mode2, mode3, mode4;
? CODE_FOR_mmx_maskmovq
: CODE_FOR_sse2_maskmovdqu);
/* Note the arg order is different from the operand order. */
- arg1 = TREE_VALUE (arglist);
- arg2 = TREE_VALUE (TREE_CHAIN (arglist));
- arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ arg1 = CALL_EXPR_ARG (exp, 0);
+ arg2 = CALL_EXPR_ARG (exp, 1);
+ arg0 = CALL_EXPR_ARG (exp, 2);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
op2 = expand_normal (arg2);
return 0;
case IX86_BUILTIN_SQRTSS:
- return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
+ return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, exp, target);
case IX86_BUILTIN_RSQRTSS:
- return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
+ return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, exp, target);
case IX86_BUILTIN_RCPSS:
- return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
+ return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, exp, target);
case IX86_BUILTIN_LOADUPS:
- return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
+ return ix86_expand_unop_builtin (CODE_FOR_sse_movups, exp, target, 1);
case IX86_BUILTIN_STOREUPS:
- return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse_movups, exp);
case IX86_BUILTIN_LOADHPS:
case IX86_BUILTIN_LOADLPS:
: fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
: fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
: CODE_FOR_sse2_loadlpd);
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
tmode = insn_data[icode].operand[0].mode;
case IX86_BUILTIN_STORELPS:
icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
: CODE_FOR_sse_storelps);
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
mode0 = insn_data[icode].operand[0].mode;
return const0_rtx;
case IX86_BUILTIN_MOVNTPS:
- return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, exp);
case IX86_BUILTIN_MOVNTQ:
- return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, exp);
case IX86_BUILTIN_LDMXCSR:
- op0 = expand_normal (TREE_VALUE (arglist));
+ op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
target = assign_386_stack_local (SImode, SLOT_TEMP);
emit_move_insn (target, op0);
emit_insn (gen_sse_ldmxcsr (target));
icode = (fcode == IX86_BUILTIN_SHUFPS
? CODE_FOR_sse_shufps
: CODE_FOR_sse2_shufpd);
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
op2 = expand_normal (arg2);
: fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
: fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
: CODE_FOR_mmx_pshufw);
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
tmode = insn_data[icode].operand[0].mode;
case IX86_BUILTIN_PSRLDQI128:
icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
: CODE_FOR_sse2_lshrti3);
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
tmode = insn_data[icode].operand[0].mode;
return NULL_RTX;
case IX86_BUILTIN_PAVGUSB:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, exp, target);
case IX86_BUILTIN_PF2ID:
- return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, exp, target, 0);
case IX86_BUILTIN_PFACC:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, exp, target);
case IX86_BUILTIN_PFADD:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, exp, target);
case IX86_BUILTIN_PFCMPEQ:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, exp, target);
case IX86_BUILTIN_PFCMPGE:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, exp, target);
case IX86_BUILTIN_PFCMPGT:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, exp, target);
case IX86_BUILTIN_PFMAX:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, exp, target);
case IX86_BUILTIN_PFMIN:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, exp, target);
case IX86_BUILTIN_PFMUL:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, exp, target);
case IX86_BUILTIN_PFRCP:
- return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, exp, target, 0);
case IX86_BUILTIN_PFRCPIT1:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, exp, target);
case IX86_BUILTIN_PFRCPIT2:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, exp, target);
case IX86_BUILTIN_PFRSQIT1:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, exp, target);
case IX86_BUILTIN_PFRSQRT:
- return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, exp, target, 0);
case IX86_BUILTIN_PFSUB:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, exp, target);
case IX86_BUILTIN_PFSUBR:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, exp, target);
case IX86_BUILTIN_PI2FD:
- return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, exp, target, 0);
case IX86_BUILTIN_PMULHRW:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, exp, target);
case IX86_BUILTIN_PF2IW:
- return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, exp, target, 0);
case IX86_BUILTIN_PFNACC:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, exp, target);
case IX86_BUILTIN_PFPNACC:
- return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, exp, target);
case IX86_BUILTIN_PI2FW:
- return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, exp, target, 0);
case IX86_BUILTIN_PSWAPDSI:
- return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, exp, target, 0);
case IX86_BUILTIN_PSWAPDSF:
- return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, exp, target, 0);
case IX86_BUILTIN_SQRTSD:
- return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
+ return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, exp, target);
case IX86_BUILTIN_LOADUPD:
- return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
+ return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, exp, target, 1);
case IX86_BUILTIN_STOREUPD:
- return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, exp);
case IX86_BUILTIN_MFENCE:
emit_insn (gen_sse2_mfence ());
return 0;
case IX86_BUILTIN_CLFLUSH:
- arg0 = TREE_VALUE (arglist);
+ arg0 = CALL_EXPR_ARG (exp, 0);
op0 = expand_normal (arg0);
icode = CODE_FOR_sse2_clflush;
if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
return 0;
case IX86_BUILTIN_MOVNTPD:
- return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, exp);
case IX86_BUILTIN_MOVNTDQ:
- return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, exp);
case IX86_BUILTIN_MOVNTI:
- return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, exp);
case IX86_BUILTIN_LOADDQU:
- return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
+ return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, exp, target, 1);
case IX86_BUILTIN_STOREDQU:
- return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, exp);
case IX86_BUILTIN_MONITOR:
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
op2 = expand_normal (arg2);
return 0;
case IX86_BUILTIN_MWAIT:
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
if (!REG_P (op0))
return 0;
case IX86_BUILTIN_LDDQU:
- return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
+ return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, exp,
target, 1);
case IX86_BUILTIN_PALIGNR:
icode = CODE_FOR_ssse3_palignrti;
mode = V2DImode;
}
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
return target;
case IX86_BUILTIN_MOVNTSD:
- return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv2df, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv2df, exp);
case IX86_BUILTIN_MOVNTSS:
- return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv4sf, arglist);
+ return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv4sf, exp);
case IX86_BUILTIN_INSERTQ:
case IX86_BUILTIN_EXTRQ:
icode = (fcode == IX86_BUILTIN_EXTRQ
? CODE_FOR_sse4a_extrq
: CODE_FOR_sse4a_insertq);
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
tmode = insn_data[icode].operand[0].mode;
case IX86_BUILTIN_EXTRQI:
icode = CODE_FOR_sse4a_extrqi;
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
op2 = expand_normal (arg2);
case IX86_BUILTIN_INSERTQI:
icode = CODE_FOR_sse4a_insertqi;
- arg0 = TREE_VALUE (arglist);
- arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
+ arg3 = CALL_EXPR_ARG (exp, 3);
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
op2 = expand_normal (arg2);
case IX86_BUILTIN_VEC_INIT_V2SI:
case IX86_BUILTIN_VEC_INIT_V4HI:
case IX86_BUILTIN_VEC_INIT_V8QI:
- return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
+ return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
case IX86_BUILTIN_VEC_EXT_V2DF:
case IX86_BUILTIN_VEC_EXT_V2DI:
case IX86_BUILTIN_VEC_EXT_V8HI:
case IX86_BUILTIN_VEC_EXT_V2SI:
case IX86_BUILTIN_VEC_EXT_V4HI:
- return ix86_expand_vec_ext_builtin (arglist, target);
+ return ix86_expand_vec_ext_builtin (exp, target);
case IX86_BUILTIN_VEC_SET_V8HI:
case IX86_BUILTIN_VEC_SET_V4HI:
- return ix86_expand_vec_set_builtin (arglist);
+ return ix86_expand_vec_set_builtin (exp);
default:
break;
|| d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
|| d->icode == CODE_FOR_sse2_maskcmpv2df3
|| d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
- return ix86_expand_sse_compare (d, arglist, target);
+ return ix86_expand_sse_compare (d, exp, target);
- return ix86_expand_binop_builtin (d->icode, arglist, target);
+ return ix86_expand_binop_builtin (d->icode, exp, target);
}
for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
if (d->code == fcode)
- return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
+ return ix86_expand_unop_builtin (d->icode, exp, target, 0);
for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
if (d->code == fcode)
- return ix86_expand_sse_comi (d, arglist, target);
+ return ix86_expand_sse_comi (d, exp, target);
gcc_unreachable ();
}
/* If MODE2 is only appropriate for an SSE register, then tie with
any other mode acceptable to SSE registers. */
- if (GET_MODE_SIZE (mode2) >= 8
+ if (GET_MODE_SIZE (mode2) == 16
&& ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
- return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
+ return (GET_MODE_SIZE (mode1) == 16
+ && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
- /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
+ /* If MODE2 is appropriate for an MMX register, then tie
with any other mode acceptable to MMX registers. */
if (GET_MODE_SIZE (mode2) == 8
&& ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
- return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
+ return (GET_MODE_SIZE (mode1) == 8
+ && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
return false;
}