{ (const char *)0, "-mtune=", 1, 0 },
};
+static GTY(()) bool rs6000_cell_dont_microcode;
+
/* Always emit branch hint bits. */
static GTY(()) bool rs6000_always_hint;
COSTS_N_INSNS (21), /* ddiv */
};
+/* Instruction costs on Cell processor. */
+/* COSTS_N_INSNS (1) ~ one add. */
+/* NOTE(review): the N/2 divisors appear to scale raw Cell PPU latencies
+   down to this cost model's units, with +2 extra on the multiplies --
+   confirm against published Cell PPU instruction timings.  */
+static const
+struct processor_costs ppccell_cost = {
+  COSTS_N_INSNS (9/2)+2,    /* mulsi */
+  COSTS_N_INSNS (6/2),    /* mulsi_const */
+  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
+  COSTS_N_INSNS (15/2)+2,   /* muldi */
+  COSTS_N_INSNS (38/2),   /* divsi */
+  COSTS_N_INSNS (70/2),   /* divdi */
+  COSTS_N_INSNS (10/2),   /* fp */
+  COSTS_N_INSNS (10/2),   /* dmul */
+  COSTS_N_INSNS (74/2),   /* sdiv */
+  COSTS_N_INSNS (74/2),   /* ddiv */
+};
+
/* Instruction costs on PPC750 and PPC7400 processors. */
static const
struct processor_costs ppc750_cost = {
static int rs6000_adjust_cost (rtx, rtx, rtx, int);
static void rs6000_sched_init (FILE *, int, int);
static bool is_microcoded_insn (rtx);
+static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_branch_slot_insn (rtx);
static bool is_load_insn (rtx);
static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int);
static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int rs6000_use_sched_lookahead (void);
+static int rs6000_use_sched_lookahead_guard (rtx);
static tree rs6000_builtin_mask_for_load (void);
+static tree rs6000_builtin_mul_widen_even (tree);
+static tree rs6000_builtin_mul_widen_odd (tree);
static void def_builtin (int, const char *, tree, int);
static void rs6000_init_builtins (void);
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
+
#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
+#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
+#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
+#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
+#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
{"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
{"970", PROCESSOR_POWER4,
POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
+ {"cell", PROCESSOR_CELL,
+ POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
{"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS},
{"ec603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
{"G3", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
&& rs6000_cpu != PROCESSOR_POWER5
- && rs6000_cpu != PROCESSOR_POWER6);
+ && rs6000_cpu != PROCESSOR_POWER6
+ && rs6000_cpu != PROCESSOR_CELL);
rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
|| rs6000_cpu == PROCESSOR_POWER5);
rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
/* Set branch target alignment, if not optimizing for size. */
if (!optimize_size)
{
+      /* Cell wants functions, jumps and loops 8-byte aligned for dual issue.  */
+ if (rs6000_cpu == PROCESSOR_CELL)
+ {
+ if (align_functions <= 0)
+ align_functions = 8;
+ if (align_jumps <= 0)
+ align_jumps = 8;
+ if (align_loops <= 0)
+ align_loops = 8;
+ }
if (rs6000_align_branch_targets)
{
if (align_functions <= 0)
rs6000_cost = &ppc630_cost;
break;
+ case PROCESSOR_CELL:
+ rs6000_cost = &ppccell_cost;
+ break;
+
case PROCESSOR_PPC750:
case PROCESSOR_PPC7400:
rs6000_cost = &ppc750_cost;
return 0;
}
+/* Implement targetm.vectorize.builtin_mul_widen_even.
+   For vector TYPE, return the decl of the AltiVec vmule[us][hb]
+   builtin matching TYPE's mode and signedness, or NULL_TREE when
+   AltiVec is disabled or the mode is unsupported.  */
+static tree
+rs6000_builtin_mul_widen_even (tree type)
+{
+  bool uns_p;
+
+  if (!TARGET_ALTIVEC)
+    return NULL_TREE;
+
+  uns_p = TYPE_UNSIGNED (type);
+
+  if (TYPE_MODE (type) == V8HImode)
+    return rs6000_builtin_decls[uns_p ? ALTIVEC_BUILTIN_VMULEUH
+				      : ALTIVEC_BUILTIN_VMULESH];
+
+  if (TYPE_MODE (type) == V16QImode)
+    return rs6000_builtin_decls[uns_p ? ALTIVEC_BUILTIN_VMULEUB
+				      : ALTIVEC_BUILTIN_VMULESB];
+
+  return NULL_TREE;
+}
+
+/* Implement targetm.vectorize.builtin_mul_widen_odd.
+   For vector TYPE, return the decl of the AltiVec vmulo[us][hb]
+   builtin matching TYPE's mode and signedness, or NULL_TREE when
+   AltiVec is disabled or the mode is unsupported.  */
+static tree
+rs6000_builtin_mul_widen_odd (tree type)
+{
+  unsigned int code;
+
+  if (!TARGET_ALTIVEC)
+    return NULL_TREE;
+
+  switch (TYPE_MODE (type))
+    {
+    case V8HImode:
+      code = (TYPE_UNSIGNED (type)
+	      ? ALTIVEC_BUILTIN_VMULOUH : ALTIVEC_BUILTIN_VMULOSH);
+      break;
+    case V16QImode:
+      code = (TYPE_UNSIGNED (type)
+	      ? ALTIVEC_BUILTIN_VMULOUB : ALTIVEC_BUILTIN_VMULOSB);
+      break;
+    default:
+      return NULL_TREE;
+    }
+
+  return rs6000_builtin_decls[code];
+}
+
/* Handle generic options of the form -mfoo=yes/no.
NAME is the option name.
VALUE is the option value.
rs6000_stack_t *info;
int restoring_FPRs_inline;
int using_load_multiple;
- int using_mfcr_multiple;
+ int using_mtcr_multiple;
int use_backchain_to_restore_sp;
int sp_offset = 0;
rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
use_backchain_to_restore_sp = (frame_pointer_needed
|| current_function_calls_alloca
|| info->total_size > 32767);
- using_mfcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
+ using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
|| rs6000_cpu == PROCESSOR_PPC603
|| rs6000_cpu == PROCESSOR_PPC750
|| optimize_size);
rtx r12_rtx = gen_rtx_REG (SImode, 12);
int count = 0;
- if (using_mfcr_multiple)
+ if (using_mtcr_multiple)
{
for (i = 0; i < 8; i++)
if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
gcc_assert (count);
}
- if (using_mfcr_multiple && count > 1)
+ if (using_mtcr_multiple && count > 1)
{
rtvec p;
int ndx;
return cached_can_issue_more;
}
+  /* If the insn has no DFA reservation but we reach here anyway,
+     leave the issue count unchanged.  */
+ if (recog_memoized (insn) < 0)
+ return more;
+
if (rs6000_sched_groups)
{
if (is_microcoded_insn (insn))
return cached_can_issue_more;
}
+ if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
+ return 0;
+
cached_can_issue_more = more - 1;
return cached_can_issue_more;
}
|| rs6000_cpu_attr == CPU_PPC7400
|| rs6000_cpu_attr == CPU_PPC7450
|| rs6000_cpu_attr == CPU_POWER4
- || rs6000_cpu_attr == CPU_POWER5)
+ || rs6000_cpu_attr == CPU_POWER5
+ || rs6000_cpu_attr == CPU_CELL)
&& recog_memoized (dep_insn)
&& (INSN_CODE (dep_insn) >= 0))
|| GET_CODE (PATTERN (insn)) == CLOBBER)
return false;
+ if (rs6000_cpu_attr == CPU_CELL)
+ return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
+
if (rs6000_sched_groups)
{
enum attr_type type = get_attr_type (insn);
return priority;
}
+/* Return true if INSN is nonpipelined on the Cell, i.e. its
+   scheduling attribute type is one of the multiply, divide,
+   square-root or move-from-special-register classes.  USE and
+   CLOBBER patterns, and non-insns, are never nonpipelined.  */
+static bool
+is_nonpipeline_insn (rtx insn)
+{
+  if (!insn || !INSN_P (insn)
+      || GET_CODE (PATTERN (insn)) == USE
+      || GET_CODE (PATTERN (insn)) == CLOBBER)
+    return false;
+
+  switch (get_attr_type (insn))
+    {
+    case TYPE_IMUL:
+    case TYPE_IMUL2:
+    case TYPE_IMUL3:
+    case TYPE_LMUL:
+    case TYPE_IDIV:
+    case TYPE_LDIV:
+    case TYPE_SDIV:
+    case TYPE_DDIV:
+    case TYPE_SSQRT:
+    case TYPE_DSQRT:
+    case TYPE_MFCR:
+    case TYPE_MFCRF:
+    case TYPE_MFJMPR:
+      return true;
+    default:
+      return false;
+    }
+}
+
+
/* Return how many instructions the machine can issue per cycle. */
static int
case CPU_PPC750:
case CPU_PPC7400:
case CPU_PPC8540:
+ case CPU_CELL:
return 2;
case CPU_RIOS2:
case CPU_PPC604:
{
if (rs6000_cpu_attr == CPU_PPC8540)
return 4;
+ if (rs6000_cpu_attr == CPU_CELL)
+ return (reload_completed ? 8 : 0);
return 0;
}
+/* We are choosing insn from the ready queue.  Return nonzero if INSN
+   can be chosen.  On non-Cell targets every insn is acceptable; on
+   the Cell only pipelined, non-microcoded insns are allowed once
+   reload has completed, and nothing is chosen via lookahead before
+   reload.  */
+static int
+rs6000_use_sched_lookahead_guard (rtx insn)
+{
+  if (rs6000_cpu_attr != CPU_CELL)
+    return 1;
+
+  /* The scheduler must hand us a real insn here.  */
+  gcc_assert (insn != NULL_RTX && INSN_P (insn));
+
+  if (!reload_completed
+      || is_nonpipeline_insn (insn)
+      || is_microcoded_insn (insn))
+    return 0;
+
+  return 1;
+}
+
/* Determine is PAT refers to memory. */
static bool
int *pn_ready ATTRIBUTE_UNUSED,
int clock_var ATTRIBUTE_UNUSED)
{
+ int n_ready = *pn_ready;
+
if (sched_verbose)
fprintf (dump, "// rs6000_sched_reorder :\n");
+  /* Reorder the ready list, if the second to last ready insn
+     is a nonpipeline insn.  */
+ if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
+ {
+ if (is_nonpipeline_insn (ready[n_ready - 1])
+ && (recog_memoized (ready[n_ready - 2]) > 0))
+ /* Simply swap first two insns. */
+ {
+ rtx tmp = ready[n_ready - 1];
+ ready[n_ready - 1] = ready[n_ready - 2];
+ ready[n_ready - 2] = tmp;
+ }
+ }
+
if (rs6000_cpu == PROCESSOR_POWER6)
load_store_pendulum = 0;