+2011-05-27 Bernd Schmidt <bernds@codesourcery.com>
+
+ * sched-int.h (struct _haifa_deps_insn_data): New members cond
+ and reverse_cond.
+ (INSN_COND, INSN_REVERSE_COND): New macros.
+ * sched-deps.c (deps_analyze_insn): Call sched_get_condition_with_rev
+ once.
+ (sched_get_condition_with_rev): Cache the results, and look them up
+ if possible.
+ (sched_analyze_insn): Destroy INSN_COND of previous insns if they
+ are clobbered by the current insn.
+ * target.def (exposed_pipeline): New sched data hook.
+ * doc/tm.texi.in (TARGET_SCHED_EXPOSED_PIPELINE): Add hook.
+ * doc/tm.texi: Regenerate.
+
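The target.def and doc/tm.texi.in hunks are omitted from this excerpt. As a rough sketch only: a "sched data hook" of this kind would normally be declared with DEFHOOKPOD in target.def. The hook name below is taken from the ChangeLog above; the documentation string is an assumption, not the committed wording:

/* Sketch, not the committed hunk; the docstring wording is assumed.  */
DEFHOOKPOD
(exposed_pipeline,
"True if the processor has an exposed pipeline, meaning that scheduling\n\
correctness depends not only on instruction order but also on operation\n\
latencies.",
bool, false)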
2011-05-27 Bill Schmidt <wschmidt@linux.vnet.ibm.com>

 PR tree-optimization/49170
* tree-ssa-math-opts.c (execute_cse_sincos): Add checks for
sincos or cexp.
-
+
2011-05-27 Richard Guenther <rguenther@suse.de>

 PR middle-end/49189
/* Find the condition under which INSN is executed. If REV is not NULL,
it is set to TRUE when the returned comparison should be reversed
- to get the actual condition. */
+ to get the actual condition.
+ We only do actual work the first time we come here for an insn; the
+ results are cached in INSN_COND and INSN_REVERSE_COND. */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
rtx pat = PATTERN (insn);
rtx src;
+ if (INSN_COND (insn) == const_true_rtx)
+ return NULL_RTX;
+
+ if (INSN_COND (insn) != NULL_RTX)
+ {
+ if (rev)
+ *rev = INSN_REVERSE_COND (insn);
+ return INSN_COND (insn);
+ }
+
+ INSN_COND (insn) = const_true_rtx;
+ INSN_REVERSE_COND (insn) = false;
if (pat == 0)
return 0;
 if (rev)
 *rev = false;
if (GET_CODE (pat) == COND_EXEC)
- return COND_EXEC_TEST (pat);
+ {
+ INSN_COND (insn) = COND_EXEC_TEST (pat);
+ return COND_EXEC_TEST (pat);
+ }
if (!any_condjump_p (insn) || !onlyjump_p (insn))
return 0;
src = SET_SRC (pc_set (insn));
if (XEXP (src, 2) == pc_rtx)
- return XEXP (src, 0);
+ {
+ INSN_COND (insn) = XEXP (src, 0);
+ return XEXP (src, 0);
+ }
else if (XEXP (src, 1) == pc_rtx)
{
rtx cond = XEXP (src, 0);
if (rev)
*rev = true;
+ INSN_COND (insn) = cond;
+ INSN_REVERSE_COND (insn) = true;
return cond;
}
 return 0;
}
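With the cache in place, the contract of sched_get_condition_with_rev is simple: the first call for an insn does the real analysis and fills INSN_COND / INSN_REVERSE_COND; every later call is a lookup. A hedged usage sketch follows; insn1, insn2 and the mutual-exclusion check are illustrative, not part of the patch:

/* First call per insn computes and caches.  REV may be NULL when the
   caller only wants to warm the cache, as deps_analyze_insn does.  */
sched_get_condition_with_rev (insn1, NULL);

/* Later calls hit the cache.  const_true_rtx is the "no condition"
   sentinel, so unconditional insns yield NULL_RTX here.  */
bool rev1, rev2;
rtx c1 = sched_get_condition_with_rev (insn1, &rev1);
rtx c2 = sched_get_condition_with_rev (insn2, &rev2);

/* Two insns guarded by the same comparison with opposite senses can
   never execute together.  */
if (c1 != NULL_RTX && c2 != NULL_RTX
    && rtx_equal_p (c1, c2) && rev1 != rev2)
  ; /* insn1 and insn2 are mutually exclusive.  */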
else
{
+ regset_head set_or_clobbered;
+
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
}
}
+ if (targetm.sched.exposed_pipeline)
+ {
+ INIT_REG_SET (&set_or_clobbered);
+ bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
+ reg_pending_sets);
+ EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ rtx list;
+ for (list = reg_last->uses; list; list = XEXP (list, 1))
+ {
+ rtx other = XEXP (list, 0);
+ if (INSN_COND (other) != const_true_rtx
+ && refers_to_regno_p (i, i + 1, INSN_COND (other), NULL))
+ INSN_COND (other) = const_true_rtx;
+ }
+ }
+ }
+
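The invalidation walk above only runs for targets that opt in. A backend with an exposed pipeline would override the data hook in its target-hook initializer, along these lines (conventional placement, not part of this patch):

/* In the backend's .c file, before TARGET_INITIALIZER (sketch).  */
#undef TARGET_SCHED_EXPOSED_PIPELINE
#define TARGET_SCHED_EXPOSED_PIPELINE true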
/* If the current insn is conditional, we can't free any
of the lists. */
if (sched_has_condition_p (insn))
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
+ /* Record the condition for this insn. */
+ if (NONDEBUG_INSN_P (insn))
+ sched_get_condition_with_rev (insn, NULL);
+
if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
{
/* Make each JUMP_INSN (but not a speculative check)
search in 'forw_deps'. */
deps_list_t resolved_forw_deps;
+ /* If the insn is conditional (either through COND_EXEC, or because
+ it is a conditional branch), this records the condition. NULL
+ for insns that haven't been analyzed yet; const_true_rtx to mark
+ an insn without a condition, or one whose condition has been
+ clobbered by a subsequent insn. */
+ rtx cond;
+
+ /* True if the condition in 'cond' should be reversed to get the actual
+ condition. */
+ unsigned int reverse_cond : 1;
+
/* Some insns (e.g. call) are not allowed to move across blocks. */
unsigned int cant_move : 1;
};
#define INSN_RESOLVED_FORW_DEPS(INSN) (HDID (INSN)->resolved_forw_deps)
#define INSN_HARD_BACK_DEPS(INSN) (HDID (INSN)->hard_back_deps)
#define INSN_SPEC_BACK_DEPS(INSN) (HDID (INSN)->spec_back_deps)
+#define INSN_COND(INSN) (HDID (INSN)->cond)
+#define INSN_REVERSE_COND(INSN) (HDID (INSN)->reverse_cond)
#define CANT_MOVE(INSN) (HDID (INSN)->cant_move)
#define CANT_MOVE_BY_LUID(LUID) (VEC_index (haifa_deps_insn_data_def, h_d_i_d, \
LUID)->cant_move)
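The new macros expose the tri-state encoding described in the struct comment above. A summary sketch; the helper name is hypothetical:

/* Hypothetical helper: the three states of INSN_COND.  */
static const char *
insn_cond_state (const_rtx insn)
{
  if (INSN_COND (insn) == NULL_RTX)
    return "not analyzed yet";
  if (INSN_COND (insn) == const_true_rtx)
    return "unconditional, or condition clobbered";
  return INSN_REVERSE_COND (insn)
	 ? "conditional, reversed sense" : "conditional";
}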