/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
This file is part of GCC.
part to OP. */
result = gen_reg_rtx (mode);
- emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
+ emit_clobber (result);
emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
return result;
}
\f
-/* Return the optab used for computing the operation given by
- the tree code, CODE. This function is not always usable (for
- example, it cannot give complete results for multiplication
- or division) but probably ought to be relied on more widely
- throughout the expander. */
+/* Return the optab used for computing the operation given by the tree code,
+ CODE, the tree TYPE and the optab subtype SUBTYPE. This function is not
+ always usable (for example, it cannot give complete results for
+ multiplication or division) but probably ought to be relied on more widely
+ throughout the expander. */
optab
-optab_for_tree_code (enum tree_code code, const_tree type)
+optab_for_tree_code (enum tree_code code, const_tree type,
+ enum optab_subtype subtype)
{
bool trapv;
switch (code)
return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
case LSHIFT_EXPR:
+ if (VECTOR_MODE_P (TYPE_MODE (type)))
+ {
+ if (subtype == optab_vector)
+ return TYPE_SATURATING (type) ? NULL : vashl_optab;
+
+ gcc_assert (subtype == optab_scalar);
+ }
if (TYPE_SATURATING(type))
return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
return ashl_optab;
case RSHIFT_EXPR:
+ if (VECTOR_MODE_P (TYPE_MODE (type)))
+ {
+ if (subtype == optab_vector)
+ return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
+
+ gcc_assert (subtype == optab_scalar);
+ }
return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
case LROTATE_EXPR:
+ if (VECTOR_MODE_P (TYPE_MODE (type)))
+ {
+ if (subtype == optab_vector)
+ return vrotl_optab;
+
+ gcc_assert (subtype == optab_scalar);
+ }
return rotl_optab;
case RROTATE_EXPR:
+ if (VECTOR_MODE_P (TYPE_MODE (type)))
+ {
+ if (subtype == optab_vector)
+ return vrotr_optab;
+
+ gcc_assert (subtype == optab_scalar);
+ }
return rotr_optab;
case MAX_EXPR:
oprnd0 = TREE_OPERAND (exp, 0);
tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
widen_pattern_optab =
- optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
+ optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0), optab_default);
icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
gcc_assert (icode != CODE_FOR_nothing);
xmode0 = insn_data[icode].operand[1].mode;
if (mode != VOIDmode
&& optimize
&& CONSTANT_P (x)
- && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
+ && rtx_cost (x, binoptab->code, optimize_insn_for_speed_p ())
+ > COSTS_N_INSNS (1))
{
if (GET_CODE (x) == CONST_INT)
{
enum optab_methods next_methods
= (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
? OPTAB_WIDEN : methods);
- enum mode_class class;
+ enum mode_class mclass;
enum machine_mode wider_mode;
rtx libfunc;
rtx temp;
rtx entry_last = get_last_insn ();
rtx last;
- class = GET_MODE_CLASS (mode);
+ mclass = GET_MODE_CLASS (mode);
/* If subtracting an integer constant, convert this into an addition of
the negated constant. */
&& optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
|| (binoptab == rotr_optab
&& optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
- && class == MODE_INT)
+ && mclass == MODE_INT)
{
optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
rtx newop1;
can open-code the operation. Check for a widening multiply at the
wider mode as well. */
- if (CLASS_HAS_WIDER_MODES_P (class)
+ if (CLASS_HAS_WIDER_MODES_P (mclass)
&& methods != OPTAB_DIRECT && methods != OPTAB_LIB)
for (wider_mode = GET_MODE_WIDER_MODE (mode);
wider_mode != VOIDmode;
|| binoptab == xor_optab
|| binoptab == add_optab || binoptab == sub_optab
|| binoptab == smul_optab || binoptab == ashl_optab)
- && class == MODE_INT)
+ && mclass == MODE_INT)
{
no_extend = 1;
xop0 = avoid_expensive_constant (mode, binoptab,
unsignedp, OPTAB_DIRECT);
if (temp)
{
- if (class != MODE_INT
+ if (mclass != MODE_INT
|| !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
GET_MODE_BITSIZE (wider_mode)))
{
/* These can be done a word at a time. */
if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
- && class == MODE_INT
+ && mclass == MODE_INT
&& GET_MODE_SIZE (mode) > UNITS_PER_WORD
&& optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
{
/* Synthesize double word shifts from single word shifts. */
if ((binoptab == lshr_optab || binoptab == ashl_optab
|| binoptab == ashr_optab)
- && class == MODE_INT
- && (GET_CODE (op1) == CONST_INT || !optimize_size)
+ && mclass == MODE_INT
+ && (GET_CODE (op1) == CONST_INT || optimize_insn_for_speed_p ())
&& GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
&& optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
&& optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
/* Synthesize double word rotates from single word shifts. */
if ((binoptab == rotl_optab || binoptab == rotr_optab)
- && class == MODE_INT
+ && mclass == MODE_INT
&& GET_CODE (op1) == CONST_INT
&& GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
&& optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
/* These can be done a word at a time by propagating carries. */
if ((binoptab == add_optab || binoptab == sub_optab)
- && class == MODE_INT
+ && mclass == MODE_INT
&& GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
&& optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
{
/* Indicate for flow that the entire target reg is being set. */
if (REG_P (target))
- emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
+ emit_clobber (xtarget);
/* Do the actual arithmetic. */
for (i = 0; i < nwords; i++)
try using a signed widening multiply. */
if (binoptab == smul_optab
- && class == MODE_INT
+ && mclass == MODE_INT
&& GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
&& optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
&& optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
/* Look for a wider mode of the same class for which it appears we can do
the operation. */
- if (CLASS_HAS_WIDER_MODES_P (class))
+ if (CLASS_HAS_WIDER_MODES_P (mclass))
{
for (wider_mode = GET_MODE_WIDER_MODE (mode);
wider_mode != VOIDmode;
|| binoptab == xor_optab
|| binoptab == add_optab || binoptab == sub_optab
|| binoptab == smul_optab || binoptab == ashl_optab)
- && class == MODE_INT)
+ && mclass == MODE_INT)
no_extend = 1;
xop0 = widen_operand (xop0, wider_mode, mode,
unsignedp, methods);
if (temp)
{
- if (class != MODE_INT
+ if (mclass != MODE_INT
|| !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
GET_MODE_BITSIZE (wider_mode)))
{
int unsignedp)
{
enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
- enum mode_class class;
+ enum mode_class mclass;
enum machine_mode wider_mode;
rtx entry_last = get_last_insn ();
rtx last;
- class = GET_MODE_CLASS (mode);
+ mclass = GET_MODE_CLASS (mode);
if (!targ0)
targ0 = gen_reg_rtx (mode);
/* It can't be done in this mode. Can we do it in a wider mode? */
- if (CLASS_HAS_WIDER_MODES_P (class))
+ if (CLASS_HAS_WIDER_MODES_P (mclass))
{
for (wider_mode = GET_MODE_WIDER_MODE (mode);
wider_mode != VOIDmode;
int unsignedp)
{
enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
- enum mode_class class;
+ enum mode_class mclass;
enum machine_mode wider_mode;
rtx entry_last = get_last_insn ();
rtx last;
- class = GET_MODE_CLASS (mode);
+ mclass = GET_MODE_CLASS (mode);
if (!targ0)
targ0 = gen_reg_rtx (mode);
/* It can't be done in this mode. Can we do it in a wider mode? */
- if (CLASS_HAS_WIDER_MODES_P (class))
+ if (CLASS_HAS_WIDER_MODES_P (mclass))
{
for (wider_mode = GET_MODE_WIDER_MODE (mode);
wider_mode != VOIDmode;
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
- enum mode_class class = GET_MODE_CLASS (mode);
- if (CLASS_HAS_WIDER_MODES_P (class))
+ enum mode_class mclass = GET_MODE_CLASS (mode);
+ if (CLASS_HAS_WIDER_MODES_P (mclass))
{
enum machine_mode wider_mode;
for (wider_mode = GET_MODE_WIDER_MODE (mode);
static rtx
widen_bswap (enum machine_mode mode, rtx op0, rtx target)
{
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum mode_class mclass = GET_MODE_CLASS (mode);
enum machine_mode wider_mode;
rtx x, last;
- if (!CLASS_HAS_WIDER_MODES_P (class))
+ if (!CLASS_HAS_WIDER_MODES_P (mclass))
return NULL_RTX;
for (wider_mode = GET_MODE_WIDER_MODE (mode);
if (target == 0)
target = gen_reg_rtx (mode);
if (REG_P (target))
- emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+ emit_clobber (target);
emit_move_insn (operand_subword (target, 0, 1, mode), t0);
emit_move_insn (operand_subword (target, 1, 1, mode), t1);
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
- enum mode_class class = GET_MODE_CLASS (mode);
- if (CLASS_HAS_WIDER_MODES_P (class))
+ enum mode_class mclass = GET_MODE_CLASS (mode);
+ if (CLASS_HAS_WIDER_MODES_P (mclass))
{
enum machine_mode wider_mode;
for (wider_mode = mode; wider_mode != VOIDmode;
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
int unsignedp)
{
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum mode_class mclass = GET_MODE_CLASS (mode);
enum machine_mode wider_mode;
rtx temp;
rtx libfunc;
goto try_libcall;
}
- if (CLASS_HAS_WIDER_MODES_P (class))
+ if (CLASS_HAS_WIDER_MODES_P (mclass))
for (wider_mode = GET_MODE_WIDER_MODE (mode);
wider_mode != VOIDmode;
wider_mode = GET_MODE_WIDER_MODE (wider_mode))
xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
(unoptab == neg_optab
|| unoptab == one_cmpl_optab)
- && class == MODE_INT);
+ && mclass == MODE_INT);
temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
unsignedp);
if (temp)
{
- if (class != MODE_INT
+ if (mclass != MODE_INT
|| !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
GET_MODE_BITSIZE (wider_mode)))
{
/* These can be done a word at a time. */
if (unoptab == one_cmpl_optab
- && class == MODE_INT
+ && mclass == MODE_INT
&& GET_MODE_SIZE (mode) > UNITS_PER_WORD
&& optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
{
/* It can't be done in this mode. Can we do it in a wider mode? */
- if (CLASS_HAS_WIDER_MODES_P (class))
+ if (CLASS_HAS_WIDER_MODES_P (mclass))
{
for (wider_mode = GET_MODE_WIDER_MODE (mode);
wider_mode != VOIDmode;
xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
(unoptab == neg_optab
|| unoptab == one_cmpl_optab)
- && class == MODE_INT);
+ && mclass == MODE_INT);
temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
unsignedp);
if (temp)
{
- if (class != MODE_INT)
+ if (mclass != MODE_INT)
{
if (target == 0)
target = gen_reg_rtx (mode);
value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
where W is the width of MODE. */
- if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && BRANCH_COST (optimize_insn_for_speed_p (),
+ false) >= 2)
{
rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
size_int (GET_MODE_BITSIZE (mode) - 1),
with two operands: an output TARGET and an input OP0.
TARGET *must* be nonzero, and the output is always stored there.
CODE is an rtx code such that (CODE OP0) is an rtx that describes
- the value that is stored into TARGET. */
+ the value that is stored into TARGET.
-void
-emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
+ Return false if expansion failed. */
+
+bool
+maybe_emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
rtx temp;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
rtx pat;
+ rtx last = get_last_insn ();
temp = target;
temp = gen_reg_rtx (GET_MODE (temp));
pat = GEN_FCN (icode) (temp, op0);
+ if (!pat)
+ {
+ delete_insns_since (last);
+ return false;
+ }
if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
add_equal_note (pat, temp, code, op0, NULL_RTX);
if (temp != target)
emit_move_insn (target, temp);
+ return true;
+}
+/* Generate an instruction whose insn-code is INSN_CODE,
+ with two operands: an output TARGET and an input OP0.
+ TARGET *must* be nonzero, and the output is always stored there.
+ CODE is an rtx code such that (CODE OP0) is an rtx that describes
+ the value that is stored into TARGET. */
+
+void
+emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
+{
+ bool ok = maybe_emit_unop_insn (icode, target, op0, code);
+ gcc_assert (ok);
}
\f
struct no_conflict_data
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
- struct no_conflict_data *p= p0;
+ struct no_conflict_data *p= (struct no_conflict_data *) p0;
/* If this inns directly contributes to setting the target, it must stay. */
if (reg_overlap_mentioned_p (p->target, dest))
p->must_stay = true;
}
-/* Encapsulate the block starting at FIRST and ending with LAST, which is
- logically equivalent to EQUIV, so it gets manipulated as a unit if it
- is possible to do so. */
-
-void
-maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
-{
- if (!flag_non_call_exceptions || !may_trap_p (equiv))
- {
- /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
- encapsulated region would not be in one basic block, i.e. when
- there is a control_flow_insn_p insn between FIRST and LAST. */
- bool attach_libcall_retval_notes = true;
- rtx insn, next = NEXT_INSN (last);
-
- for (insn = first; insn != next; insn = NEXT_INSN (insn))
- if (control_flow_insn_p (insn))
- {
- attach_libcall_retval_notes = false;
- break;
- }
-
- if (attach_libcall_retval_notes)
- {
- REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
- REG_NOTES (first));
- REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
- REG_NOTES (last));
- }
- }
-}
-
\f
/* Emit code to make a call to a constant function or a library call.
loading constants into registers; doing so allows them to be safely cse'ed
between blocks. Then we emit all the other insns in the block, followed by
an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
- note with an operand of EQUIV.
-
- Moving assignments to pseudos outside of the block is done to improve
- the generated code, but is not required to generate correct code,
- hence being unable to move an assignment is not grounds for not making
- a libcall block. There are two reasons why it is safe to leave these
- insns inside the block: First, we know that these pseudos cannot be
- used in generated RTL outside the block since they are created for
- temporary purposes within the block. Second, CSE will not record the
- values of anything set inside a libcall block, so we know they must
- be dead at the end of the block.
-
- Except for the first group of insns (the ones setting pseudos), the
- block is delimited by REG_RETVAL and REG_LIBCALL notes. */
+ note with an operand of EQUIV. */
+
void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
rtx final_dest = target;
- rtx prev, next, first, last, insn;
+ rtx prev, next, last, insn;
/* If this is a reg with REG_USERVAR_P set, then it could possibly turn
into a MEM later. Protect the libcall block from this change. */
if (note != 0)
XEXP (note, 0) = constm1_rtx;
else
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
- REG_NOTES (insn));
+ add_reg_note (insn, REG_EH_REGION, constm1_rtx);
}
/* First emit all insns that set pseudos. Remove them from the list as
for (insn = insns; insn; insn = next)
{
rtx set = single_set (insn);
- rtx note;
-
- /* Some ports (cris) create a libcall regions at their own. We must
- avoid any potential nesting of LIBCALLs. */
- if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
- remove_note (insn, note);
- if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
- remove_note (insn, note);
next = NEXT_INSN (insn);
if (optab_handler (mov_optab, GET_MODE (target))->insn_code
!= CODE_FOR_nothing)
set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
- else
- {
- /* Remove any existing REG_EQUAL note from "last", or else it will
- be mistaken for a note referring to the full contents of the
- libcall value when found together with the REG_RETVAL note added
- below. An existing note can come from an insn expansion at
- "last". */
- remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
- }
if (final_dest != target)
emit_move_insn (final_dest, target);
-
- if (prev == 0)
- first = get_insns ();
- else
- first = NEXT_INSN (prev);
-
- maybe_encapsulate_block (first, last, equiv);
}
\f
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
can_compare_p (enum rtx_code code, enum machine_mode mode,
enum can_compare_purpose purpose)
{
+ rtx test;
+ test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
do
{
+ int icode;
+
if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
{
if (purpose == ccp_jump)
return 1;
}
if (purpose == ccp_jump
- && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
- return 1;
+ && (icode = optab_handler (cbranch_optab, mode)->insn_code) != CODE_FOR_nothing
+ && insn_data[icode].operand[0].predicate (test, mode))
+ return 1;
+ if (purpose == ccp_store_flag
+ && (icode = optab_handler (cstore_optab, mode)->insn_code) != CODE_FOR_nothing
+ && insn_data[icode].operand[1].predicate (test, mode))
+ return 1;
if (purpose == ccp_cmov
&& optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
return 1;
- if (purpose == ccp_store_flag
- && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
- return 1;
+
mode = GET_MODE_WIDER_MODE (mode);
+ PUT_MODE (test, mode);
}
while (mode != VOIDmode);
/* If we are inside an appropriately-short loop and we are optimizing,
force expensive constants into a register. */
if (CONSTANT_P (x) && optimize
- && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
+ && (rtx_cost (x, COMPARE, optimize_insn_for_speed_p ())
+ > COSTS_N_INSNS (1)))
x = force_reg (mode, x);
if (CONSTANT_P (y) && optimize
- && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
+ && (rtx_cost (y, COMPARE, optimize_insn_for_speed_p ())
+ > COSTS_N_INSNS (1)))
y = force_reg (mode, y);
#ifdef HAVE_cc0
*px = x;
*py = y;
- if (can_compare_p (*pcomparison, mode, purpose))
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ {
+ gcc_assert (can_compare_p (*pcomparison, CCmode, purpose));
+ return;
+ }
+ else if (can_compare_p (*pcomparison, mode, purpose))
return;
/* Handle a lib call just for the mode we are using. */
-
libfunc = optab_libfunc (cmp_optab, mode);
if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
{
enum rtx_code comparison, int unsignedp, rtx label)
{
rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum mode_class mclass = GET_MODE_CLASS (mode);
enum machine_mode wider_mode = mode;
/* Try combined insns first. */
do
{
+ enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : wider_mode;
enum insn_code icode;
PUT_MODE (test, wider_mode);
if (label)
{
- icode = optab_handler (cbranch_optab, wider_mode)->insn_code;
+ icode = optab_handler (cbranch_optab, optab_mode)->insn_code;
if (icode != CODE_FOR_nothing
&& insn_data[icode].operand[0].predicate (test, wider_mode))
}
/* Handle some compares against zero. */
- icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
+ icode = (int) optab_handler (tst_optab, optab_mode)->insn_code;
if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
{
x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
/* Handle compares for which there is a directly suitable insn. */
- icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
+ icode = (int) optab_handler (cmp_optab, optab_mode)->insn_code;
if (icode != CODE_FOR_nothing)
{
x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
return;
}
- if (!CLASS_HAS_WIDER_MODES_P (class))
+ if (!CLASS_HAS_WIDER_MODES_P (mclass))
break;
wider_mode = GET_MODE_WIDER_MODE (wider_mode);
if (icode != CODE_FOR_nothing)
{
+ rtx last = get_last_insn ();
if (fmode != GET_MODE (from))
from = convert_to_mode (fmode, from, 0);
if (imode != GET_MODE (to))
target = gen_reg_rtx (imode);
- emit_unop_insn (icode, target, from,
- doing_unsigned ? UNSIGNED_FIX : FIX);
- if (target != to)
- convert_move (to, target, unsignedp);
- return;
+ if (maybe_emit_unop_insn (icode, target, from,
+ doing_unsigned ? UNSIGNED_FIX : FIX))
+ {
+ if (target != to)
+ convert_move (to, target, unsignedp);
+ return;
+ }
+ delete_insns_since (last);
}
}
icode = convert_optab_handler (tab, imode, fmode)->insn_code;
if (icode != CODE_FOR_nothing)
{
+ rtx last = get_last_insn ();
if (fmode != GET_MODE (from))
from = convert_to_mode (fmode, from, 0);
if (imode != GET_MODE (to))
target = gen_reg_rtx (imode);
- emit_unop_insn (icode, target, from, UNKNOWN);
+ if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
+ {
+ delete_insns_since (last);
+ continue;
+ }
if (target != to)
convert_move (to, target, 0);
return true;
unsigned opname_len = strlen (opname);
const char *mname = GET_MODE_NAME (mode);
unsigned mname_len = strlen (mname);
- char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
+ char *libfunc_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
char *p;
const char *q;
gen_libfunc (optable, opname, suffix, mode);
if (DECIMAL_FLOAT_MODE_P (mode))
{
- dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
+ dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
/* For BID support, change the name to have either a bid_ or dpd_ prefix
depending on the low level floating format used. */
memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
if (GET_MODE_CLASS (mode) == MODE_INT)
{
int len = strlen (name);
- char *v_name = alloca (len + 2);
+ char *v_name = XALLOCAVEC (char, len + 2);
strcpy (v_name, name);
v_name[len] = 'v';
v_name[len + 1] = 0;
mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
- nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
+ nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
nondec_name[0] = '_';
nondec_name[1] = '_';
memcpy (&nondec_name[2], opname, opname_len);
nondec_suffix = nondec_name + opname_len + 2;
- dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
+ dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
dec_name[0] = '_';
dec_name[1] = '_';
memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}
-/* Initialize the libfunc fiels of an of an intra-mode-class conversion optab.
+/* Initialize the libfunc fields of an intra-mode-class conversion optab.
The string formation rules are
similar to the ones for init_libfunc, above. */
mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
- nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
+ nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
nondec_name[0] = '_';
nondec_name[1] = '_';
memcpy (&nondec_name[2], opname, opname_len);
nondec_suffix = nondec_name + opname_len + 2;
- dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
+ dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
dec_name[0] = '_';
dec_name[1] = '_';
memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}
-rtx
-init_one_libfunc (const char *name)
+/* A table of previously-created libfuncs, hashed by name. */
+static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
+
+/* Hashtable callbacks for libfunc_decls. */
+
+static hashval_t
+libfunc_decl_hash (const void *entry)
{
- rtx symbol;
+ return htab_hash_string (IDENTIFIER_POINTER (DECL_NAME ((const_tree) entry)));
+}
- /* Create a FUNCTION_DECL that can be passed to
- targetm.encode_section_info. */
- /* ??? We don't have any type information except for this is
- a function. Pretend this is "int foo()". */
- tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
- build_function_type (integer_type_node, NULL_TREE));
- DECL_ARTIFICIAL (decl) = 1;
- DECL_EXTERNAL (decl) = 1;
- TREE_PUBLIC (decl) = 1;
+static int
+libfunc_decl_eq (const void *entry1, const void *entry2)
+{
+ return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
+}
- symbol = XEXP (DECL_RTL (decl), 0);
+rtx
+init_one_libfunc (const char *name)
+{
+ tree id, decl;
+ void **slot;
+ hashval_t hash;
+
+ if (libfunc_decls == NULL)
+ libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
+ libfunc_decl_eq, NULL);
+
+ /* See if we have already created a libfunc decl for this function. */
+ id = get_identifier (name);
+ hash = htab_hash_string (name);
+ slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
+ decl = (tree) *slot;
+ if (decl == NULL)
+ {
+ /* Create a new decl, so that it can be passed to
+ targetm.encode_section_info. */
+ /* ??? We don't have any type information except for this is
+ a function. Pretend this is "int foo()". */
+ decl = build_decl (FUNCTION_DECL, get_identifier (name),
+ build_function_type (integer_type_node, NULL_TREE));
+ DECL_ARTIFICIAL (decl) = 1;
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+
+ /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
+ are the flags assigned by targetm.encode_section_info. */
+ SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
+
+ *slot = decl;
+ }
+ return XEXP (DECL_RTL (decl), 0);
+}
- /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
- are the flags assigned by targetm.encode_section_info. */
- SET_SYMBOL_REF_DECL (symbol, 0);
+/* Adjust the assembler name of libfunc NAME to ASMSPEC. */
- return symbol;
+rtx
+set_user_assembler_libfunc (const char *name, const char *asmspec)
+{
+ tree id, decl;
+ void **slot;
+ hashval_t hash;
+
+ id = get_identifier (name);
+ hash = htab_hash_string (name);
+ slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
+ gcc_assert (slot);
+ decl = (tree) *slot;
+ set_user_assembler_name (decl, asmspec);
+ return XEXP (DECL_RTL (decl), 0);
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
val = 0;
slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
if (*slot == NULL)
- *slot = ggc_alloc (sizeof (struct libfunc_entry));
+ *slot = GGC_NEW (struct libfunc_entry);
(*slot)->optab = (size_t) (optable - &optab_table[0]);
(*slot)->mode1 = mode;
(*slot)->mode2 = VOIDmode;
val = 0;
slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
if (*slot == NULL)
- *slot = ggc_alloc (sizeof (struct libfunc_entry));
+ *slot = GGC_NEW (struct libfunc_entry);
(*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
(*slot)->mode1 = tmode;
(*slot)->mode2 = fmode;
sync_new_xor_optab[i] = CODE_FOR_nothing;
sync_new_nand_optab[i] = CODE_FOR_nothing;
sync_compare_and_swap[i] = CODE_FOR_nothing;
- sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
sync_lock_test_and_set[i] = CODE_FOR_nothing;
sync_lock_release[i] = CODE_FOR_nothing;
rtx l;
o = &optab_table[i];
- l = optab_libfunc (o, j);
+ l = optab_libfunc (o, (enum machine_mode) j);
if (l)
{
gcc_assert (GET_CODE (l) == SYMBOL_REF);
rtx l;
o = &convert_optab_table[i];
- l = convert_optab_libfunc (o, j, k);
+ l = convert_optab_libfunc (o, (enum machine_mode) j,
+ (enum machine_mode) k);
if (l)
{
gcc_assert (GET_CODE (l) == SYMBOL_REF);
return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}
+/* Helper function to find the MODE_CC set in a sync_compare_and_swap
+ pattern. */
+
+static void
+find_cc_set (rtx x, const_rtx pat, void *data)
+{
+ if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
+ && GET_CODE (pat) == SET)
+ {
+ rtx *p_cc_reg = (rtx *) data;
+ gcc_assert (!*p_cc_reg);
+ *p_cc_reg = x;
+ }
+}
+
/* Expand a compare-and-swap operation and store true into the result if
the operation was successful and false otherwise. Return the result.
Unlike other routines, TARGET is not optional. */
{
enum machine_mode mode = GET_MODE (mem);
enum insn_code icode;
- rtx subtarget, label0, label1;
+ rtx subtarget, seq, cc_reg;
/* If the target supports a compare-and-swap pattern that simultaneously
sets some flag for success, then use it. Otherwise use the regular
compare-and-swap and follow that immediately with a compare insn. */
- icode = sync_compare_and_swap_cc[mode];
- switch (icode)
- {
- default:
- subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
- NULL_RTX, icode);
- if (subtarget != NULL_RTX)
- break;
-
- /* FALLTHRU */
- case CODE_FOR_nothing:
- icode = sync_compare_and_swap[mode];
- if (icode == CODE_FOR_nothing)
- return NULL_RTX;
-
- /* Ensure that if old_val == mem, that we're not comparing
- against an old value. */
- if (MEM_P (old_val))
- old_val = force_reg (mode, old_val);
+ icode = sync_compare_and_swap[mode];
+ if (icode == CODE_FOR_nothing)
+ return NULL_RTX;
+ do
+ {
+ start_sequence ();
subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
- NULL_RTX, icode);
+ NULL_RTX, icode);
+ cc_reg = NULL_RTX;
if (subtarget == NULL_RTX)
- return NULL_RTX;
-
- emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
- }
-
- /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
- setcc instruction from the beginning. We don't work too hard here,
- but it's nice to not be stupid about initial code gen either. */
- if (STORE_FLAG_VALUE == 1)
- {
- icode = setcc_gen_code[EQ];
- if (icode != CODE_FOR_nothing)
{
- enum machine_mode cmode = insn_data[icode].operand[0].mode;
- rtx insn;
-
- subtarget = target;
- if (!insn_data[icode].operand[0].predicate (target, cmode))
- subtarget = gen_reg_rtx (cmode);
-
- insn = GEN_FCN (icode) (subtarget);
- if (insn)
- {
- emit_insn (insn);
- if (GET_MODE (target) != GET_MODE (subtarget))
- {
- convert_move (target, subtarget, 1);
- subtarget = target;
- }
- return subtarget;
- }
+ end_sequence ();
+ return NULL_RTX;
}
- }
-
- /* Without an appropriate setcc instruction, use a set of branches to
- get 1 and 0 stored into target. Presumably if the target has a
- STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
- label0 = gen_label_rtx ();
- label1 = gen_label_rtx ();
+ if (have_insn_for (COMPARE, CCmode))
+ note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
+ seq = get_insns ();
+ end_sequence ();
- emit_jump_insn (bcc_gen_fctn[EQ] (label0));
- emit_move_insn (target, const0_rtx);
- emit_jump_insn (gen_jump (label1));
- emit_barrier ();
- emit_label (label0);
- emit_move_insn (target, const1_rtx);
- emit_label (label1);
+ /* We might be comparing against an old value. Try again. :-( */
+ if (!cc_reg && MEM_P (old_val))
+ {
+ seq = NULL_RTX;
+ old_val = force_reg (mode, old_val);
+ }
+ }
+ while (!seq);
- return target;
+ emit_insn (seq);
+ if (cc_reg)
+ return emit_store_flag (target, EQ, cc_reg, const0_rtx, VOIDmode, 0, 1);
+ else
+ return emit_store_flag (target, EQ, subtarget, old_val, VOIDmode, 1, 1);
}
/* This is a helper function for the other atomic operations. This function
{
enum machine_mode mode = GET_MODE (mem);
enum insn_code icode;
- rtx label, cmp_reg, subtarget;
+ rtx label, cmp_reg, subtarget, cc_reg;
/* The loop we want to generate looks like
/* If the target supports a compare-and-swap pattern that simultaneously
sets some flag for success, then use it. Otherwise use the regular
compare-and-swap and follow that immediately with a compare insn. */
- icode = sync_compare_and_swap_cc[mode];
- switch (icode)
- {
- default:
- subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
- cmp_reg, icode);
- if (subtarget != NULL_RTX)
- {
- gcc_assert (subtarget == cmp_reg);
- break;
- }
+ icode = sync_compare_and_swap[mode];
+ if (icode == CODE_FOR_nothing)
+ return false;
- /* FALLTHRU */
- case CODE_FOR_nothing:
- icode = sync_compare_and_swap[mode];
- if (icode == CODE_FOR_nothing)
- return false;
+ subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
+ cmp_reg, icode);
+ if (subtarget == NULL_RTX)
+ return false;
- subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
- cmp_reg, icode);
- if (subtarget == NULL_RTX)
- return false;
+ cc_reg = NULL_RTX;
+ if (have_insn_for (COMPARE, CCmode))
+ note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
+ if (cc_reg)
+ {
+ cmp_reg = cc_reg;
+ old_reg = const0_rtx;
+ }
+ else
+ {
if (subtarget != cmp_reg)
emit_move_insn (cmp_reg, subtarget);
-
- emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
}
/* ??? Mark this jump predicted not taken? */
- emit_jump_insn (bcc_gen_fctn[NE] (label));
-
+ emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, const0_rtx, GET_MODE (cmp_reg), 1,
+ label);
return true;
}
t1 = t0;
if (code == NOT)
{
- t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
- code = AND;
+ t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
+ true, OPTAB_LIB_WIDEN);
+ t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
}
- t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
- true, OPTAB_LIB_WIDEN);
-
+ else
+ t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
+ true, OPTAB_LIB_WIDEN);
insn = get_insns ();
end_sequence ();
}
if (code == NOT)
- target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
- target = expand_simple_binop (mode, code, target, val, NULL_RTX,
- true, OPTAB_LIB_WIDEN);
+ {
+ target = expand_simple_binop (mode, AND, target, val,
+ NULL_RTX, true,
+ OPTAB_LIB_WIDEN);
+ target = expand_simple_unop (mode, code, target,
+ NULL_RTX, true);
+ }
+ else
+ target = expand_simple_binop (mode, code, target, val,
+ NULL_RTX, true,
+ OPTAB_LIB_WIDEN);
}
return target;
t1 = t0;
if (code == NOT)
{
- t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
- code = AND;
+ t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
+ true, OPTAB_LIB_WIDEN);
+ t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
}
- t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
- true, OPTAB_LIB_WIDEN);
+ else
+ t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
+ true, OPTAB_LIB_WIDEN);
if (after)
emit_move_insn (target, t1);