#ifndef HAVE_atomic_test_and_set
#define HAVE_atomic_test_and_set 0
#define CODE_FOR_atomic_test_and_set CODE_FOR_nothing
-#define gen_atomic_test_and_set(x,y,z) \
-  (gcc_unreachable (), (void) (0 && (x) && (y) && (z)), NULL_RTX)
#endif
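
The guard above is the usual insn-availability idiom: the generated insn-flags.h defines HAVE_* and CODE_FOR_* only when the target's .md file provides the pattern, and the fallback definitions keep target-independent code compiling. The dummy gen_atomic_test_and_set macro can be deleted because the rewrite below dispatches through maybe_expand_insn on the insn code, rather than calling a generator function that may not exist. A standalone sketch of the idiom (hypothetical HAVE_example_insn in place of the real generated macros, not GCC source):

#include <stdio.h>

enum insn_code { CODE_FOR_nothing = -1 };

#ifndef HAVE_example_insn          /* Normally supplied by generated headers.  */
#define HAVE_example_insn 0
#define CODE_FOR_example_insn CODE_FOR_nothing
#endif

static void
expand_example (void)
{
  if (!HAVE_example_insn)          /* Compile-time 0: the rest is dead code.  */
    {
      puts ("pattern absent, take the fallback path");
      return;
    }
  printf ("expanding insn code %d\n", (int) CODE_FOR_example_insn);
}

int
main (void)
{
  expand_example ();
  return 0;
}
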
static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  enum machine_mode pat_bool_mode;
-  const struct insn_data_d *id;
+  struct expand_operand ops[3];

  if (!HAVE_atomic_test_and_set)
    return NULL_RTX;

-  id = &insn_data[CODE_FOR_atomic_test_and_set];
-  pat_bool_mode = id->operand[0].mode;
-
-  /* ??? We only support test-and-set on single bytes at the moment.
-     We'd have to change the builtin to allow wider memories.  */
-  gcc_checking_assert (id->operand[1].mode == QImode);
-
  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
+  gcc_checking_assert
+    (insn_data[CODE_FOR_atomic_test_and_set].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

-  if (target == NULL || GET_MODE (target) != pat_bool_mode)
-    target = gen_reg_rtx (pat_bool_mode);
+  pat_bool_mode = insn_data[CODE_FOR_atomic_test_and_set].operand[0].mode;
+  create_output_operand (&ops[0], target, pat_bool_mode);
+  create_fixed_operand (&ops[1], mem);
+  create_integer_operand (&ops[2], model);

-  emit_insn (gen_atomic_test_and_set (target, mem, GEN_INT (model)));
-
-  return target;
+  if (maybe_expand_insn (CODE_FOR_atomic_test_and_set, 3, ops))
+    return ops[0].value;
+  return NULL_RTX;
}
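
For context, the builtin this expander services, __atomic_test_and_set, takes a pointer to a single byte plus a memory model and returns the previous contents as a boolean, which is exactly the three-operand shape set up above (note that the caller must use ops[0].value, since create_output_operand lets the pattern place the result somewhere other than TARGET). A minimal user-level spinlock on top of the builtin (ordinary application code, not part of this patch):

#include <stdbool.h>

static bool lock;           /* A single byte: the QImode memory operand.  */

static void
spin_lock (void)
{
  /* The second argument is the memory-model operand (operand 2 above).  */
  while (__atomic_test_and_set (&lock, __ATOMIC_ACQUIRE))
    continue;
}

static void
spin_unlock (void)
{
  __atomic_clear (&lock, __ATOMIC_RELEASE);
}

int
main (void)
{
  spin_lock ();
  spin_unlock ();
  return 0;
}
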
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  enum machine_mode mode = GET_MODE (mem);
-  rtx ret;
+  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

-  if (target == NULL_RTX)
-    target = gen_reg_rtx (mode);
+  /* Be binary compatible with non-default settings of trueval, and different
+     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
+     another only has atomic-exchange.  */
+  if (targetm.atomic_test_and_set_trueval == 1)
+    {
+      trueval = const1_rtx;
+      subtarget = target ? target : gen_reg_rtx (mode);
+    }
+  else
+    {
+      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
+      subtarget = gen_reg_rtx (mode);
+    }
-  /* If there is no test and set, try exchange, then a compare_and_swap loop,
-     then __sync_test_and_set.  */
-  ret = maybe_emit_atomic_exchange (target, mem, const1_rtx, model);
-  if (ret)
-    return ret;
+  /* Try the atomic-exchange optab...  */
+  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

-  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, const1_rtx);
-  if (ret)
-    return ret;
+  /* ... then an atomic-compare-and-swap loop ...  */
+  if (!ret)
+    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

-  ret = maybe_emit_sync_lock_test_and_set (target, mem, const1_rtx, model);
-  if (ret)
-    return ret;
+  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
+  if (!ret)
+    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
-  /* Failing all else, assume a single threaded environment and simply perform
-     the operation.  */
-  emit_move_insn (target, mem);
-  emit_move_insn (mem, const1_rtx);
-  return target;
+  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
+     things with the value 1.  Thus we try again, passing const1_rtx in
+     place of trueval.  */
+  if (!ret && targetm.atomic_test_and_set_trueval != 1)
+    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
+
+  /* Failing all else, assume a single threaded environment and simply
+     perform the operation.  */
+  if (!ret)
+    {
+      emit_move_insn (subtarget, mem);
+      emit_move_insn (mem, trueval);
+      ret = subtarget;
+    }
+
+  /* Recall that we have to return a boolean value; rectify if trueval
+     is not exactly one.  */
+  if (targetm.atomic_test_and_set_trueval != 1)
+    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
+
+  return ret;
}
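
The rectification step matters on targets whose native operation stores something other than 1; the canonical example is Sparc's ldstub, which writes 0xff into the byte. A standalone model of what the fallback path computes in that case (illustration only, not GCC source; assumes targetm.atomic_test_and_set_trueval == 0xff):

#include <stdbool.h>
#include <stdio.h>

static unsigned char mem;

static bool
test_and_set_model (void)
{
  const unsigned char trueval = 0xff;  /* targetm.atomic_test_and_set_trueval.  */

  /* "Try the atomic-exchange optab": store trueval, recover the old byte.  */
  unsigned char old = __atomic_exchange_n (&mem, trueval, __ATOMIC_SEQ_CST);

  /* The builtin must yield a boolean, so normalize exactly as the
     emit_store_flag_force (..., NE, ..., const0_rtx, ...) call does.  */
  return old != 0;
}

int
main (void)
{
  /* Prints "first: 0, second: 1": the byte was clear, then set to 0xff.  */
  printf ("first: %d, second: %d\n", test_and_set_model (), test_and_set_model ());
  return 0;
}
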
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.  */
      /* Issue val = compare_and_swap (mem, 0, 0).
         This may cause the occasional harmless store of 0 when the value is
         already 0, but it seems to be OK according to the standards guys.  */
-      expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
-                                      const0_rtx, false, model, model);
-      return target;
+      if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
+                                          const0_rtx, false, model, model))
+        return target;
+      else
+        /* Otherwise there is no atomic load, leave the library call.  */
+        return NULL_RTX;
    }

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
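
To make the compare-and-swap trick above concrete: a compare-exchange with expected == desired == 0 never changes a nonzero value, and on failure it hands back the value it observed, which is precisely an atomic load. A standalone illustration using the GCC builtin (not GCC source):

#include <stdint.h>

static int64_t
load_via_cas (int64_t *mem)
{
  int64_t expected = 0;

  /* On failure the builtin writes the observed value into `expected`;
     on success the value was already 0, the harmless store of 0 over 0
     occurs, and `expected` still holds 0.  Either way, `expected` is
     the atomically loaded value.  */
  __atomic_compare_exchange_n (mem, &expected, 0, false,
                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;
}

int
main (void)
{
  int64_t x = 42;
  return load_via_cas (&x) == 42 ? 0 : 1;
}

Note the trade-off this encodes: the "load" may write to memory, so it requires a writable location and a wide-enough compare-and-swap; when neither exists, the expander now returns NULL_RTX so the call falls through to the atomic library.
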