{
rtx temp = emit_move_insn (target, xtarget);
- set_unique_reg_note (temp,
- REG_EQUAL,
- gen_rtx_fmt_ee (binoptab->code, mode,
- copy_rtx (xop0),
- copy_rtx (xop1)));
+ set_dst_reg_note (temp, REG_EQUAL,
+ gen_rtx_fmt_ee (binoptab->code, mode,
+ copy_rtx (xop0),
+ copy_rtx (xop1)),
+ target);
}
else
target = xtarget;
if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
{
temp = emit_move_insn (target ? target : product, product);
- set_unique_reg_note (temp,
- REG_EQUAL,
- gen_rtx_fmt_ee (MULT, mode,
- copy_rtx (op0),
- copy_rtx (op1)));
+ set_dst_reg_note (temp,
+ REG_EQUAL,
+ gen_rtx_fmt_ee (MULT, mode,
+ copy_rtx (op0),
+ copy_rtx (op1)),
+ target ? target : product);
}
return product;
}
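All of the set_unique_reg_note -> set_dst_reg_note conversions in this patch depend on the same contract: the note is attached only when the insn is a single SET whose destination is the register the note describes. A minimal sketch of that assumed contract follows; the real helper lives in emit-rtl.c, and the name sketch_set_dst_reg_note is ours.

/* Sketch only: assumed semantics of set_dst_reg_note.  Attach DATUM as
   a note of kind KIND to INSN, but only if INSN is a single SET whose
   destination is DST; otherwise the note could describe the wrong
   register and must be dropped.  */
static rtx
sketch_set_dst_reg_note (rtx insn, enum reg_note kind, rtx datum, rtx dst)
{
  rtx set = single_set (insn);

  if (set && SET_DEST (set) == dst)
    return set_unique_reg_note (insn, kind, datum);
  return NULL_RTX;
}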
gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
target = lowpart_subreg_maybe_copy (mode, temp, imode);
- set_unique_reg_note (get_last_insn (), REG_EQUAL,
- gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
+ set_dst_reg_note (get_last_insn (), REG_EQUAL,
+ gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
+ target);
}
return target;
}
last = emit_move_insn (target, result);
- if (optab_handler (mov_optab, GET_MODE (target)) != CODE_FOR_nothing)
- set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
+ set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
if (final_dest != target)
emit_move_insn (final_dest, target);
{
/* Make a place for a REG_NOTE and add it. */
insn = emit_move_insn (to, to);
- set_unique_reg_note (insn,
- REG_EQUAL,
- gen_rtx_fmt_e (UNSIGNED_FIX,
- GET_MODE (to),
- copy_rtx (from)));
+ set_dst_reg_note (insn, REG_EQUAL,
+ gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
+ copy_rtx (from)),
+ to);
}
return;
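For reference, the emit_move_insn (to, to) above exists only so there is an insn to hang the note on; the resulting RTL looks roughly like the following (modes and uids invented for the example).

/* Illustrative RTL, assuming TO is an SImode pseudo and FROM a DFmode
   pseudo:

     (insn 42 ... (set (reg:SI 60) (reg:SI 60)) ...
        (expr_list:REG_EQUAL (unsigned_fix:SI (reg:DF 61))
           (nil)))

   set_dst_reg_note attaches the note only because the single SET's
   destination is TO itself.  */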
init_sync_libfuncs_1 (sync_old_add_optab, "__sync_fetch_and_add", max);
init_sync_libfuncs_1 (sync_old_sub_optab, "__sync_fetch_and_sub", max);
- init_sync_libfuncs_1 (sync_old_ior_optab, "__sync_fetch_and_ior", max);
+ init_sync_libfuncs_1 (sync_old_ior_optab, "__sync_fetch_and_or", max);
init_sync_libfuncs_1 (sync_old_and_optab, "__sync_fetch_and_and", max);
init_sync_libfuncs_1 (sync_old_xor_optab, "__sync_fetch_and_xor", max);
init_sync_libfuncs_1 (sync_old_nand_optab, "__sync_fetch_and_nand", max);
init_sync_libfuncs_1 (sync_new_add_optab, "__sync_add_and_fetch", max);
init_sync_libfuncs_1 (sync_new_sub_optab, "__sync_sub_and_fetch", max);
- init_sync_libfuncs_1 (sync_new_ior_optab, "__sync_ior_and_fetch", max);
+ init_sync_libfuncs_1 (sync_new_ior_optab, "__sync_or_and_fetch", max);
init_sync_libfuncs_1 (sync_new_and_optab, "__sync_and_and_fetch", max);
init_sync_libfuncs_1 (sync_new_xor_optab, "__sync_xor_and_fetch", max);
init_sync_libfuncs_1 (sync_new_nand_optab, "__sync_nand_and_fetch", max);
}
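The two renames make the libfunc names match the builtins they implement; the documented external names use "or", not "ior". At the source level the calls look like this (illustrative only; `x' and `mask' are hypothetical):

/* Illustrative only: the builtins these libfuncs back when no inline
   pattern exists.  */
static int x;

int
demo_sync_or (int mask)
{
  int before = __sync_fetch_and_or (&x, mask); /* value before the OR */
  int after = __sync_or_and_fetch (&x, mask);  /* value after the OR  */
  return before + after;
}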
/* If the input is a constant, expand it specially. */
- if (CONSTANT_P (sel))
+ gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
+ if (GET_CODE (sel) == CONST_VECTOR)
{
icode = direct_optab_handler (vec_perm_const_optab, mode);
if (icode != CODE_FOR_nothing)
{
unsigned int j, this_e;
- this_e = INTVAL (XVECEXP (sel, 0, i));
+ this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
this_e &= 2 * e - 1;
this_e *= u;
rtx addr;
addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
- return emit_library_call_value (libfunc, target, LCT_NORMAL,
+ return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
mode, 2, addr, ptr_mode,
val, mode);
}
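The switch to NULL_RTX here (and in the compare-and-swap hunk below) avoids handing the libcall an unsuitable value target; in these expanders TARGET can be const0_rtx, meaning the result is unused. With NULL_RTX the call allocates a fresh pseudo, and a caller that does want the value in a particular register copies it afterwards, along these lines (sketch of the assumed pattern, reusing the variables from the hunk above):

/* Sketch only: let the libcall choose its own pseudo for the return
   value, then move it if a real target was supplied.  */
rtx ret = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                   mode, 2, addr, ptr_mode, val, mode);
if (target != NULL_RTX && target != const0_rtx && target != ret)
  emit_move_insn (target, ret);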
if (libfunc != NULL)
{
rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
- target_oval = emit_library_call_value (libfunc, target_oval, LCT_NORMAL,
+ target_oval = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
mode, 3, addr, ptr_mode,
expected, mode, desired, mode);
}
}
+/* See if there is a better way to implement the operation "*MEM CODE VAL"
+ using memory order MODEL. If AFTER is true, the operation needs to return
+ the value of *MEM after the operation; otherwise, the previous value.
+ TARGET is an optional place to store the result. The result is unused if
+ it is const0_rtx.
+ Return the result if there is a better sequence, otherwise NULL_RTX. */
+
+static rtx
+maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
+ enum memmodel model, bool after)
+{
+ /* If the value is prefetched, or not used, it may be possible to replace
+ the sequence with a native exchange operation. */
+ if (!after || target == const0_rtx)
+ {
+ /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
+ if (code == AND && val == const0_rtx)
+ {
+ if (target == const0_rtx)
+ target = gen_reg_rtx (GET_MODE (mem));
+ return maybe_emit_atomic_exchange (target, mem, val, model);
+ }
+
+ /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
+ if (code == IOR && val == constm1_rtx)
+ {
+ if (target == const0_rtx)
+ target = gen_reg_rtx (GET_MODE (mem));
+ return maybe_emit_atomic_exchange (target, mem, val, model);
+ }
+ }
+
+ return NULL_RTX;
+}
+
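The two rewrites in maybe_optimize_fetch_op rest on simple identities: x & 0 is always 0 and x | -1 is always all-ones, so when only the prior value matters the read-modify-write collapses to an atomic exchange. In source terms (illustrative only; `v' is hypothetical):

/* Illustrative only: each pair leaves the same value in memory and
   returns the previous contents, which is why the expander may emit an
   exchange for the fetch operation.  */
static int v;

int
demo_fetch_op_equivalences (void)
{
  int a = __atomic_fetch_and (&v, 0, __ATOMIC_SEQ_CST);   /* v becomes 0  */
  int b = __atomic_exchange_n (&v, 0, __ATOMIC_SEQ_CST);  /* same effect  */
  int c = __atomic_fetch_or (&v, -1, __ATOMIC_SEQ_CST);   /* v becomes -1 */
  int d = __atomic_exchange_n (&v, -1, __ATOMIC_SEQ_CST); /* same effect  */
  return a + b + c + d;
}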
/* Try to emit an instruction for a specific operation variation.
OPTAB contains the OP functions.
TARGET is an optional place to return the result. const0_rtx means unused.
get_atomic_op_for_code (&optab, code);
+ /* Check to see if there are any better instructions. */
+ result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
+ if (result)
+ return result;
+
/* Check for the case where the result isn't used and try those patterns. */
if (unused_result)
{
{
/* If the result isn't used, no need to emit compensation code. */
if (unused_result)
- return target;
+ return result;
/* Issue compensation code. Fetch_after == fetch_before OP val.
Fetch_before == after REVERSE_OP val. */
result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
2, addr, ptr_mode, val, mode);
- if (unused_result)
- return target;
- if (fixup)
+ if (!unused_result && fixup)
result = expand_simple_binop (mode, code, result, val, target,
true, OPTAB_LIB_WIDEN);
return result;
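The compensation identity the comment states ("Fetch_after == fetch_before OP val") in source terms (illustrative only; `counter' is hypothetical):

/* Illustrative only: recovering an after-op result when only a
   fetch-before primitive is available.  */
static int counter;

int
demo_add_and_fetch (int val)
{
  int before = __sync_fetch_and_add (&counter, val); /* fetch_before      */
  return before + val;                               /* fetch_after value */
}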
return true;
/* If the operand is a memory whose address has no side effects,
- try forcing the address into a register. The check for side
- effects is important because force_reg cannot handle things
- like auto-modified addresses. */
- if (insn_data[(int) icode].operand[opno].allows_mem
- && MEM_P (op->value)
- && !side_effects_p (XEXP (op->value, 0)))
- {
- rtx addr, mem, last;
-
- last = get_last_insn ();
- addr = force_reg (Pmode, XEXP (op->value, 0));
- mem = replace_equiv_address (op->value, addr);
- if (insn_operand_matches (icode, opno, mem))
+ try forcing the address into a non-virtual pseudo register.
+ The check for side effects is important because copy_to_mode_reg
+ cannot handle things like auto-modified addresses. */
+ if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
+ {
+ rtx addr, mem;
+
+ mem = op->value;
+ addr = XEXP (mem, 0);
+ if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
+ && !side_effects_p (addr))
{
- op->value = mem;
- return true;
+ rtx last;
+ enum machine_mode mode;
+
+ last = get_last_insn ();
+ mode = targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
+ mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
+ if (insn_operand_matches (icode, opno, mem))
+ {
+ op->value = mem;
+ return true;
+ }
+ delete_insns_since (last);
}
- delete_insns_since (last);
}
return false;
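The reason side_effects_p gates the copy: an auto-modified address such as (post_inc (reg)) updates the register as part of the access, and replacing it with a plain register copy would drop that update. A minimal illustration of the check:

/* Illustrative only: auto-modified addresses report side effects, so
   the legitimization above leaves them untouched.  */
static void
demo_autoinc_has_side_effects (void)
{
  rtx reg = gen_reg_rtx (Pmode);
  rtx autoinc = gen_rtx_POST_INC (Pmode, reg);

  gcc_checking_assert (side_effects_p (autoinc));
  gcc_checking_assert (!side_effects_p (reg));
}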