return use_narrower_mode (SUBREG_REG (tem), GET_MODE (tem),
GET_MODE (SUBREG_REG (tem)));
return tem;
+ case ASM_OPERANDS:
+ /* Don't do any replacements in second and following
+ ASM_OPERANDS of inline-asm with multiple sets.
+ ASM_OPERANDS_INPUT_VEC, ASM_OPERANDS_INPUT_CONSTRAINT_VEC
+ and ASM_OPERANDS_LABEL_VEC need to be equal between
+ all the ASM_OPERANDs in the insn and adjust_insn will
+ fix this up. */
+ if (ASM_OPERANDS_OUTPUT_IDX (loc) != 0)
+ return loc;
+ break;
default:
break;
}
note_stores (PATTERN (insn), adjust_mem_stores, &amd);
amd.store = false;
- note_uses (&PATTERN (insn), adjust_mem_uses, &amd);
+ if (GET_CODE (PATTERN (insn)) == PARALLEL
+ && asm_noperands (PATTERN (insn)) > 0
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ {
+ rtx body, set0;
+ int i;
+
+ /* inline-asm with multiple sets is tiny bit more complicated,
+ because the 3 vectors in ASM_OPERANDS need to be shared between
+ all ASM_OPERANDS in the instruction. adjust_mems will
+ not touch ASM_OPERANDS other than the first one, asm_noperands
+ test above needs to be called before that (otherwise it would fail)
+ and afterwards this code fixes it up. */
+ note_uses (&PATTERN (insn), adjust_mem_uses, &amd);
+ body = PATTERN (insn);
+ set0 = XVECEXP (body, 0, 0);
+ gcc_checking_assert (GET_CODE (set0) == SET
+ && GET_CODE (SET_SRC (set0)) == ASM_OPERANDS
+ && ASM_OPERANDS_OUTPUT_IDX (SET_SRC (set0)) == 0);
+ for (i = 1; i < XVECLEN (body, 0); i++)
+ if (GET_CODE (XVECEXP (body, 0, i)) != SET)
+ break;
+ else
+ {
+ set = XVECEXP (body, 0, i);
+ gcc_checking_assert (GET_CODE (SET_SRC (set)) == ASM_OPERANDS
+ && ASM_OPERANDS_OUTPUT_IDX (SET_SRC (set))
+ == i);
+ if (ASM_OPERANDS_INPUT_VEC (SET_SRC (set))
+ != ASM_OPERANDS_INPUT_VEC (SET_SRC (set0))
+ || ASM_OPERANDS_INPUT_CONSTRAINT_VEC (SET_SRC (set))
+ != ASM_OPERANDS_INPUT_CONSTRAINT_VEC (SET_SRC (set0))
+ || ASM_OPERANDS_LABEL_VEC (SET_SRC (set))
+ != ASM_OPERANDS_LABEL_VEC (SET_SRC (set0)))
+ {
+ rtx newsrc = shallow_copy_rtx (SET_SRC (set));
+ ASM_OPERANDS_INPUT_VEC (newsrc)
+ = ASM_OPERANDS_INPUT_VEC (SET_SRC (set0));
+ ASM_OPERANDS_INPUT_CONSTRAINT_VEC (newsrc)
+ = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (SET_SRC (set0));
+ ASM_OPERANDS_LABEL_VEC (newsrc)
+ = ASM_OPERANDS_LABEL_VEC (SET_SRC (set0));
+ validate_change (NULL_RTX, &SET_SRC (set), newsrc, true);
+ }
+ }
+ }
+ else
+ note_uses (&PATTERN (insn), adjust_mem_uses, &amd);
/* For read-only MEMs containing some constant, prefer those
constants. */
/* Return the decl DV refers to.  DV must hold a decl (checked only
   when ENABLE_CHECKING is defined, via gcc_checking_assert).  */
static inline tree
dv_as_decl (decl_or_value dv)
{
  gcc_checking_assert (dv_is_decl_p (dv));
  return (tree) dv;
}
/* Return the VALUE rtx DV refers to.  DV must hold a value (checked
   only when ENABLE_CHECKING is defined, via gcc_checking_assert).  */
static inline rtx
dv_as_value (decl_or_value dv)
{
  gcc_checking_assert (dv_is_value_p (dv));
  return (rtx) dv;
}
{
  decl_or_value dv;
  dv = decl;
  /* Checked only with ENABLE_CHECKING; the implicit conversion above
     must yield something dv_is_decl_p accepts.  */
  gcc_checking_assert (dv_is_decl_p (dv));
  return dv;
}
{
  decl_or_value dv;
  dv = value;
  /* Checked only with ENABLE_CHECKING; the implicit conversion above
     must yield something dv_is_value_p accepts.  */
  gcc_checking_assert (dv_is_value_p (dv));
  return dv;
}
nnode->next = dnode;
dnode = nnode;
}
-#ifdef ENABLE_CHECKING
else if (r == 0)
- gcc_assert (rtx_equal_p (dnode->loc, snode->loc));
-#endif
+ gcc_checking_assert (rtx_equal_p (dnode->loc, snode->loc));
if (r >= 0)
snode = snode->next;
if (!var)
return NULL;
-#ifdef ENABLE_CHECKING
- gcc_assert (dv_onepart_p (var->dv));
-#endif
+ gcc_checking_assert (dv_onepart_p (var->dv));
if (!var->n_var_parts)
return NULL;
-#ifdef ENABLE_CHECKING
- gcc_assert (var->var_part[0].offset == 0);
- gcc_assert (loc != dv_as_opaque (var->dv));
-#endif
+ gcc_checking_assert (var->var_part[0].offset == 0);
+ gcc_checking_assert (loc != dv_as_opaque (var->dv));
loc_code = GET_CODE (loc);
for (node = var->var_part[0].loc_chain; node; node = node->next)
while (node->next && GET_CODE (node->next->loc) == VALUE)
{
node = node->next;
-#ifdef ENABLE_CHECKING
- gcc_assert (!canon_value_cmp (node->loc,
- dv_as_value (var->dv)));
-#endif
+ gcc_checking_assert (!canon_value_cmp (node->loc,
+ dv_as_value (var->dv)));
if (loc == node->loc)
return node;
}
continue;
}
-#ifdef ENABLE_CHECKING
- gcc_assert (node == var->var_part[0].loc_chain);
- gcc_assert (!node->next);
-#endif
+ gcc_checking_assert (node == var->var_part[0].loc_chain);
+ gcc_checking_assert (!node->next);
dv = dv_from_value (node->loc);
rvar = (variable) htab_find_with_hash (vars, dv, dv_htab_hash (dv));
{
location_chain s2node;
-#ifdef ENABLE_CHECKING
- gcc_assert (dv_onepart_p (s2var->dv));
-#endif
+ gcc_checking_assert (dv_onepart_p (s2var->dv));
if (s2var->n_var_parts)
{
-#ifdef ENABLE_CHECKING
- gcc_assert (s2var->var_part[0].offset == 0);
-#endif
+ gcc_checking_assert (s2var->var_part[0].offset == 0);
s2node = s2var->var_part[0].loc_chain;
for (; s1node && s2node;
if (DEBUG_TEMP_UID (DEBUG_EXPR_TREE_DECL (x))
< DEBUG_TEMP_UID (DEBUG_EXPR_TREE_DECL (y)))
return -1;
-#ifdef ENABLE_CHECKING
- gcc_assert (DEBUG_TEMP_UID (DEBUG_EXPR_TREE_DECL (x))
- > DEBUG_TEMP_UID (DEBUG_EXPR_TREE_DECL (y)));
-#endif
+ gcc_checking_assert (DEBUG_TEMP_UID (DEBUG_EXPR_TREE_DECL (x))
+ > DEBUG_TEMP_UID (DEBUG_EXPR_TREE_DECL (y)));
return 1;
}
dstslot = shared_hash_find_slot_noinsert_1 (dst->vars, dv, dvhash);
gcc_assert (*dstslot == dvar);
canonicalize_values_star (dstslot, dst);
-#ifdef ENABLE_CHECKING
- gcc_assert (dstslot
- == shared_hash_find_slot_noinsert_1 (dst->vars, dv, dvhash));
-#endif
+ gcc_checking_assert (dstslot
+ == shared_hash_find_slot_noinsert_1 (dst->vars,
+ dv, dvhash));
dvar = (variable)*dstslot;
}
else
dstslot = shared_hash_find_slot_noinsert_1 (dst->vars, dv, dvhash);
gcc_assert (*dstslot == dvar);
canonicalize_values_star (dstslot, dst);
-#ifdef ENABLE_CHECKING
- gcc_assert (dstslot
- == shared_hash_find_slot_noinsert_1 (dst->vars,
- dv, dvhash));
-#endif
+ gcc_checking_assert (dstslot
+ == shared_hash_find_slot_noinsert_1 (dst->vars,
+ dv, dvhash));
dvar = (variable)*dstslot;
}
}
case XOR:
case NOT:
case NEG:
+ if (!REG_P (XEXP (src, 0)))
+ return NULL_RTX;
+ break;
case SIGN_EXTEND:
case ZERO_EXTEND:
+ if (!REG_P (XEXP (src, 0)) && !MEM_P (XEXP (src, 0)))
+ return NULL_RTX;
break;
default:
return NULL_RTX;
}
- if (!REG_P (XEXP (src, 0))
- || !SCALAR_INT_MODE_P (GET_MODE (src))
- || XEXP (src, 0) == cfa_base_rtx)
+ if (!SCALAR_INT_MODE_P (GET_MODE (src)) || XEXP (src, 0) == cfa_base_rtx)
return NULL_RTX;
v = cselib_lookup (XEXP (src, 0), GET_MODE (XEXP (src, 0)), 0);
dataflow_set_copy (&old_out, out);
dataflow_set_copy (out, in);
- for (i = 0; VEC_iterate (micro_operation, VTI (bb)->mos, i, mo); i++)
+ FOR_EACH_VEC_ELT (micro_operation, VTI (bb)->mos, i, mo)
{
rtx insn = mo->insn;
data.vars = vars;
data.dummy = false;
data.cur_loc_changed = false;
- loc = cselib_expand_value_rtx_cb (loc, scratch_regs, 5,
+ loc = cselib_expand_value_rtx_cb (loc, scratch_regs, 8,
vt_expand_loc_callback, &data);
if (loc && MEM_P (loc))
data.vars = vars;
data.dummy = true;
data.cur_loc_changed = false;
- ret = cselib_dummy_expand_value_rtx_cb (loc, scratch_regs, 5,
+ ret = cselib_dummy_expand_value_rtx_cb (loc, scratch_regs, 8,
vt_expand_loc_callback, &data);
*pcur_loc_changed = data.cur_loc_changed;
return ret;
#ifdef ENABLE_RTL_CHECKING
/* Used to verify that cur_loc_changed updating is safe.  */
static struct pointer_map_t *emitted_notes;

/* simplify_replace_fn_rtx callback: strip REG_POINTER from REGs and
   MEM_POINTER from MEMs in order to avoid differences in commutative
   operand simplification.  Returns a fresh flag-free rtx when X
   carries a pointer flag, NULL_RTX (meaning "no replacement")
   otherwise.  */
static rtx
strip_pointer_flags (rtx x, const_rtx old_rtx ATTRIBUTE_UNUSED,
		     void *data ATTRIBUTE_UNUSED)
{
  if (REG_P (x) && REG_POINTER (x))
    return gen_rtx_REG (GET_MODE (x), REGNO (x));
  if (MEM_P (x) && MEM_POINTER (x))
    return gen_rtx_MEM (GET_MODE (x), XEXP (x, 0));
  return NULL_RTX;
}
#endif
/* Emit the NOTE_INSN_VAR_LOCATION for variable *VARP. DATA contains
rtx pnote = (rtx) *note_slot;
if (!var->cur_loc_changed && (pnote || PAT_VAR_LOCATION_LOC (note_vl)))
{
+ rtx old_vl, new_vl;
gcc_assert (pnote);
- gcc_assert (rtx_equal_p (PAT_VAR_LOCATION_LOC (pnote),
- PAT_VAR_LOCATION_LOC (note_vl)));
+ old_vl = PAT_VAR_LOCATION_LOC (pnote);
+ new_vl = PAT_VAR_LOCATION_LOC (note_vl);
+ if (!rtx_equal_p (old_vl, new_vl))
+ {
+ /* There might be differences caused by REG_POINTER
+ differences. REG_POINTER affects
+ swap_commutative_operands_p. */
+ old_vl = simplify_replace_fn_rtx (old_vl, NULL_RTX,
+ strip_pointer_flags, NULL);
+ new_vl = simplify_replace_fn_rtx (new_vl, NULL_RTX,
+ strip_pointer_flags, NULL);
+ gcc_assert (rtx_equal_p (old_vl, new_vl));
+ PAT_VAR_LOCATION_LOC (note_vl) = new_vl;
+ }
}
*note_slot = (void *) note_vl;
}
dataflow_set_clear (set);
dataflow_set_copy (set, &VTI (bb)->in);
- for (i = 0; VEC_iterate (micro_operation, VTI (bb)->mos, i, mo); i++)
+ FOR_EACH_VEC_ELT (micro_operation, VTI (bb)->mos, i, mo)
{
rtx insn = mo->insn;
unsigned int i;
rtx val;
- for (i = 0; VEC_iterate (rtx, preserved_values, i, val); i++)
+ FOR_EACH_VEC_ELT (rtx, preserved_values, i, val)
add_cselib_value_chains (dv_from_value (val));
changed_variables_stack = VEC_alloc (variable, heap, 40);
changed_values_stack = VEC_alloc (rtx, heap, 40);
unsigned int i;
rtx val;
- for (i = 0; VEC_iterate (rtx, preserved_values, i, val); i++)
+ FOR_EACH_VEC_ELT (rtx, preserved_values, i, val)
remove_cselib_value_chains (dv_from_value (val));
gcc_assert (htab_elements (value_chains) == 0);
}