+ if (!dv_onepart_p (dv))
+ return 1;
+
+ gcc_assert (var->n_var_parts == 1);
+ node = var->var_part[0].loc_chain;
+ gcc_assert (node);
+
+ while ((next = node->next))
+ {
+ gcc_assert (loc_cmp (node->loc, next->loc) < 0);
+ node = next;
+ }
+
+ return 1;
+}
+#endif
+
+ /* Mark with VALUE_RECURSED_INTO values that have neighbors that are
+ more likely to be chosen as canonical for an equivalence set.
+ Ensure less likely values can reach more likely neighbors, making
+ the connections bidirectional.  This is an htab_traverse callback
+ over SET->vars (DATA is the dataflow_set); it always returns 1 so
+ the traversal visits every slot. */
+
+ static int
+ canonicalize_values_mark (void **slot, void *data)
+ {
+ dataflow_set *set = (dataflow_set *)data;
+ variable var = (variable) *slot;
+ decl_or_value dv = var->dv;
+ rtx val;
+ location_chain node;
+
+ /* Only VALUE dvs take part in equivalence sets.  */
+ if (!dv_is_value_p (dv))
+ return 1;
+
+ gcc_checking_assert (var->n_var_parts == 1);
+
+ val = dv_as_value (dv);
+
+ for (node = var->var_part[0].loc_chain; node; node = node->next)
+ if (GET_CODE (node->loc) == VALUE)
+ {
+ if (canon_value_cmp (node->loc, val))
+ /* The neighbor is more likely to be canonical; mark VAL so
+ canonicalize_values_star will revisit it.  */
+ VALUE_RECURSED_INTO (val) = true;
+ else
+ {
+ decl_or_value odv = dv_from_value (node->loc);
+ void **oslot = shared_hash_find_slot_noinsert (set->vars, odv);
+
+ /* Add the reverse link so the less likely value can reach
+ VAL, making the equivalence bidirectional.  */
+ oslot = set_slot_part (set, val, oslot, odv, 0,
+ node->init, NULL_RTX);
+
+ VALUE_RECURSED_INTO (node->loc) = true;
+ }
+ }
+
+ return 1;
+ }
+
+ /* Remove redundant entries from equivalence lists in onepart
+ variables, canonicalizing equivalence sets into star shapes.
+ Hash-table traversal callback; DATA is the dataflow_set. */
+
+ static int
+ canonicalize_values_star (void **slot, void *data)
+ {
+ dataflow_set *set = (dataflow_set *)data;
+ variable var = (variable) *slot;
+ decl_or_value dv = var->dv;
+ location_chain node;
+ decl_or_value cdv;
+ rtx val, cval;
+ void **cslot;
+ bool has_value;
+ bool has_marks;
+
+ if (!dv_onepart_p (dv))
+ return 1;
+
+ gcc_checking_assert (var->n_var_parts == 1);
+
+ if (dv_is_value_p (dv))
+ {
+ cval = dv_as_value (dv);
+ /* Only values marked by canonicalize_values_mark (or by a
+ previous iteration below) need processing.  */
+ if (!VALUE_RECURSED_INTO (cval))
+ return 1;
+ VALUE_RECURSED_INTO (cval) = false;
+ }
+ else
+ cval = NULL_RTX;
+
+ /* Scan VAR's location list for the most canonical VALUE, starting
+ from CVAL (VAR's own value, or NULL for a decl).  */
+ restart:
+ val = cval;
+ has_value = false;
+ has_marks = false;
+
+ gcc_assert (var->n_var_parts == 1);
+
+ for (node = var->var_part[0].loc_chain; node; node = node->next)
+ if (GET_CODE (node->loc) == VALUE)
+ {
+ has_value = true;
+ if (VALUE_RECURSED_INTO (node->loc))
+ has_marks = true;
+ if (canon_value_cmp (node->loc, cval))
+ cval = node->loc;
+ }
+
+ if (!has_value)
+ return 1;
+
+ if (cval == val)
+ {
+ /* VAL is already the most canonical value it can reach
+ directly.  */
+ if (!has_marks || dv_is_decl_p (dv))
+ return 1;
+
+ /* Keep it marked so that we revisit it, either after visiting a
+ child node, or after visiting a new parent that might be
+ found out. */
+ VALUE_RECURSED_INTO (val) = true;
+
+ for (node = var->var_part[0].loc_chain; node; node = node->next)
+ if (GET_CODE (node->loc) == VALUE
+ && VALUE_RECURSED_INTO (node->loc))
+ {
+ cval = node->loc;
+ /* Entry point for re-processing CVAL after its variable was
+ canonicalized below (see the goto at the end).  */
+ restart_with_cval:
+ VALUE_RECURSED_INTO (cval) = false;
+ dv = dv_from_value (cval);
+ slot = shared_hash_find_slot_noinsert (set->vars, dv);
+ if (!slot)
+ {
+ gcc_assert (dv_is_decl_p (var->dv));
+ /* The canonical value was reset and dropped.
+ Remove it. */
+ clobber_variable_part (set, NULL, var->dv, 0, NULL);
+ return 1;
+ }
+ var = (variable)*slot;
+ gcc_assert (dv_is_value_p (var->dv));
+ if (var->n_var_parts == 0)
+ return 1;
+ gcc_assert (var->n_var_parts == 1);
+ goto restart;
+ }
+
+ VALUE_RECURSED_INTO (val) = false;
+
+ return 1;
+ }
+
+ /* Push values to the canonical one. */
+ cdv = dv_from_value (cval);
+ cslot = shared_hash_find_slot_noinsert (set->vars, cdv);
+
+ for (node = var->var_part[0].loc_chain; node; node = node->next)
+ if (node->loc != cval)
+ {
+ cslot = set_slot_part (set, node->loc, cslot, cdv, 0,
+ node->init, NULL_RTX);
+ if (GET_CODE (node->loc) == VALUE)
+ {
+ decl_or_value ndv = dv_from_value (node->loc);
+
+ set_variable_part (set, cval, ndv, 0, node->init, NULL_RTX,
+ NO_INSERT);
+
+ if (canon_value_cmp (node->loc, val))
+ {
+ /* If it could have been a local minimum, it's not any more,
+ since it's now neighbor to cval, so it may have to push
+ to it. Conversely, if it wouldn't have prevailed over
+ val, then whatever mark it has is fine: if it was to
+ push, it will now push to a more canonical node, but if
+ it wasn't, then it has already pushed any values it might
+ have to. */
+ VALUE_RECURSED_INTO (node->loc) = true;
+ /* Make sure we visit node->loc by ensuring that cval is
+ visited too. */
+ VALUE_RECURSED_INTO (cval) = true;
+ }
+ else if (!VALUE_RECURSED_INTO (node->loc))
+ /* If we have no need to "recurse" into this node, it's
+ already "canonicalized", so drop the link to the old
+ parent. */
+ clobber_variable_part (set, cval, ndv, 0, NULL);
+ }
+ else if (GET_CODE (node->loc) == REG)
+ {
+ attrs list = set->regs[REGNO (node->loc)], *listp;
+
+ /* Change an existing attribute referring to dv so that it
+ refers to cdv, removing any duplicate this might
+ introduce, and checking that no previous duplicates
+ existed, all in a single pass. */
+
+ while (list)
+ {
+ if (list->offset == 0
+ && (dv_as_opaque (list->dv) == dv_as_opaque (dv)
+ || dv_as_opaque (list->dv) == dv_as_opaque (cdv)))
+ break;
+
+ list = list->next;
+ }
+
+ gcc_assert (list);
+ if (dv_as_opaque (list->dv) == dv_as_opaque (dv))
+ {
+ /* Found DV first: retarget it to CDV and drop any later
+ attribute already naming CDV.  */
+ list->dv = cdv;
+ for (listp = &list->next; (list = *listp); listp = &list->next)
+ {
+ if (list->offset)
+ continue;
+
+ if (dv_as_opaque (list->dv) == dv_as_opaque (cdv))
+ {
+ *listp = list->next;
+ pool_free (attrs_pool, list);
+ list = *listp;
+ break;
+ }
+
+ gcc_assert (dv_as_opaque (list->dv) != dv_as_opaque (dv));
+ }
+ }
+ else if (dv_as_opaque (list->dv) == dv_as_opaque (cdv))
+ {
+ /* Found CDV first: drop any later attribute still naming
+ DV.  */
+ for (listp = &list->next; (list = *listp); listp = &list->next)
+ {
+ if (list->offset)
+ continue;
+
+ if (dv_as_opaque (list->dv) == dv_as_opaque (dv))
+ {
+ *listp = list->next;
+ pool_free (attrs_pool, list);
+ list = *listp;
+ break;
+ }
+
+ gcc_assert (dv_as_opaque (list->dv) != dv_as_opaque (cdv));
+ }
+ }
+ else
+ gcc_unreachable ();
+
+#if ENABLE_CHECKING
+ /* Verify no further attribute names DV or CDV at offset 0.  */
+ while (list)
+ {
+ if (list->offset == 0
+ && (dv_as_opaque (list->dv) == dv_as_opaque (dv)
+ || dv_as_opaque (list->dv) == dv_as_opaque (cdv)))
+ gcc_unreachable ();
+
+ list = list->next;
+ }
+#endif
+ }
+ }
+
+ if (val)
+ cslot = set_slot_part (set, val, cslot, cdv, 0,
+ VAR_INIT_STATUS_INITIALIZED, NULL_RTX);
+
+ slot = clobber_slot_part (set, cval, slot, 0, NULL);
+
+ /* Variable may have been unshared. */
+ var = (variable)*slot;
+ gcc_checking_assert (var->n_var_parts && var->var_part[0].loc_chain->loc == cval
+ && var->var_part[0].loc_chain->next == NULL);
+
+ if (VALUE_RECURSED_INTO (cval))
+ goto restart_with_cval;
+
+ return 1;
+ }
+
+ /* Bind one-part variables to the canonical value in an equivalence
+ set. Not doing this causes dataflow convergence failure in rare
+ circumstances, see PR42873. Unfortunately we can't do this
+ efficiently as part of canonicalize_values_star, since we may not
+ have determined or even seen the canonical value of a set when we
+ get to a variable that references another member of the set. */
+
+ static int
+ canonicalize_vars_star (void **slot, void *data)
+ {
+ dataflow_set *set = (dataflow_set *)data;
+ variable var = (variable) *slot;
+ decl_or_value dv = var->dv;
+ location_chain node;
+ rtx cval;
+ decl_or_value cdv;
+ void **cslot;
+ variable cvar;
+ location_chain cnode;
+
+ /* Only decl (non-value) one-part variables are handled here.  */
+ if (!dv_onepart_p (dv) || dv_is_value_p (dv))
+ return 1;
+
+ gcc_assert (var->n_var_parts == 1);
+
+ node = var->var_part[0].loc_chain;
+
+ if (GET_CODE (node->loc) != VALUE)
+ return 1;
+
+ gcc_assert (!node->next);
+ cval = node->loc;
+
+ /* Push values to the canonical one. */
+ cdv = dv_from_value (cval);
+ cslot = shared_hash_find_slot_noinsert (set->vars, cdv);
+ if (!cslot)
+ return 1;
+ cvar = (variable)*cslot;
+ gcc_assert (cvar->n_var_parts == 1);
+
+ cnode = cvar->var_part[0].loc_chain;
+
+ /* CVAL is canonical if its value list contains non-VALUEs or VALUEs
+ that are not "more canonical" than it. */
+ if (GET_CODE (cnode->loc) != VALUE
+ || !canon_value_cmp (cnode->loc, cval))
+ return 1;
+
+ /* CVAL was found to be non-canonical. Change the variable to point
+ to the canonical VALUE. */
+ gcc_assert (!cnode->next);
+ cval = cnode->loc;
+
+ slot = set_slot_part (set, cval, slot, dv, 0,
+ node->init, node->set_src);
+ slot = clobber_slot_part (set, cval, slot, 0, node->set_src);
+
+ return 1;
+ }
+
+ /* Combine variable or value S1VAR (in DSM->cur) with the
+ corresponding entry in DSM->src. Multi-part variables are combined
+ with variable_union, whereas onepart dvs are combined with
+ intersection. */
+
+ static int
+ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
+ {
+ dataflow_set *dst = dsm->dst;
+ void **dstslot;
+ variable s2var, dvar = NULL;
+ decl_or_value dv = s1var->dv;
+ bool onepart = dv_onepart_p (dv);
+ rtx val;
+ hashval_t dvhash;
+ location_chain node, *nodep;
+
+ /* If the incoming onepart variable has an empty location list, then
+ the intersection will be just as empty. For other variables,
+ it's always union. */
+ gcc_checking_assert (s1var->n_var_parts
+ && s1var->var_part[0].loc_chain);
+
+ if (!onepart)
+ return variable_union (s1var, dst);
+
+ gcc_checking_assert (s1var->n_var_parts == 1
+ && s1var->var_part[0].offset == 0);
+
+ dvhash = dv_htab_hash (dv);
+ if (dv_is_value_p (dv))
+ val = dv_as_value (dv);
+ else
+ val = NULL;
+
+ s2var = shared_hash_find_1 (dsm->src->vars, dv, dvhash);
+ if (!s2var)
+ {
+ /* Not present in src: the intersection is empty.  */
+ dst_can_be_shared = false;
+ return 1;
+ }
+
+ dsm->src_onepart_cnt--;
+ gcc_assert (s2var->var_part[0].loc_chain
+ && s2var->n_var_parts == 1
+ && s2var->var_part[0].offset == 0);
+
+ dstslot = shared_hash_find_slot_noinsert_1 (dst->vars, dv, dvhash);
+ if (dstslot)
+ {
+ dvar = (variable)*dstslot;
+ gcc_assert (dvar->refcount == 1
+ && dvar->n_var_parts == 1
+ && dvar->var_part[0].offset == 0);
+ nodep = &dvar->var_part[0].loc_chain;
+ }
+ else
+ {
+ nodep = &node;
+ node = NULL;
+ }
+
+ if (!dstslot && !onepart_variable_different_p (s1var, s2var))
+ {
+ /* Both inputs are identical; share s2var instead of building
+ the intersection.  */
+ dstslot = shared_hash_find_slot_unshare_1 (&dst->vars, dv,
+ dvhash, INSERT);
+ *dstslot = dvar = s2var;
+ dvar->refcount++;
+ }
+ else
+ {
+ dst_can_be_shared = false;
+
+ intersect_loc_chains (val, nodep, dsm,
+ s1var->var_part[0].loc_chain, s2var);
+
+ if (!dstslot)
+ {
+ if (node)
+ {
+ /* Build a fresh destination variable holding the
+ intersected location chain.  */
+ dvar = (variable) pool_alloc (dv_pool (dv));
+ dvar->dv = dv;
+ dvar->refcount = 1;
+ dvar->n_var_parts = 1;
+ dvar->cur_loc_changed = false;
+ dvar->in_changed_variables = false;
+ dvar->var_part[0].offset = 0;
+ dvar->var_part[0].loc_chain = node;
+ dvar->var_part[0].cur_loc = NULL;
+
+ dstslot
+ = shared_hash_find_slot_unshare_1 (&dst->vars, dv, dvhash,
+ INSERT);
+ gcc_assert (!*dstslot);
+ *dstslot = dvar;
+ }
+ else
+ return 1;
+ }
+ }
+
+ nodep = &dvar->var_part[0].loc_chain;
+ while ((node = *nodep))
+ {
+ location_chain *nextp = &node->next;
+
+ if (GET_CODE (node->loc) == REG)
+ {
+ attrs list;
+
+ for (list = dst->regs[REGNO (node->loc)]; list; list = list->next)
+ if (GET_MODE (node->loc) == GET_MODE (list->loc)
+ && dv_is_value_p (list->dv))
+ break;
+
+ if (!list)
+ attrs_list_insert (&dst->regs[REGNO (node->loc)],
+ dv, 0, node->loc);
+ /* If this value became canonical for another value that had
+ this register, we want to leave it alone. */
+ else if (dv_as_value (list->dv) != val)
+ {
+ dstslot = set_slot_part (dst, dv_as_value (list->dv),
+ dstslot, dv, 0,
+ node->init, NULL_RTX);
+ dstslot = delete_slot_part (dst, node->loc, dstslot, 0);
+
+ /* Since nextp points into the removed node, we can't
+ use it. The pointer to the next node moved to nodep.
+ However, if the variable we're walking is unshared
+ during our walk, we'll keep walking the location list
+ of the previously-shared variable, in which case the
+ node won't have been removed, and we'll want to skip
+ it. That's why we test *nodep here. */
+ if (*nodep != node)
+ nextp = nodep;
+ }
+ }
+ else
+ /* Canonicalization puts registers first, so we don't have to
+ walk it all. */
+ break;
+ nodep = nextp;
+ }
+
+ /* Reload DVAR in case the variable was unshared above.  */
+ if (dvar != (variable)*dstslot)
+ dvar = (variable)*dstslot;
+ nodep = &dvar->var_part[0].loc_chain;
+
+ if (val)
+ {
+ /* Mark all referenced nodes for canonicalization, and make sure
+ we have mutual equivalence links. */
+ VALUE_RECURSED_INTO (val) = true;
+ for (node = *nodep; node; node = node->next)
+ if (GET_CODE (node->loc) == VALUE)
+ {
+ VALUE_RECURSED_INTO (node->loc) = true;
+ set_variable_part (dst, val, dv_from_value (node->loc), 0,
+ node->init, NULL, INSERT);
+ }
+
+ dstslot = shared_hash_find_slot_noinsert_1 (dst->vars, dv, dvhash);
+ gcc_assert (*dstslot == dvar);
+ canonicalize_values_star (dstslot, dst);
+ gcc_checking_assert (dstslot
+ == shared_hash_find_slot_noinsert_1 (dst->vars,
+ dv, dvhash));
+ dvar = (variable)*dstslot;
+ }
+ else
+ {
+ bool has_value = false, has_other = false;
+
+ /* If we have one value and anything else, we're going to
+ canonicalize this, so make sure all values have an entry in
+ the table and are marked for canonicalization. */
+ for (node = *nodep; node; node = node->next)
+ {
+ if (GET_CODE (node->loc) == VALUE)
+ {
+ /* If this was marked during register canonicalization,
+ we know we have to canonicalize values. */
+ if (has_value)
+ has_other = true;
+ has_value = true;
+ if (has_other)
+ break;
+ }
+ else
+ {
+ has_other = true;
+ if (has_value)
+ break;
+ }
+ }
+
+ if (has_value && has_other)
+ {
+ for (node = *nodep; node; node = node->next)
+ {
+ if (GET_CODE (node->loc) == VALUE)
+ {
+ decl_or_value dv = dv_from_value (node->loc);
+ void **slot = NULL;
+
+ if (shared_hash_shared (dst->vars))
+ slot = shared_hash_find_slot_noinsert (dst->vars, dv);
+ if (!slot)
+ slot = shared_hash_find_slot_unshare (&dst->vars, dv,
+ INSERT);
+ if (!*slot)
+ {
+ /* Create an empty entry so canonicalization has
+ somewhere to record equivalences.  */
+ variable var = (variable) pool_alloc (dv_pool (dv));
+ var->dv = dv;
+ var->refcount = 1;
+ var->n_var_parts = 1;
+ var->cur_loc_changed = false;
+ var->in_changed_variables = false;
+ var->var_part[0].offset = 0;
+ var->var_part[0].loc_chain = NULL;
+ var->var_part[0].cur_loc = NULL;
+ *slot = var;
+ }
+
+ VALUE_RECURSED_INTO (node->loc) = true;
+ }
+ }
+
+ dstslot = shared_hash_find_slot_noinsert_1 (dst->vars, dv, dvhash);
+ gcc_assert (*dstslot == dvar);
+ canonicalize_values_star (dstslot, dst);
+ gcc_checking_assert (dstslot
+ == shared_hash_find_slot_noinsert_1 (dst->vars,
+ dv, dvhash));
+ dvar = (variable)*dstslot;
+ }
+ }
+
+ /* If the result ended up identical to one of the inputs, share that
+ input instead of keeping a private copy.  */
+ if (!onepart_variable_different_p (dvar, s2var))
+ {
+ variable_htab_free (dvar);
+ *dstslot = dvar = s2var;
+ dvar->refcount++;
+ }
+ else if (s2var != s1var && !onepart_variable_different_p (dvar, s1var))
+ {
+ variable_htab_free (dvar);
+ *dstslot = dvar = s1var;
+ dvar->refcount++;
+ dst_can_be_shared = false;
+ }
+ else
+ dst_can_be_shared = false;
+
+ return 1;
+ }
+
+ /* Copy S2VAR (in DSM->src) to DSM->dst if the variable is a
+ multi-part variable. Unions of multi-part variables and
+ intersections of one-part ones will be handled in
+ variable_merge_over_cur(). */
+
+ static int
+ variable_merge_over_src (variable s2var, struct dfset_merge *dsm)
+ {
+ dataflow_set *dst = dsm->dst;
+ decl_or_value dv = s2var->dv;
+ bool onepart = dv_onepart_p (dv);
+
+ if (!onepart)
+ {
+ /* Share the multi-part variable directly in the destination.  */
+ void **dstp = shared_hash_find_slot (dst->vars, dv);
+ *dstp = s2var;
+ s2var->refcount++;
+ return 1;
+ }
+
+ /* One-part: count it; variable_merge_over_cur does the work.  */
+ dsm->src_onepart_cnt++;
+ return 1;
+ }
+
+ /* Combine dataflow set information from SRC2 into DST; the previous
+ contents of DST act as the other merge input (saved in CUR below)
+ and are destroyed before returning. */
+
+ static void
+ dataflow_set_merge (dataflow_set *dst, dataflow_set *src2)
+ {
+ dataflow_set cur = *dst;
+ dataflow_set *src1 = &cur;
+ struct dfset_merge dsm;
+ int i;
+ size_t src1_elems, src2_elems;
+ htab_iterator hi;
+ variable var;
+
+ src1_elems = htab_elements (shared_hash_htab (src1->vars));
+ src2_elems = htab_elements (shared_hash_htab (src2->vars));
+ /* Re-initialize DST with a fresh, adequately sized hash table.  */
+ dataflow_set_init (dst);
+ dst->stack_adjust = cur.stack_adjust;
+ shared_hash_destroy (dst->vars);
+ dst->vars = (shared_hash) pool_alloc (shared_hash_pool);
+ dst->vars->refcount = 1;
+ dst->vars->htab
+ = htab_create (MAX (src1_elems, src2_elems), variable_htab_hash,
+ variable_htab_eq, variable_htab_free);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ attrs_list_mpdv_union (&dst->regs[i], src1->regs[i], src2->regs[i]);
+
+ dsm.dst = dst;
+ dsm.src = src2;
+ dsm.cur = src1;
+ dsm.src_onepart_cnt = 0;
+
+ FOR_EACH_HTAB_ELEMENT (shared_hash_htab (dsm.src->vars), var, variable, hi)
+ variable_merge_over_src (var, &dsm);
+ FOR_EACH_HTAB_ELEMENT (shared_hash_htab (dsm.cur->vars), var, variable, hi)
+ variable_merge_over_cur (var, &dsm);
+
+ /* Any one-part entries in src with no counterpart in cur make the
+ result differ from src, so it can't be shared.  */
+ if (dsm.src_onepart_cnt)
+ dst_can_be_shared = false;
+
+ dataflow_set_destroy (src1);
+ }
+
+ /* Mark register equivalences.  For every hard register with more
+ than one attribute, pick a canonical VALUE per machine mode, link
+ the other values and variables to it, then canonicalize the
+ resulting equivalence sets into star shapes. */
+
+ static void
+ dataflow_set_equiv_regs (dataflow_set *set)
+ {
+ int i;
+ attrs list, *listp;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ rtx canon[NUM_MACHINE_MODES];
+
+ /* If the list is empty or one entry, no need to canonicalize
+ anything. */
+ if (set->regs[i] == NULL || set->regs[i]->next == NULL)
+ continue;
+
+ memset (canon, 0, sizeof (canon));
+
+ /* First pass: pick the most canonical VALUE for each mode.  */
+ for (list = set->regs[i]; list; list = list->next)
+ if (list->offset == 0 && dv_is_value_p (list->dv))
+ {
+ rtx val = dv_as_value (list->dv);
+ rtx *cvalp = &canon[(int)GET_MODE (val)];
+ rtx cval = *cvalp;
+
+ if (canon_value_cmp (val, cval))
+ *cvalp = val;
+ }
+
+ /* Second pass: make every other value and one-part variable an
+ equivalence neighbor of its mode's canonical value, marking
+ values for the canonicalization pass below.  */
+ for (list = set->regs[i]; list; list = list->next)
+ if (list->offset == 0 && dv_onepart_p (list->dv))
+ {
+ rtx cval = canon[(int)GET_MODE (list->loc)];
+
+ if (!cval)
+ continue;
+
+ if (dv_is_value_p (list->dv))
+ {
+ rtx val = dv_as_value (list->dv);
+
+ if (val == cval)
+ continue;
+
+ VALUE_RECURSED_INTO (val) = true;
+ set_variable_part (set, val, dv_from_value (cval), 0,
+ VAR_INIT_STATUS_INITIALIZED,
+ NULL, NO_INSERT);
+ }
+
+ VALUE_RECURSED_INTO (cval) = true;
+ set_variable_part (set, cval, list->dv, 0,
+ VAR_INIT_STATUS_INITIALIZED, NULL, NO_INSERT);
+ }
+
+ /* Third pass: canonicalize the marked entries; the attribute
+ list may be edited underneath us, hence the *listp re-check.  */
+ for (listp = &set->regs[i]; (list = *listp);
+ listp = list ? &list->next : listp)
+ if (list->offset == 0 && dv_onepart_p (list->dv))
+ {
+ rtx cval = canon[(int)GET_MODE (list->loc)];
+ void **slot;
+
+ if (!cval)
+ continue;
+
+ if (dv_is_value_p (list->dv))
+ {
+ rtx val = dv_as_value (list->dv);
+ if (!VALUE_RECURSED_INTO (val))
+ continue;
+ }
+
+ slot = shared_hash_find_slot_noinsert (set->vars, list->dv);
+ canonicalize_values_star (slot, set);
+ if (*listp != list)
+ list = NULL;
+ }
+ }
+ }
+
+ /* Remove any redundant values in the location list of VAR, which must
+ be unshared and 1-part.  Uses VALUE_RECURSED_INTO as a "seen" bit:
+ the first pass marks each VALUE and deletes already-marked
+ duplicates; the second pass clears the marks again. */
+
+ static void
+ remove_duplicate_values (variable var)
+ {
+ location_chain node, *nodep;
+
+ gcc_assert (dv_onepart_p (var->dv));
+ gcc_assert (var->n_var_parts == 1);
+ gcc_assert (var->refcount == 1);
+
+ for (nodep = &var->var_part[0].loc_chain; (node = *nodep); )
+ {
+ if (GET_CODE (node->loc) == VALUE)
+ {
+ if (VALUE_RECURSED_INTO (node->loc))
+ {
+ /* Remove duplicate value node. */
+ *nodep = node->next;
+ pool_free (loc_chain_pool, node);
+ continue;
+ }
+ else
+ VALUE_RECURSED_INTO (node->loc) = true;
+ }
+ nodep = &node->next;
+ }
+
+ /* Clear the marks so later passes see them unset.  */
+ for (node = var->var_part[0].loc_chain; node; node = node->next)
+ if (GET_CODE (node->loc) == VALUE)
+ {
+ gcc_assert (VALUE_RECURSED_INTO (node->loc));
+ VALUE_RECURSED_INTO (node->loc) = false;
+ }
+ }
+
+
+/* Hash table iteration argument passed to variable_post_merge. */
+struct dfset_post_merge
+{
+ /* The new input set for the current block. */
+ dataflow_set *set;
+ /* Pointer to the permanent input set for the current block, or
+ NULL. */
+ dataflow_set **permp;
+};
+
+ /* Create values for incoming expressions associated with one-part
+ variables that don't have value numbers for them.  Traversal
+ callback; INFO is a struct dfset_post_merge. */
+
+ static int
+ variable_post_merge_new_vals (void **slot, void *info)
+ {
+ struct dfset_post_merge *dfpm = (struct dfset_post_merge *)info;
+ dataflow_set *set = dfpm->set;
+ variable var = (variable)*slot;
+ location_chain node;
+
+ if (!dv_onepart_p (var->dv) || !var->n_var_parts)
+ return 1;
+
+ gcc_assert (var->n_var_parts == 1);
+
+ if (dv_is_decl_p (var->dv))
+ {
+ bool check_dupes = false;
+
+ restart:
+ for (node = var->var_part[0].loc_chain; node; node = node->next)
+ {
+ if (GET_CODE (node->loc) == VALUE)
+ gcc_assert (!VALUE_RECURSED_INTO (node->loc));
+ else if (GET_CODE (node->loc) == REG)
+ {
+ attrs att, *attp, *curp = NULL;
+
+ /* We're going to modify the location list in place, so
+ make sure we hold the only reference first.  */
+ if (var->refcount != 1)
+ {
+ slot = unshare_variable (set, slot, var,
+ VAR_INIT_STATUS_INITIALIZED);
+ var = (variable)*slot;
+ goto restart;
+ }
+
+ /* Look for a value already holding this register, noting
+ the attribute for VAR's own decl on the way.  */
+ for (attp = &set->regs[REGNO (node->loc)]; (att = *attp);
+ attp = &att->next)
+ if (att->offset == 0
+ && GET_MODE (att->loc) == GET_MODE (node->loc))
+ {
+ if (dv_is_value_p (att->dv))
+ {
+ rtx cval = dv_as_value (att->dv);
+ node->loc = cval;
+ check_dupes = true;
+ break;
+ }
+ else if (dv_as_opaque (att->dv) == dv_as_opaque (var->dv))
+ curp = attp;
+ }
+
+ if (!curp)
+ {
+ curp = attp;
+ while (*curp)
+ if ((*curp)->offset == 0
+ && GET_MODE ((*curp)->loc) == GET_MODE (node->loc)
+ && dv_as_opaque ((*curp)->dv) == dv_as_opaque (var->dv))
+ break;
+ else
+ curp = &(*curp)->next;
+ gcc_assert (*curp);
+ }
+
+ if (!att)
+ {
+ /* No value holds this register yet; find or create one
+ in the permanent set.  */
+ decl_or_value cdv;
+ rtx cval;
+
+ if (!*dfpm->permp)
+ {
+ *dfpm->permp = XNEW (dataflow_set);
+ dataflow_set_init (*dfpm->permp);
+ }
+
+ for (att = (*dfpm->permp)->regs[REGNO (node->loc)];
+ att; att = att->next)
+ if (GET_MODE (att->loc) == GET_MODE (node->loc))
+ {
+ gcc_assert (att->offset == 0
+ && dv_is_value_p (att->dv));
+ val_reset (set, att->dv);
+ break;
+ }
+
+ if (att)
+ {
+ cdv = att->dv;
+ cval = dv_as_value (cdv);
+ }
+ else
+ {
+ /* Create a unique value to hold this register,
+ that ought to be found and reused in
+ subsequent rounds. */
+ cselib_val *v;
+ gcc_assert (!cselib_lookup (node->loc,
+ GET_MODE (node->loc), 0));
+ v = cselib_lookup (node->loc, GET_MODE (node->loc), 1);
+ cselib_preserve_value (v);
+ cselib_invalidate_rtx (node->loc);
+ cval = v->val_rtx;
+ cdv = dv_from_value (cval);
+ if (dump_file)
+ fprintf (dump_file,
+ "Created new value %u:%u for reg %i\n",
+ v->uid, v->hash, REGNO (node->loc));
+ }
+
+ var_reg_decl_set (*dfpm->permp, node->loc,
+ VAR_INIT_STATUS_INITIALIZED,
+ cdv, 0, NULL, INSERT);
+
+ node->loc = cval;
+ check_dupes = true;
+ }
+
+ /* Remove attribute referring to the decl, which now
+ uses the value for the register, already existing or
+ to be added when we bring perm in. */
+ att = *curp;
+ *curp = att->next;
+ pool_free (attrs_pool, att);
+ }
+ }
+
+ /* Replacing REGs with VALUEs may have introduced duplicates.  */
+ if (check_dupes)
+ remove_duplicate_values (var);
+ }
+
+ return 1;
+ }
+
+ /* Reset values in the permanent set that are not associated with the
+ chosen expression.  Traversal callback over the permanent set;
+ INFO is a struct dfset_post_merge. */
+
+ static int
+ variable_post_merge_perm_vals (void **pslot, void *info)
+ {
+ struct dfset_post_merge *dfpm = (struct dfset_post_merge *)info;
+ dataflow_set *set = dfpm->set;
+ variable pvar = (variable)*pslot, var;
+ location_chain pnode;
+ decl_or_value dv;
+ attrs att;
+
+ /* Permanent-set entries are always a single VALUE bound to exactly
+ one register.  */
+ gcc_assert (dv_is_value_p (pvar->dv)
+ && pvar->n_var_parts == 1);
+ pnode = pvar->var_part[0].loc_chain;
+ gcc_assert (pnode
+ && !pnode->next
+ && REG_P (pnode->loc));
+
+ dv = pvar->dv;
+
+ var = shared_hash_find (set->vars, dv);
+ if (var)
+ {
+ /* Although variable_post_merge_new_vals may have made decls
+ non-star-canonical, values that pre-existed in canonical form
+ remain canonical, and newly-created values reference a single
+ REG, so they are canonical as well. Since VAR has the
+ location list for a VALUE, using find_loc_in_1pdv for it is
+ fine, since VALUEs don't map back to DECLs. */
+ if (find_loc_in_1pdv (pnode->loc, var, shared_hash_htab (set->vars)))
+ return 1;
+ val_reset (set, dv);
+ }
+
+ for (att = set->regs[REGNO (pnode->loc)]; att; att = att->next)
+ if (att->offset == 0
+ && GET_MODE (att->loc) == GET_MODE (pnode->loc)
+ && dv_is_value_p (att->dv))
+ break;
+
+ /* If there is a value associated with this register already, create
+ an equivalence. */
+ if (att && dv_as_value (att->dv) != dv_as_value (dv))
+ {
+ rtx cval = dv_as_value (att->dv);
+ set_variable_part (set, cval, dv, 0, pnode->init, NULL, INSERT);
+ set_variable_part (set, dv_as_value (dv), att->dv, 0, pnode->init,
+ NULL, INSERT);
+ }
+ else if (!att)
+ {
+ /* No value holds the register in SET; bring in the permanent
+ binding.  */
+ attrs_list_insert (&set->regs[REGNO (pnode->loc)],
+ dv, 0, pnode->loc);
+ variable_union (pvar, set);
+ }
+
+ return 1;
+ }
+
+ /* Adjust SET after a dataflow merge: create values for incoming
+ expressions of one-part variables, bring in equivalences from the
+ permanent set *PERMP (if any), and canonicalize the resulting
+ values and one-part variables. */
+
+ static void
+ dataflow_post_merge_adjust (dataflow_set *set, dataflow_set **permp)
+ {
+ struct dfset_post_merge dfpm;
+
+ dfpm.set = set;
+ dfpm.permp = permp;
+
+ htab_traverse (shared_hash_htab (set->vars), variable_post_merge_new_vals,
+ &dfpm);
+ if (*permp)
+ htab_traverse (shared_hash_htab ((*permp)->vars),
+ variable_post_merge_perm_vals, &dfpm);
+ htab_traverse (shared_hash_htab (set->vars), canonicalize_values_star, set);
+ htab_traverse (shared_hash_htab (set->vars), canonicalize_vars_star, set);
+ }
+
+ /* Return a node whose loc is a MEM that refers to EXPR in the
+ location list of a one-part variable or value VAR, or in that of
+ any values recursively mentioned in the location lists.  Returns
+ NULL if no such MEM is found.  VALUE_RECURSED_INTO is used as a
+ visited bit to avoid infinite recursion on value cycles. */
+
+ static location_chain
+ find_mem_expr_in_1pdv (tree expr, rtx val, htab_t vars)
+ {
+ location_chain node;
+ decl_or_value dv;
+ variable var;
+ location_chain where = NULL;
+
+ if (!val)
+ return NULL;
+
+ gcc_assert (GET_CODE (val) == VALUE
+ && !VALUE_RECURSED_INTO (val));
+
+ dv = dv_from_value (val);
+ var = (variable) htab_find_with_hash (vars, dv, dv_htab_hash (dv));
+
+ if (!var)
+ return NULL;
+
+ gcc_assert (dv_onepart_p (var->dv));
+
+ if (!var->n_var_parts)
+ return NULL;
+
+ gcc_assert (var->var_part[0].offset == 0);
+
+ /* Mark VAL while we recurse through it, so a cyclic equivalence
+ chain terminates.  */
+ VALUE_RECURSED_INTO (val) = true;
+
+ for (node = var->var_part[0].loc_chain; node; node = node->next)
+ if (MEM_P (node->loc) && MEM_EXPR (node->loc) == expr
+ && MEM_OFFSET (node->loc) == 0)
+ {
+ where = node;
+ break;
+ }
+ else if (GET_CODE (node->loc) == VALUE
+ && !VALUE_RECURSED_INTO (node->loc)
+ && (where = find_mem_expr_in_1pdv (expr, node->loc, vars)))
+ break;
+
+ VALUE_RECURSED_INTO (val) = false;
+
+ return where;
+ }
+
+ /* Return TRUE if the value of MEM may vary across a call.  Errs on
+ the safe side: answers true whenever the base object cannot be
+ identified or may be visible to the callee. */
+
+ static bool
+ mem_dies_at_call (rtx mem)
+ {
+ tree expr = MEM_EXPR (mem);
+ tree decl;
+
+ if (!expr)
+ return true;
+
+ decl = get_base_address (expr);
+
+ if (!decl)
+ return true;
+
+ if (!DECL_P (decl))
+ return true;
+
+ /* A call can modify anything aliased or any writable global.  */
+ return (may_be_aliased (decl)
+ || (!TREE_READONLY (decl) && is_global_var (decl)));
+ }
+
+ /* Remove all MEMs from the location list of a hash table entry for a
+ one-part variable, except those whose MEM attributes map back to
+ the variable itself, directly or within a VALUE.  Traversal
+ callback; DATA is the dataflow_set. */
+
+ static int
+ dataflow_set_preserve_mem_locs (void **slot, void *data)
+ {
+ dataflow_set *set = (dataflow_set *) data;
+ variable var = (variable) *slot;
+
+ if (dv_is_decl_p (var->dv) && dv_onepart_p (var->dv))
+ {
+ tree decl = dv_as_decl (var->dv);
+ location_chain loc, *locp;
+ bool changed = false;
+
+ if (!var->n_var_parts)
+ return 1;
+
+ gcc_assert (var->n_var_parts == 1);
+
+ if (shared_var_p (var, set->vars))
+ {
+ /* First check whether anything would change at all, to avoid
+ unsharing a variable we won't modify.  */
+ for (loc = var->var_part[0].loc_chain; loc; loc = loc->next)
+ {
+ /* We want to remove dying MEMs that don't refer to
+ DECL. */
+ if (GET_CODE (loc->loc) == MEM
+ && (MEM_EXPR (loc->loc) != decl
+ || MEM_OFFSET (loc->loc))
+ && !mem_dies_at_call (loc->loc))
+ break;
+ /* We want to move here MEMs that do refer to DECL. */
+ else if (GET_CODE (loc->loc) == VALUE
+ && find_mem_expr_in_1pdv (decl, loc->loc,
+ shared_hash_htab (set->vars)))
+ break;
+ }
+
+ if (!loc)
+ return 1;
+
+ slot = unshare_variable (set, slot, var, VAR_INIT_STATUS_UNKNOWN);
+ var = (variable)*slot;
+ gcc_assert (var->n_var_parts == 1);
+ }
+
+ for (locp = &var->var_part[0].loc_chain, loc = *locp;
+ loc; loc = *locp)
+ {
+ rtx old_loc = loc->loc;
+ if (GET_CODE (old_loc) == VALUE)
+ {
+ location_chain mem_node
+ = find_mem_expr_in_1pdv (decl, loc->loc,
+ shared_hash_htab (set->vars));
+
+ /* ??? This picks up only one out of multiple MEMs that
+ refer to the same variable. Do we ever need to be
+ concerned about dealing with more than one, or, given
+ that they should all map to the same variable
+ location, their addresses will have been merged and
+ they will be regarded as equivalent? */
+ if (mem_node)
+ {
+ loc->loc = mem_node->loc;
+ loc->set_src = mem_node->set_src;
+ loc->init = MIN (loc->init, mem_node->init);
+ }
+ }
+
+ /* Keep the location unless it is a dying MEM that does not
+ refer to DECL.  */
+ if (GET_CODE (loc->loc) != MEM
+ || (MEM_EXPR (loc->loc) == decl
+ && MEM_OFFSET (loc->loc) == 0)
+ || !mem_dies_at_call (loc->loc))
+ {
+ if (old_loc != loc->loc && emit_notes)
+ {
+ if (old_loc == var->var_part[0].cur_loc)
+ {
+ changed = true;
+ var->var_part[0].cur_loc = NULL;
+ var->cur_loc_changed = true;
+ }
+ add_value_chains (var->dv, loc->loc);
+ remove_value_chains (var->dv, old_loc);
+ }
+ locp = &loc->next;
+ continue;
+ }
+
+ if (emit_notes)
+ {
+ remove_value_chains (var->dv, old_loc);
+ if (old_loc == var->var_part[0].cur_loc)
+ {
+ changed = true;
+ var->var_part[0].cur_loc = NULL;
+ var->cur_loc_changed = true;
+ }
+ }
+ *locp = loc->next;
+ pool_free (loc_chain_pool, loc);
+ }
+
+ if (!var->var_part[0].loc_chain)
+ {
+ var->n_var_parts--;
+ changed = true;
+ }
+ if (changed)
+ variable_was_changed (var, set);
+ }
+
+ return 1;
+ }
+
+ /* Remove all MEMs from the location list of a hash table entry for a
+ value.  Only MEMs that may die at a call are removed.  Traversal
+ callback; DATA is the dataflow_set. */
+
+ static int
+ dataflow_set_remove_mem_locs (void **slot, void *data)
+ {
+ dataflow_set *set = (dataflow_set *) data;
+ variable var = (variable) *slot;
+
+ if (dv_is_value_p (var->dv))
+ {
+ location_chain loc, *locp;
+ bool changed = false;
+
+ gcc_assert (var->n_var_parts == 1);
+
+ if (shared_var_p (var, set->vars))
+ {
+ /* Only unshare if something will actually be removed.  */
+ for (loc = var->var_part[0].loc_chain; loc; loc = loc->next)
+ if (GET_CODE (loc->loc) == MEM
+ && mem_dies_at_call (loc->loc))
+ break;
+
+ if (!loc)
+ return 1;
+
+ slot = unshare_variable (set, slot, var, VAR_INIT_STATUS_UNKNOWN);
+ var = (variable)*slot;
+ gcc_assert (var->n_var_parts == 1);
+ }
+
+ for (locp = &var->var_part[0].loc_chain, loc = *locp;
+ loc; loc = *locp)
+ {
+ if (GET_CODE (loc->loc) != MEM
+ || !mem_dies_at_call (loc->loc))
+ {
+ locp = &loc->next;
+ continue;
+ }
+
+ if (emit_notes)
+ remove_value_chains (var->dv, loc->loc);
+ *locp = loc->next;
+ /* If we have deleted the location which was last emitted
+ we have to emit new location so add the variable to set
+ of changed variables. */
+ if (var->var_part[0].cur_loc == loc->loc)
+ {
+ changed = true;
+ var->var_part[0].cur_loc = NULL;
+ var->cur_loc_changed = true;
+ }
+ pool_free (loc_chain_pool, loc);
+ }
+
+ if (!var->var_part[0].loc_chain)
+ {
+ var->n_var_parts--;
+ changed = true;
+ }
+ if (changed)
+ variable_was_changed (var, set);
+ }
+
+ return 1;
+ }
+
+ /* Remove all variable-location information about call-clobbered
+ registers, as well as associations between MEMs and VALUEs. */
+
+ static void
+ dataflow_set_clear_at_call (dataflow_set *set)
+ {
+ int r;
+
+ /* Drop everything held in registers the call may clobber.  */
+ for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, r))
+ var_regno_delete (set, r);
+
+ if (MAY_HAVE_DEBUG_INSNS)
+ {
+ /* traversed_vars guards against re-traversal when the table is
+ unshared during the walk.  */
+ set->traversed_vars = set->vars;
+ htab_traverse (shared_hash_htab (set->vars),
+ dataflow_set_preserve_mem_locs, set);
+ set->traversed_vars = set->vars;
+ htab_traverse (shared_hash_htab (set->vars), dataflow_set_remove_mem_locs,
+ set);
+ set->traversed_vars = NULL;
+ }
+ }
+
+ /* Return true if variable part VP1 has a location that variable part
+ VP2 lacks.  Note this check is one-directional: callers compare in
+ both orders when full inequality is needed. */
+
+ static bool
+ variable_part_different_p (variable_part *vp1, variable_part *vp2)
+ {
+ location_chain lc1, lc2;
+
+ for (lc1 = vp1->loc_chain; lc1; lc1 = lc1->next)
+ {
+ for (lc2 = vp2->loc_chain; lc2; lc2 = lc2->next)
+ {
+ if (REG_P (lc1->loc) && REG_P (lc2->loc))
+ {
+ /* Registers match by number alone.  */
+ if (REGNO (lc1->loc) == REGNO (lc2->loc))
+ break;
+ }
+ if (rtx_equal_p (lc1->loc, lc2->loc))
+ break;
+ }
+ /* LC1->loc was not found anywhere in VP2.  */
+ if (!lc2)
+ return true;
+ }
+ return false;
+ }
+
+ /* Return true if one-part variables VAR1 and VAR2 are different.
+ They must be in canonical order, which allows a single linear
+ pairwise walk of the two location chains. */
+
+ static bool
+ onepart_variable_different_p (variable var1, variable var2)
+ {
+ location_chain lc1, lc2;
+
+ if (var1 == var2)
+ return false;
+
+ gcc_assert (var1->n_var_parts == 1
+ && var2->n_var_parts == 1);
+
+ lc1 = var1->var_part[0].loc_chain;
+ lc2 = var2->var_part[0].loc_chain;
+
+ gcc_assert (lc1 && lc2);
+
+ while (lc1 && lc2)
+ {
+ if (loc_cmp (lc1->loc, lc2->loc))
+ return true;
+ lc1 = lc1->next;
+ lc2 = lc2->next;
+ }
+
+ /* Different only if one chain is longer than the other.  */
+ return lc1 != lc2;
+ }
+
+/* Return true if variables VAR1 and VAR2 are different. */
+
+static bool
+variable_different_p (variable var1, variable var2)
+{
+ int i;
+
+ if (var1 == var2)
+ return false;
+
+ if (var1->n_var_parts != var2->n_var_parts)
+ return true;
+
+ for (i = 0; i < var1->n_var_parts; i++)
+ {
+ if (var1->var_part[i].offset != var2->var_part[i].offset)
+ return true;
+ /* One-part values have locations in a canonical order. */
+ if (i == 0 && var1->var_part[i].offset == 0 && dv_onepart_p (var1->dv))
+ {
+ gcc_assert (var1->n_var_parts == 1
+ && dv_as_opaque (var1->dv) == dv_as_opaque (var2->dv));
+ return onepart_variable_different_p (var1, var2);
+ }
+ if (variable_part_different_p (&var1->var_part[i], &var2->var_part[i]))
+ return true;
+ if (variable_part_different_p (&var2->var_part[i], &var1->var_part[i]))
+ return true;
+ }
+ return false;
+}
+
+/* Return true if dataflow sets OLD_SET and NEW_SET differ. */
+
+static bool
+dataflow_set_different (dataflow_set *old_set, dataflow_set *new_set)
+{
+ htab_iterator hi;
+ variable var1;
+
+ if (old_set->vars == new_set->vars)
+ return false;
+
+ if (htab_elements (shared_hash_htab (old_set->vars))
+ != htab_elements (shared_hash_htab (new_set->vars)))
+ return true;
+
+ FOR_EACH_HTAB_ELEMENT (shared_hash_htab (old_set->vars), var1, variable, hi)
+ {
+ htab_t htab = shared_hash_htab (new_set->vars);
+ variable var2 = (variable) htab_find_with_hash (htab, var1->dv,
+ dv_htab_hash (var1->dv));
+ if (!var2)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "dataflow difference found: removal of:\n");
+ dump_var (var1);
+ }
+ return true;
+ }
+
+ if (variable_different_p (var1, var2))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "dataflow difference found: "
+ "old and new follow:\n");
+ dump_var (var1);
+ dump_var (var2);
+ }
+ return true;
+ }
+ }
+
+ /* No need to traverse the second hashtab, if both have the same number
+ of elements and the second one had all entries found in the first one,
+ then it can't have any extra entries. */
+ return false;
+}
+
+/* Free the contents of dataflow set SET. */
+
+static void
+dataflow_set_destroy (dataflow_set *set)
+{
+ int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ attrs_list_clear (&set->regs[i]);
+
+ shared_hash_destroy (set->vars);
+ set->vars = NULL;
+}
+
+/* Return true if RTL X contains a SYMBOL_REF. */
+
+static bool
+contains_symbol_ref (rtx x)
+{
+ const char *fmt;
+ RTX_CODE code;
+ int i;
+
+ if (!x)
+ return false;
+
+ code = GET_CODE (x);
+ if (code == SYMBOL_REF)
+ return true;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (contains_symbol_ref (XEXP (x, i)))
+ return true;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (contains_symbol_ref (XVECEXP (x, i, j)))
+ return true;
+ }
+ }
+
+ return false;
+}
+
/* Shall EXPR be tracked?  When NEED_RTL is true, EXPR additionally
   must have a name and an RTL assigned to it to qualify.  */

static bool
track_expr_p (tree expr, bool need_rtl)
{
  rtx decl_rtl;
  tree realdecl;

  /* Debug-expression decls are tracked whenever they have RTL.  */
  if (TREE_CODE (expr) == DEBUG_EXPR_DECL)
    return DECL_RTL_SET_P (expr);

  /* If EXPR is not a parameter or a variable do not track it.  */
  if (TREE_CODE (expr) != VAR_DECL && TREE_CODE (expr) != PARM_DECL)
    return 0;

  /* It also must have a name...  */
  if (!DECL_NAME (expr) && need_rtl)
    return 0;

  /* ... and a RTL assigned to it.  */
  decl_rtl = DECL_RTL_IF_SET (expr);
  if (!decl_rtl && need_rtl)
    return 0;

  /* If this expression is really a debug alias of some other declaration, we
     don't need to track this expression if the ultimate declaration is
     ignored.  */
  realdecl = expr;
  if (DECL_DEBUG_EXPR_IS_FROM (realdecl))
    {
      realdecl = DECL_DEBUG_EXPR (realdecl);
      if (realdecl == NULL_TREE)
	realdecl = expr;
      else if (!DECL_P (realdecl))
	{
	  if (handled_component_p (realdecl))
	    {
	      HOST_WIDE_INT bitsize, bitpos, maxsize;
	      tree innerdecl
		= get_ref_base_and_extent (realdecl, &bitpos, &bitsize,
					   &maxsize);
	      /* Only track small, constant-sized pieces of tracked
		 decls: reject unknown or variable extents and pieces
		 ending beyond 256 bits (presumably tied to
		 MAX_VAR_PARTS -- TODO confirm).  */
	      if (!DECL_P (innerdecl)
		  || DECL_IGNORED_P (innerdecl)
		  || TREE_STATIC (innerdecl)
		  || bitsize <= 0
		  || bitpos + bitsize > 256
		  || bitsize != maxsize)
		return 0;
	      else
		realdecl = expr;
	    }
	  else
	    return 0;
	}
    }

  /* Do not track EXPR if REALDECL should be ignored for debugging
     purposes.  */
  if (DECL_IGNORED_P (realdecl))
    return 0;

  /* Do not track global variables until we are able to emit correct location
     list for them.  */
  if (TREE_STATIC (realdecl))
    return 0;

  /* When the EXPR is a DECL for alias of some variable (see example)
     the TREE_STATIC flag is not used.  Disable tracking all DECLs whose
     DECL_RTL contains SYMBOL_REF.

     Example:
     extern char **_dl_argv_internal __attribute__ ((alias ("_dl_argv")));
     char **_dl_argv;
  */
  if (decl_rtl && MEM_P (decl_rtl)
      && contains_symbol_ref (XEXP (decl_rtl, 0)))
    return 0;

  /* If RTX is a memory it should not be very large (because it would be
     an array or struct).  */
  if (decl_rtl && MEM_P (decl_rtl))
    {
      /* Do not track structures and arrays.  */
      if (GET_MODE (decl_rtl) == BLKmode
	  || AGGREGATE_TYPE_P (TREE_TYPE (realdecl)))
	return 0;
      if (MEM_SIZE (decl_rtl)
	  && INTVAL (MEM_SIZE (decl_rtl)) > MAX_VAR_PARTS)
	return 0;
    }

  /* Clear any stale change marks; the decl is a fresh tracking
     candidate from here on.  */
  DECL_CHANGED (expr) = 0;
  DECL_CHANGED (realdecl) = 0;
  return 1;
}
+
+/* Determine whether a given LOC refers to the same variable part as
+ EXPR+OFFSET. */
+
+static bool
+same_variable_part_p (rtx loc, tree expr, HOST_WIDE_INT offset)
+{
+ tree expr2;
+ HOST_WIDE_INT offset2;
+
+ if (! DECL_P (expr))
+ return false;
+
+ if (REG_P (loc))
+ {
+ expr2 = REG_EXPR (loc);
+ offset2 = REG_OFFSET (loc);
+ }
+ else if (MEM_P (loc))
+ {
+ expr2 = MEM_EXPR (loc);
+ offset2 = INT_MEM_OFFSET (loc);
+ }
+ else
+ return false;
+
+ if (! expr2 || ! DECL_P (expr2))
+ return false;
+
+ expr = var_debug_decl (expr);
+ expr2 = var_debug_decl (expr2);
+
+ return (expr == expr2 && offset == offset2);
+}
+
/* LOC is a REG or MEM that we would like to track if possible.
   If EXPR is null, we don't know what expression LOC refers to,
   otherwise it refers to EXPR + OFFSET.  STORE_REG_P is true if
   LOC is an lvalue register.

   Return true if EXPR is nonnull and if LOC, or some lowpart of it,
   is something we can track.  When returning true, store the mode of
   the lowpart we can track in *MODE_OUT (if nonnull) and its offset
   from EXPR in *OFFSET_OUT (if nonnull).  */

static bool
track_loc_p (rtx loc, tree expr, HOST_WIDE_INT offset, bool store_reg_p,
	     enum machine_mode *mode_out, HOST_WIDE_INT *offset_out)
{
  enum machine_mode mode;

  if (expr == NULL || !track_expr_p (expr, true))
    return false;

  /* If REG was a paradoxical subreg, its REG_ATTRS will describe the
     whole subreg, but only the old inner part is really relevant.  */
  mode = GET_MODE (loc);
  if (REG_P (loc) && !HARD_REGISTER_NUM_P (ORIGINAL_REGNO (loc)))
    {
      enum machine_mode pseudo_mode;

      pseudo_mode = PSEUDO_REGNO_MODE (ORIGINAL_REGNO (loc));
      if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (pseudo_mode))
	{
	  /* Narrow MODE to the original pseudo's mode, adjusting
	     OFFSET so it still names the same lowpart bytes.  */
	  offset += byte_lowpart_offset (pseudo_mode, mode);
	  mode = pseudo_mode;
	}
    }

  /* If LOC is a paradoxical lowpart of EXPR, refer to EXPR itself.
     Do the same if we are storing to a register and EXPR occupies
     the whole of register LOC; in that case, the whole of EXPR is
     being changed.  We exclude complex modes from the second case
     because the real and imaginary parts are represented as separate
     pseudo registers, even if the whole complex value fits into one
     hard register.  */
  if ((GET_MODE_SIZE (mode) > GET_MODE_SIZE (DECL_MODE (expr))
       || (store_reg_p
	   && !COMPLEX_MODE_P (DECL_MODE (expr))
	   && hard_regno_nregs[REGNO (loc)][DECL_MODE (expr)] == 1))
      && offset + byte_lowpart_offset (DECL_MODE (expr), mode) == 0)
    {
      mode = DECL_MODE (expr);
      offset = 0;
    }

  /* Offsets outside [0, MAX_VAR_PARTS) cannot be represented as a
     variable part.  */
  if (offset < 0 || offset >= MAX_VAR_PARTS)
    return false;

  if (mode_out)
    *mode_out = mode;
  if (offset_out)
    *offset_out = offset;
  return true;
}
+
/* Return the MODE lowpart of LOC, or null if LOC is not something we
   want to track.  When returning nonnull, make sure that the attributes
   on the returned value are updated.  */

static rtx
var_lowpart (enum machine_mode mode, rtx loc)
{
  unsigned int offset, reg_offset, regno;

  /* Only REGs and MEMs are tracked.  */
  if (!REG_P (loc) && !MEM_P (loc))
    return NULL;

  /* Nothing to do when LOC is already in the requested mode.  */
  if (GET_MODE (loc) == mode)
    return loc;

  offset = byte_lowpart_offset (mode, GET_MODE (loc));

  if (MEM_P (loc))
    return adjust_address_nv (loc, mode, offset);

  /* For a REG, translate the byte offset into the hard register that
     holds the lowpart, keeping LOC's attributes on the new REG.  */
  reg_offset = subreg_lowpart_offset (mode, GET_MODE (loc));
  regno = REGNO (loc) + subreg_regno_offset (REGNO (loc), GET_MODE (loc),
					     reg_offset, mode);
  return gen_rtx_REG_offset (loc, mode, regno, offset);
}
+
/* arg_pointer_rtx or frame_pointer_rtx, respectively, when
   stack_pointer_rtx or hard_frame_pointer_rtx is being mapped to it.  */
+static rtx cfa_base_rtx;
+
/* Carry information about uses and stores while walking rtx.  An
   instance is passed as the opaque DATA argument to the rtx-walking
   callbacks below (add_uses, add_stores).  */

struct count_use_info
{
  /* The insn where the RTX is.  */
  rtx insn;

  /* The basic block where insn is.  */
  basic_block bb;

  /* The array of n_sets sets in the insn, as determined by cselib.  */
  struct cselib_set *sets;
  int n_sets;

  /* True if we're counting stores, false otherwise.  */
  bool store_p;
};
+
/* Find a VALUE corresponding to X.  MODE is X's mode, used for the
   cselib lookup; CUI describes the insn being walked.  Returns NULL
   when no cselib data is available or no value is known.  */

static inline cselib_val *
find_use_val (rtx x, enum machine_mode mode, struct count_use_info *cui)
{
  int i;

  if (cui->sets)
    {
      /* This is called after uses are set up and before stores are
	 processed by cselib, so it's safe to look up srcs, but not
	 dsts.  So we look up expressions that appear in srcs or in
	 dest expressions, but we search the sets array for dests of
	 stores.  */
      if (cui->store_p)
	{
	  /* Search the recorded sets for X as a store destination.  */
	  for (i = 0; i < cui->n_sets; i++)
	    if (cui->sets[i].dest == x)
	      return cui->sets[i].src_elt;
	}
      else
	return cselib_lookup (x, mode, 0);
    }

  return NULL;
}
+
+/* Helper function to get mode of MEM's address. */
+
+static inline enum machine_mode
+get_address_mode (rtx mem)
+{
+ enum machine_mode mode = GET_MODE (XEXP (mem, 0));
+ if (mode != VOIDmode)
+ return mode;
+ return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
+}
+
+/* Replace all registers and addresses in an expression with VALUE
+ expressions that map back to them, unless the expression is a
+ register. If no mapping is or can be performed, returns NULL. */
+
+static rtx
+replace_expr_with_values (rtx loc)
+{
+ if (REG_P (loc))
+ return NULL;
+ else if (MEM_P (loc))
+ {
+ cselib_val *addr = cselib_lookup (XEXP (loc, 0),
+ get_address_mode (loc), 0);
+ if (addr)
+ return replace_equiv_address_nv (loc, addr->val_rtx);
+ else
+ return NULL;
+ }
+ else
+ return cselib_subst_to_values (loc);
+}
+
/* Determine what kind of micro operation to choose for a USE.  Return
   MO_CLOBBER if no micro operation is to be generated.  If MODEP is
   nonnull, it may receive the mode in which LOC should be tracked.  */

static enum micro_operation_type
use_type (rtx loc, struct count_use_info *cui, enum machine_mode *modep)
{
  tree expr;

  /* When cselib data is available (CUI->SETS), prefer VALUE-based
     micro operations.  */
  if (cui && cui->sets)
    {
      if (GET_CODE (loc) == VAR_LOCATION)
	{
	  if (track_expr_p (PAT_VAR_LOCATION_DECL (loc), false))
	    {
	      rtx ploc = PAT_VAR_LOCATION_LOC (loc);
	      if (! VAR_LOC_UNKNOWN_P (ploc))
		{
		  /* Lookup with create=1 so the location gains a
		     VALUE.  The assert below is deliberately a no-op
		     (see the ??? note).  */
		  cselib_val *val = cselib_lookup (ploc, GET_MODE (loc), 1);

		  /* ??? flag_float_store and volatile mems are never
		     given values, but we could in theory use them for
		     locations.  */
		  gcc_assert (val || 1);
		}
	      return MO_VAL_LOC;
	    }
	  else
	    return MO_CLOBBER;
	}

      if (REG_P (loc) || MEM_P (loc))
	{
	  if (modep)
	    *modep = GET_MODE (loc);
	  if (cui->store_p)
	    {
	      /* A store is VALUE-tracked when it targets a REG, or a
		 MEM whose contents and address both have values.  */
	      if (REG_P (loc)
		  || (find_use_val (loc, GET_MODE (loc), cui)
		      && cselib_lookup (XEXP (loc, 0),
					get_address_mode (loc), 0)))
		return MO_VAL_SET;
	    }
	  else
	    {
	      cselib_val *val = find_use_val (loc, GET_MODE (loc), cui);

	      /* Values already preserved need no MO_VAL_USE.  */
	      if (val && !cselib_preserved_value_p (val))
		return MO_VAL_USE;
	    }
	}
    }

  /* No cselib data (or no value found): classify by REG/MEM alone.  */
  if (REG_P (loc))
    {
      gcc_assert (REGNO (loc) < FIRST_PSEUDO_REGISTER);

      /* Do not track the CFA base register (see cfa_base_rtx).  */
      if (loc == cfa_base_rtx)
	return MO_CLOBBER;
      expr = REG_EXPR (loc);

      if (!expr)
	return MO_USE_NO_VAR;
      else if (target_for_debug_bind (var_debug_decl (expr)))
	return MO_CLOBBER;
      else if (track_loc_p (loc, expr, REG_OFFSET (loc),
			    false, modep, NULL))
	return MO_USE;
      else
	return MO_USE_NO_VAR;
    }
  else if (MEM_P (loc))
    {
      expr = MEM_EXPR (loc);

      if (!expr)
	return MO_CLOBBER;
      else if (target_for_debug_bind (var_debug_decl (expr)))
	return MO_CLOBBER;
      else if (track_loc_p (loc, expr, INT_MEM_OFFSET (loc),
			    false, modep, NULL))
	return MO_USE;
      else
	return MO_CLOBBER;
    }

  return MO_CLOBBER;
}
+
/* Log to OUT information about micro-operation MOPT involving X in
   INSN of BB.  The printed "op" index is the current length of BB's
   micro-operation vector, i.e. the position the operation will occupy
   once the caller pushes it after logging.  */

static inline void
log_op_type (rtx x, basic_block bb, rtx insn,
	     enum micro_operation_type mopt, FILE *out)
{
  fprintf (out, "bb %i op %i insn %i %s ",
	   bb->index, VEC_length (micro_operation, VTI (bb)->mos),
	   INSN_UID (insn), micro_operation_type_name[mopt]);
  print_inline_rtx (out, x, 2);
  fputc ('\n', out);
}
+
/* Tell whether the CONCAT used to hold a VALUE and its location
   needs value resolution, i.e., an attempt at mapping the location
   back to other incoming values.  */
+#define VAL_NEEDS_RESOLUTION(x) \
+ (RTL_FLAG_CHECK1 ("VAL_NEEDS_RESOLUTION", (x), CONCAT)->volatil)
+/* Whether the location in the CONCAT is a tracked expression, that
+ should also be handled like a MO_USE. */
+#define VAL_HOLDS_TRACK_EXPR(x) \
+ (RTL_FLAG_CHECK1 ("VAL_HOLDS_TRACK_EXPR", (x), CONCAT)->used)
+/* Whether the location in the CONCAT should be handled like a MO_COPY
+ as well. */
+#define VAL_EXPR_IS_COPIED(x) \
+ (RTL_FLAG_CHECK1 ("VAL_EXPR_IS_COPIED", (x), CONCAT)->jump)
+/* Whether the location in the CONCAT should be handled like a
+ MO_CLOBBER as well. */
+#define VAL_EXPR_IS_CLOBBERED(x) \
+ (RTL_FLAG_CHECK1 ("VAL_EXPR_IS_CLOBBERED", (x), CONCAT)->unchanging)
+/* Whether the location is a CONCAT of the MO_VAL_SET expression and
+ a reverse operation that should be handled afterwards. */
+#define VAL_EXPR_HAS_REVERSE(x) \
+ (RTL_FLAG_CHECK1 ("VAL_EXPR_HAS_REVERSE", (x), CONCAT)->return_val)
+
+/* All preserved VALUEs. */
+static VEC (rtx, heap) *preserved_values;
+
/* Ensure VAL is preserved and remember it in a vector for vt_emit_notes.  */

static void
preserve_value (cselib_val *val)
{
  /* Keep cselib from discarding VAL...  */
  cselib_preserve_value (val);
  /* ... and record its rtx so all preserved values can be revisited
     later (see preserved_values).  */
  VEC_safe_push (rtx, heap, preserved_values, val->val_rtx);
}
+
+/* Helper function for MO_VAL_LOC handling. Return non-zero if
+ any rtxes not suitable for CONST use not replaced by VALUEs
+ are discovered. */
+
+static int
+non_suitable_const (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+ if (*x == NULL_RTX)
+ return 0;
+
+ switch (GET_CODE (*x))
+ {
+ case REG:
+ case DEBUG_EXPR:
+ case PC:
+ case SCRATCH:
+ case CC0:
+ case ASM_INPUT:
+ case ASM_OPERANDS:
+ return 1;
+ case MEM:
+ return !MEM_READONLY_P (*x);
+ default:
+ return 0;
+ }
+}
+
/* Add uses (register and memory references) LOC which will be tracked
   to VTI (bb)->mos.  INSN is instruction which the LOC is part of.
   Called via for_each_rtx; always returns 0 so the walk continues.  */

static int
add_uses (rtx *ploc, void *data)
{
  rtx loc = *ploc;
  enum machine_mode mode = VOIDmode;
  struct count_use_info *cui = (struct count_use_info *)data;
  enum micro_operation_type type = use_type (loc, cui, &mode);

  if (type != MO_CLOBBER)
    {
      basic_block bb = cui->bb;
      micro_operation mo;

      mo.type = type;
      /* For a plain MO_USE track only the relevant lowpart of LOC.  */
      mo.u.loc = type == MO_USE ? var_lowpart (mode, loc) : loc;
      mo.insn = cui->insn;

      if (type == MO_VAL_LOC)
	{
	  rtx oloc = loc;
	  rtx vloc = PAT_VAR_LOCATION_LOC (oloc);
	  cselib_val *val;

	  gcc_assert (cui->sets);

	  /* If the bound location is a MEM whose address is neither a
	     REG, a MEM, nor cfa_base + constant, emit a MO_VAL_USE for
	     the address VALUE first, preserving it.  */
	  if (MEM_P (vloc)
	      && !REG_P (XEXP (vloc, 0))
	      && !MEM_P (XEXP (vloc, 0))
	      && (GET_CODE (XEXP (vloc, 0)) != PLUS
		  || XEXP (XEXP (vloc, 0), 0) != cfa_base_rtx
		  || !CONST_INT_P (XEXP (XEXP (vloc, 0), 1))))
	    {
	      rtx mloc = vloc;
	      enum machine_mode address_mode = get_address_mode (mloc);
	      /* NOTE: this VAL shadows the outer one, which is only
		 set further below.  */
	      cselib_val *val
		= cselib_lookup (XEXP (mloc, 0), address_mode, 0);

	      if (val && !cselib_preserved_value_p (val))
		{
		  micro_operation moa;
		  preserve_value (val);
		  mloc = cselib_subst_to_values (XEXP (mloc, 0));
		  moa.type = MO_VAL_USE;
		  moa.insn = cui->insn;
		  /* Pair the address VALUE with its substituted form.  */
		  moa.u.loc = gen_rtx_CONCAT (address_mode,
					      val->val_rtx, mloc);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    log_op_type (moa.u.loc, cui->bb, cui->insn,
				 moa.type, dump_file);
		  VEC_safe_push (micro_operation, heap, VTI (bb)->mos, &moa);
		}
	    }

	  if (CONSTANT_P (vloc)
	      && (GET_CODE (vloc) != CONST
		  || for_each_rtx (&vloc, non_suitable_const, NULL)))
	    /* For constants don't look up any value.  */;
	  else if (!VAR_LOC_UNKNOWN_P (vloc)
		   && (val = find_use_val (vloc, GET_MODE (oloc), cui)))
	    {
	      enum machine_mode mode2;
	      enum micro_operation_type type2;
	      rtx nloc = replace_expr_with_values (vloc);

	      if (nloc)
		{
		  /* Rewrite the bound location in terms of VALUEs.  */
		  oloc = shallow_copy_rtx (oloc);
		  PAT_VAR_LOCATION_LOC (oloc) = nloc;
		}

	      /* Pair the VALUE with the (possibly rewritten) VAR_LOCATION.  */
	      oloc = gen_rtx_CONCAT (mode, val->val_rtx, oloc);

	      type2 = use_type (vloc, 0, &mode2);

	      gcc_assert (type2 == MO_USE || type2 == MO_USE_NO_VAR
			  || type2 == MO_CLOBBER);

	      /* If VLOC itself is not separately tracked, the VALUE
		 needs resolution and must be kept alive.  */
	      if (type2 == MO_CLOBBER
		  && !cselib_preserved_value_p (val))
		{
		  VAL_NEEDS_RESOLUTION (oloc) = 1;
		  preserve_value (val);
		}
	    }
	  else if (!VAR_LOC_UNKNOWN_P (vloc))
	    {
	      /* No usable VALUE: record that the location is unknown.  */
	      oloc = shallow_copy_rtx (oloc);
	      PAT_VAR_LOCATION_LOC (oloc) = gen_rtx_UNKNOWN_VAR_LOC ();
	    }

	  mo.u.loc = oloc;
	}
      else if (type == MO_VAL_USE)
	{
	  enum machine_mode mode2 = VOIDmode;
	  enum micro_operation_type type2;
	  cselib_val *val = find_use_val (loc, GET_MODE (loc), cui);
	  rtx vloc, oloc = loc, nloc;

	  gcc_assert (cui->sets);

	  /* As above: a MEM with a complex address gets a MO_VAL_USE
	     for the address VALUE first.  */
	  if (MEM_P (oloc)
	      && !REG_P (XEXP (oloc, 0))
	      && !MEM_P (XEXP (oloc, 0))
	      && (GET_CODE (XEXP (oloc, 0)) != PLUS
		  || XEXP (XEXP (oloc, 0), 0) != cfa_base_rtx
		  || !CONST_INT_P (XEXP (XEXP (oloc, 0), 1))))
	    {
	      rtx mloc = oloc;
	      enum machine_mode address_mode = get_address_mode (mloc);
	      /* NOTE: shadows the outer VAL declared above.  */
	      cselib_val *val
		= cselib_lookup (XEXP (mloc, 0), address_mode, 0);

	      if (val && !cselib_preserved_value_p (val))
		{
		  micro_operation moa;
		  preserve_value (val);
		  mloc = cselib_subst_to_values (XEXP (mloc, 0));
		  moa.type = MO_VAL_USE;
		  moa.insn = cui->insn;
		  moa.u.loc = gen_rtx_CONCAT (address_mode,
					      val->val_rtx, mloc);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    log_op_type (moa.u.loc, cui->bb, cui->insn,
				 moa.type, dump_file);
		  VEC_safe_push (micro_operation, heap, VTI (bb)->mos, &moa);
		}
	    }

	  type2 = use_type (loc, 0, &mode2);

	  gcc_assert (type2 == MO_USE || type2 == MO_USE_NO_VAR
		      || type2 == MO_CLOBBER);

	  /* Track the relevant lowpart when LOC is itself a tracked
	     expression.  */
	  if (type2 == MO_USE)
	    vloc = var_lowpart (mode2, loc);
	  else
	    vloc = oloc;

	  /* The loc of a MO_VAL_USE may have two forms:

	     (concat val src): val is at src, a value-based
	     representation.

	     (concat (concat val use) src): same as above, with use as
	     the MO_USE tracked value, if it differs from src.

	  */

	  nloc = replace_expr_with_values (loc);
	  if (!nloc)
	    nloc = oloc;

	  /* use_type only returns MO_VAL_USE when find_use_val found a
	     value, so VAL is non-null here.  */
	  if (vloc != nloc)
	    oloc = gen_rtx_CONCAT (mode2, val->val_rtx, vloc);
	  else
	    oloc = val->val_rtx;

	  mo.u.loc = gen_rtx_CONCAT (mode, oloc, nloc);

	  if (type2 == MO_USE)
	    VAL_HOLDS_TRACK_EXPR (mo.u.loc) = 1;
	  if (!cselib_preserved_value_p (val))
	    {
	      /* Ask for the location to be mapped back to other
		 incoming values, and keep VAL alive.  */
	      VAL_NEEDS_RESOLUTION (mo.u.loc) = 1;
	      preserve_value (val);
	    }
	}
      else
	gcc_assert (type == MO_USE || type == MO_USE_NO_VAR);

      if (dump_file && (dump_flags & TDF_DETAILS))
	log_op_type (mo.u.loc, cui->bb, cui->insn, mo.type, dump_file);
      VEC_safe_push (micro_operation, heap, VTI (bb)->mos, &mo);
    }

  return 0;
}
+
/* Helper function for finding all uses of REG/MEM in X in insn INSN.
   CUI is the count_use_info for the walk, passed through opaquely.  */

static void
add_uses_1 (rtx *x, void *cui)
{
  /* Visit every subexpression of *X, calling add_uses on each.  */
  for_each_rtx (x, add_uses, cui);
}
+
/* Attempt to reverse the EXPR operation in the debug info.  Say for
   reg1 = reg2 + 6 even when reg2 is no longer live we
   can express its value as VAL - 6.  Returns a CONCAT of the operand's
   VALUE and the reversed expression, or NULL_RTX when EXPR cannot be
   reversed.  */

static rtx
reverse_op (rtx val, const_rtx expr)
{
  rtx src, arg, ret;
  cselib_val *v;
  enum rtx_code code;

  if (GET_CODE (expr) != SET)
    return NULL_RTX;

  /* Only reverse sets of a register whose mode matches VAL's.  */
  if (!REG_P (SET_DEST (expr)) || GET_MODE (val) != GET_MODE (SET_DEST (expr)))
    return NULL_RTX;

  /* Filter for the invertible operations handled below; the first
     operand must be a REG (or, for extensions, a MEM).  */
  src = SET_SRC (expr);
  switch (GET_CODE (src))
    {
    case PLUS:
    case MINUS:
    case XOR:
    case NOT:
    case NEG:
      if (!REG_P (XEXP (src, 0)))
	return NULL_RTX;
      break;
    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (!REG_P (XEXP (src, 0)) && !MEM_P (XEXP (src, 0)))
	return NULL_RTX;
      break;
    default:
      return NULL_RTX;
    }

  if (!SCALAR_INT_MODE_P (GET_MODE (src)) || XEXP (src, 0) == cfa_base_rtx)
    return NULL_RTX;

  /* The operand must have a preserved cselib VALUE to attach the
     reverse expression to.  */
  v = cselib_lookup (XEXP (src, 0), GET_MODE (XEXP (src, 0)), 0);
  if (!v || !cselib_preserved_value_p (v))
    return NULL_RTX;

  /* Build the reverse expression RET in terms of VAL.  */
  switch (GET_CODE (src))
    {
    case NOT:
    case NEG:
      /* NOT and NEG are their own inverses.  */
      if (GET_MODE (v->val_rtx) != GET_MODE (val))
	return NULL_RTX;
      ret = gen_rtx_fmt_e (GET_CODE (src), GET_MODE (val), val);
      break;
    case SIGN_EXTEND:
    case ZERO_EXTEND:
      /* The inverse of an extension is taking the lowpart.  */
      ret = gen_lowpart_SUBREG (GET_MODE (v->val_rtx), val);
      break;
    case XOR:
      code = XOR;
      goto binary;
    case PLUS:
      code = MINUS;
      goto binary;
    case MINUS:
      code = PLUS;
      goto binary;
    binary:
      if (GET_MODE (v->val_rtx) != GET_MODE (val))
	return NULL_RTX;
      /* The second operand must be (or expand to) a constant.  */
      arg = XEXP (src, 1);
      if (!CONST_INT_P (arg) && GET_CODE (arg) != SYMBOL_REF)
	{
	  arg = cselib_expand_value_rtx (arg, scratch_regs, 5);
	  if (arg == NULL_RTX)
	    return NULL_RTX;
	  if (!CONST_INT_P (arg) && GET_CODE (arg) != SYMBOL_REF)
	    return NULL_RTX;
	}
      ret = simplify_gen_binary (code, GET_MODE (val), val, arg);
      if (ret == val)
	/* Ensure ret isn't VALUE itself (which can happen e.g. for
	   (plus (reg1) (reg2)) when reg2 is known to be 0), as that
	   breaks a lot of routines during var-tracking.  */
	ret = gen_rtx_fmt_ee (PLUS, GET_MODE (val), val, const0_rtx);
      break;
    default:
      gcc_unreachable ();
    }

  return gen_rtx_CONCAT (GET_MODE (v->val_rtx), v->val_rtx, ret);
}
+
+/* Add stores (register and memory references) LOC which will be tracked
+ to VTI (bb)->mos. EXPR is the RTL expression containing the store.
+ CUIP->insn is instruction which the LOC is part of. */
+
+static void
+add_stores (rtx loc, const_rtx expr, void *cuip)
+{
+ enum machine_mode mode = VOIDmode, mode2;
+ struct count_use_info *cui = (struct count_use_info *)cuip;
+ basic_block bb = cui->bb;
+ micro_operation mo;
+ rtx oloc = loc, nloc, src = NULL;
+ enum micro_operation_type type = use_type (loc, cui, &mode);
+ bool track_p = false;
+ cselib_val *v;
+ bool resolve, preserve;
+ rtx reverse;
+
+ if (type == MO_CLOBBER)
+ return;
+
+ mode2 = mode;
+
+ if (REG_P (loc))
+ {
+ gcc_assert (loc != cfa_base_rtx);
+ if ((GET_CODE (expr) == CLOBBER && type != MO_VAL_SET)
+ || !(track_p = use_type (loc, NULL, &mode2) == MO_USE)
+ || GET_CODE (expr) == CLOBBER)
+ {
+ mo.type = MO_CLOBBER;
+ mo.u.loc = loc;
+ }
+ else
+ {
+ if (GET_CODE (expr) == SET && SET_DEST (expr) == loc)
+ src = var_lowpart (mode2, SET_SRC (expr));
+ loc = var_lowpart (mode2, loc);
+
+ if (src == NULL)
+ {
+ mo.type = MO_SET;
+ mo.u.loc = loc;
+ }
+ else
+ {
+ rtx xexpr = gen_rtx_SET (VOIDmode, loc, src);
+ if (same_variable_part_p (src, REG_EXPR (loc), REG_OFFSET (loc)))
+ mo.type = MO_COPY;
+ else
+ mo.type = MO_SET;
+ mo.u.loc = xexpr;
+ }
+ }
+ mo.insn = cui->insn;
+ }
+ else if (MEM_P (loc)
+ && ((track_p = use_type (loc, NULL, &mode2) == MO_USE)
+ || cui->sets))
+ {
+ if (MEM_P (loc) && type == MO_VAL_SET
+ && !REG_P (XEXP (loc, 0))
+ && !MEM_P (XEXP (loc, 0))
+ && (GET_CODE (XEXP (loc, 0)) != PLUS
+ || XEXP (XEXP (loc, 0), 0) != cfa_base_rtx
+ || !CONST_INT_P (XEXP (XEXP (loc, 0), 1))))
+ {
+ rtx mloc = loc;
+ enum machine_mode address_mode = get_address_mode (mloc);
+ cselib_val *val = cselib_lookup (XEXP (mloc, 0),
+ address_mode, 0);
+
+ if (val && !cselib_preserved_value_p (val))
+ {
+ preserve_value (val);
+ mo.type = MO_VAL_USE;
+ mloc = cselib_subst_to_values (XEXP (mloc, 0));
+ mo.u.loc = gen_rtx_CONCAT (address_mode, val->val_rtx, mloc);
+ mo.insn = cui->insn;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ log_op_type (mo.u.loc, cui->bb, cui->insn,
+ mo.type, dump_file);
+ VEC_safe_push (micro_operation, heap, VTI (bb)->mos, &mo);
+ }
+ }
+
+ if (GET_CODE (expr) == CLOBBER || !track_p)
+ {
+ mo.type = MO_CLOBBER;
+ mo.u.loc = track_p ? var_lowpart (mode2, loc) : loc;
+ }
+ else
+ {
+ if (GET_CODE (expr) == SET && SET_DEST (expr) == loc)
+ src = var_lowpart (mode2, SET_SRC (expr));
+ loc = var_lowpart (mode2, loc);
+
+ if (src == NULL)
+ {
+ mo.type = MO_SET;
+ mo.u.loc = loc;
+ }
+ else
+ {
+ rtx xexpr = gen_rtx_SET (VOIDmode, loc, src);
+ if (same_variable_part_p (SET_SRC (xexpr),
+ MEM_EXPR (loc),
+ INT_MEM_OFFSET (loc)))
+ mo.type = MO_COPY;
+ else
+ mo.type = MO_SET;
+ mo.u.loc = xexpr;
+ }
+ }
+ mo.insn = cui->insn;
+ }
+ else
+ return;
+
+ if (type != MO_VAL_SET)
+ goto log_and_return;
+
+ v = find_use_val (oloc, mode, cui);
+
+ if (!v)
+ goto log_and_return;
+
+ resolve = preserve = !cselib_preserved_value_p (v);
+
+ nloc = replace_expr_with_values (oloc);
+ if (nloc)
+ oloc = nloc;
+
+ if (GET_CODE (PATTERN (cui->insn)) == COND_EXEC)
+ {
+ cselib_val *oval = cselib_lookup (oloc, GET_MODE (oloc), 0);