+ if (include_defs)
+ size += DF_REG_DEF_COUNT (regno);
+ if (include_uses)
+ size += DF_REG_USE_COUNT (regno);
+ if (include_eq_uses)
+ size += DF_REG_EQ_USE_COUNT (regno);
+ }
+ return size;
+}
+
+
+/* Build the ref table for either the uses or defs from the reg-use
+ or reg-def chains. This version processes the refs in reg order,
+ which is likely to be best if processing the whole function. */
+
+static void
+df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
+ bool include_defs,
+ bool include_uses,
+ bool include_eq_uses)
+{
+ unsigned int m = df->regs_inited;
+ unsigned int regno;
+ unsigned int offset = 0;
+ unsigned int start;
+
+ if (df->changeable_flags & DF_NO_HARD_REGS)
+ {
+ start = FIRST_PSEUDO_REGISTER;
+ memset (ref_info->begin, 0, sizeof (int) * FIRST_PSEUDO_REGISTER);
+ memset (ref_info->count, 0, sizeof (int) * FIRST_PSEUDO_REGISTER);
+ }
+ else
+ start = 0;
+
+ ref_info->total_size
+ = df_count_refs (include_defs, include_uses, include_eq_uses);
+
+ df_check_and_grow_ref_info (ref_info, 1);
+
+ for (regno = start; regno < m; regno++)
+ {
+ int count = 0;
+ ref_info->begin[regno] = offset;
+ if (include_defs)
+ {
+ df_ref ref = DF_REG_DEF_CHAIN (regno);
+ while (ref)
+ {
+ ref_info->refs[offset] = ref;
+ DF_REF_ID (ref) = offset++;
+ count++;
+ ref = DF_REF_NEXT_REG (ref);
+ gcc_assert (offset < ref_info->refs_size);
+ }
+ }
+ if (include_uses)
+ {
+ df_ref ref = DF_REG_USE_CHAIN (regno);
+ while (ref)
+ {
+ ref_info->refs[offset] = ref;
+ DF_REF_ID (ref) = offset++;
+ count++;
+ ref = DF_REF_NEXT_REG (ref);
+ gcc_assert (offset < ref_info->refs_size);
+ }
+ }
+ if (include_eq_uses)
+ {
+ df_ref ref = DF_REG_EQ_USE_CHAIN (regno);
+ while (ref)
+ {
+ ref_info->refs[offset] = ref;
+ DF_REF_ID (ref) = offset++;
+ count++;
+ ref = DF_REF_NEXT_REG (ref);
+ gcc_assert (offset < ref_info->refs_size);
+ }
+ }
+ ref_info->count[regno] = count;
+ }
+
+ /* The table size is not decremented when refs are deleted. So
+ reset it now that we have squished out all of the empty
+ slots. */
+ ref_info->table_size = offset;
+}
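+
+/* A sketch of how a client might walk the table built above, assuming
+ the DF_USES_BEGIN, DF_USES_COUNT and DF_USES_GET accessors from df.h
+ and a use table in DF_REF_ORDER_BY_REG order:
+
+    unsigned int i;
+    for (i = DF_USES_BEGIN (regno);
+         i < DF_USES_BEGIN (regno) + DF_USES_COUNT (regno);
+         i++)
+      {
+        df_ref use = DF_USES_GET (i);
+        ...
+      }
+
+ Because the begin/count pairs give a contiguous slice per regno, all
+ refs for a register can be visited without chasing the reg chains. */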
+
+
+/* Build the ref table for either the uses or defs from the reg-use
+ or reg-def chains. This version processes the refs in insn order,
+ which is likely to be best if processing some segment of the
+ function. */
+
+static void
+df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
+ bool include_defs,
+ bool include_uses,
+ bool include_eq_uses)
+{
+ bitmap_iterator bi;
+ unsigned int bb_index;
+ unsigned int m = df->regs_inited;
+ unsigned int offset = 0;
+ unsigned int r;
+ unsigned int start
+ = (df->changeable_flags & DF_NO_HARD_REGS) ? FIRST_PSEUDO_REGISTER : 0;
+
+ memset (ref_info->begin, 0, sizeof (int) * df->regs_inited);
+ memset (ref_info->count, 0, sizeof (int) * df->regs_inited);
+
+ ref_info->total_size
+ = df_count_refs (include_defs, include_uses, include_eq_uses);
+ df_check_and_grow_ref_info (ref_info, 1);
+
+ EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
+ {
+ basic_block bb = BASIC_BLOCK (bb_index);
+ rtx insn;
+ df_ref *ref_rec;
+
+ if (include_defs)
+ for (ref_rec = df_get_artificial_defs (bb_index); *ref_rec; ref_rec++)
+ {
+ unsigned int regno = DF_REF_REGNO (*ref_rec);
+ ref_info->count[regno]++;
+ }
+ if (include_uses)
+ for (ref_rec = df_get_artificial_uses (bb_index); *ref_rec; ref_rec++)
+ {
+ unsigned int regno = DF_REF_REGNO (*ref_rec);
+ ref_info->count[regno]++;
+ }
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ if (INSN_P (insn))
+ {
+ unsigned int uid = INSN_UID (insn);
+
+ if (include_defs)
+ for (ref_rec = DF_INSN_UID_DEFS (uid); *ref_rec; ref_rec++)
+ {
+ unsigned int regno = DF_REF_REGNO (*ref_rec);
+ ref_info->count[regno]++;
+ }
+ if (include_uses)
+ for (ref_rec = DF_INSN_UID_USES (uid); *ref_rec; ref_rec++)
+ {
+ unsigned int regno = DF_REF_REGNO (*ref_rec);
+ ref_info->count[regno]++;
+ }
+ if (include_eq_uses)
+ for (ref_rec = DF_INSN_UID_EQ_USES (uid); *ref_rec; ref_rec++)
+ {
+ unsigned int regno = DF_REF_REGNO (*ref_rec);
+ ref_info->count[regno]++;
+ }
+ }
+ }
+ }
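+
+ /* At this point count[r] holds the number of refs counted for each
+ regno. The loop below converts these counts into begin offsets
+ (a prefix sum) and zeroes them so that the second pass can
+ re-count while placing each ref at begin[r] + count[r]; in
+ effect this is a counting sort by regno. */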
+
+ for (r = start; r < m; r++)
+ {
+ ref_info->begin[r] = offset;
+ offset += ref_info->count[r];
+ ref_info->count[r] = 0;
+ }
+
+ EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
+ {
+ basic_block bb = BASIC_BLOCK (bb_index);
+ rtx insn;
+ df_ref *ref_rec;
+
+ if (include_defs)
+ for (ref_rec = df_get_artificial_defs (bb_index); *ref_rec; ref_rec++)
+ {
+ df_ref ref = *ref_rec;
+ unsigned int regno = DF_REF_REGNO (ref);
+ if (regno >= start)
+ {
+ unsigned int id
+ = ref_info->begin[regno] + ref_info->count[regno]++;
+ DF_REF_ID (ref) = id;
+ ref_info->refs[id] = ref;
+ }
+ }
+ if (include_uses)
+ for (ref_rec = df_get_artificial_uses (bb_index); *ref_rec; ref_rec++)
+ {
+ df_ref ref = *ref_rec;
+ unsigned int regno = DF_REF_REGNO (ref);
+ if (regno >= start)
+ {
+ unsigned int id
+ = ref_info->begin[regno] + ref_info->count[regno]++;
+ DF_REF_ID (ref) = id;
+ ref_info->refs[id] = ref;
+ }
+ }
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ if (INSN_P (insn))
+ {
+ unsigned int uid = INSN_UID (insn);
+
+ if (include_defs)
+ for (ref_rec = DF_INSN_UID_DEFS (uid); *ref_rec; ref_rec++)
+ {
+ df_ref ref = *ref_rec;
+ unsigned int regno = DF_REF_REGNO (ref);
+ if (regno >= start)
+ {
+ unsigned int id
+ = ref_info->begin[regno] + ref_info->count[regno]++;
+ DF_REF_ID (ref) = id;
+ ref_info->refs[id] = ref;
+ }
+ }
+ if (include_uses)
+ for (ref_rec = DF_INSN_UID_USES (uid); *ref_rec; ref_rec++)
+ {
+ df_ref ref = *ref_rec;
+ unsigned int regno = DF_REF_REGNO (ref);
+ if (regno >= start)
+ {
+ unsigned int id
+ = ref_info->begin[regno] + ref_info->count[regno]++;
+ DF_REF_ID (ref) = id;
+ ref_info->refs[id] = ref;
+ }
+ }
+ if (include_eq_uses)
+ for (ref_rec = DF_INSN_UID_EQ_USES (uid); *ref_rec; ref_rec++)
+ {
+ df_ref ref = *ref_rec;
+ unsigned int regno = DF_REF_REGNO (ref);
+ if (regno >= start)
+ {
+ unsigned int id
+ = ref_info->begin[regno] + ref_info->count[regno]++;
+ DF_REF_ID (ref) = id;
+ ref_info->refs[id] = ref;
+ }
+ }
+ }
+ }
+ }
+
+ /* The table size is not decremented when refs are deleted. So
+ reset it now that we have squished out all of the empty
+ slots. */
+ ref_info->table_size = offset;
+}
+
+/* Build the ref table for either the uses or defs from the reg-use
+ or reg-def chains. */
+
+static void
+df_reorganize_refs_by_reg (struct df_ref_info *ref_info,
+ bool include_defs,
+ bool include_uses,
+ bool include_eq_uses)
+{
+ if (df->analyze_subset)
+ df_reorganize_refs_by_reg_by_insn (ref_info, include_defs,
+ include_uses, include_eq_uses);
+ else
+ df_reorganize_refs_by_reg_by_reg (ref_info, include_defs,
+ include_uses, include_eq_uses);
+}
+
+
+/* Add the refs in REF_VEC to the table in REF_INFO starting at OFFSET. */
+
+static unsigned int
+df_add_refs_to_table (unsigned int offset,
+ struct df_ref_info *ref_info,
+ df_ref *ref_vec)
+{
+ while (*ref_vec)
+ {
+ df_ref ref = *ref_vec;
+ if ((!(df->changeable_flags & DF_NO_HARD_REGS))
+ || (DF_REF_REGNO (ref) >= FIRST_PSEUDO_REGISTER))
+ {
+ ref_info->refs[offset] = ref;
+ DF_REF_ID (*ref_vec) = offset++;
+ }
+ ref_vec++;
+ }
+ return offset;
+}
+
+
+/* Add the refs in all of the insns of BB to the table in REF_INFO
+ starting at OFFSET, and return the new offset. Include the defs
+ if INCLUDE_DEFS. Include the uses if INCLUDE_USES. Include the
+ eq_uses if INCLUDE_EQ_USES. */
+
+static unsigned int
+df_reorganize_refs_by_insn_bb (basic_block bb, unsigned int offset,
+ struct df_ref_info *ref_info,
+ bool include_defs, bool include_uses,
+ bool include_eq_uses)
+{
+ rtx insn;
+
+ if (include_defs)
+ offset = df_add_refs_to_table (offset, ref_info,
+ df_get_artificial_defs (bb->index));
+ if (include_uses)
+ offset = df_add_refs_to_table (offset, ref_info,
+ df_get_artificial_uses (bb->index));
+
+ FOR_BB_INSNS (bb, insn)
+ if (INSN_P (insn))
+ {
+ unsigned int uid = INSN_UID (insn);
+ if (include_defs)
+ offset = df_add_refs_to_table (offset, ref_info,
+ DF_INSN_UID_DEFS (uid));
+ if (include_uses)
+ offset = df_add_refs_to_table (offset, ref_info,
+ DF_INSN_UID_USES (uid));
+ if (include_eq_uses)
+ offset = df_add_refs_to_table (offset, ref_info,
+ DF_INSN_UID_EQ_USES (uid));
+ }
+ return offset;
+}
+
+
+/* Organize the refs by insn into the table in REF_INFO. If
+ blocks_to_analyze is defined, use that set, otherwise the entire
+ program. Include the defs if INCLUDE_DEFS. Include the uses if
+ INCLUDE_USES. Include the eq_uses if INCLUDE_EQ_USES. */
+
+static void
+df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
+ bool include_defs, bool include_uses,
+ bool include_eq_uses)
+{
+ basic_block bb;
+ unsigned int offset = 0;
+
+ ref_info->total_size
+ = df_count_refs (include_defs, include_uses, include_eq_uses);
+ df_check_and_grow_ref_info (ref_info, 1);
+ if (df->blocks_to_analyze)
+ {
+ bitmap_iterator bi;
+ unsigned int index;
+
+ EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, index, bi)
+ {
+ offset = df_reorganize_refs_by_insn_bb (BASIC_BLOCK (index), offset, ref_info,
+ include_defs, include_uses,
+ include_eq_uses);
+ }
+
+ ref_info->table_size = offset;
+ }
+ else
+ {
+ FOR_ALL_BB (bb)
+ offset = df_reorganize_refs_by_insn_bb (bb, offset, ref_info,
+ include_defs, include_uses,
+ include_eq_uses);
+ ref_info->table_size = offset;
+ }
+}
+
+
+/* If the use refs in DF are not organized, reorganize them. */
+
+void
+df_maybe_reorganize_use_refs (enum df_ref_order order)
+{
+ if (order == df->use_info.ref_order)
+ return;
+
+ switch (order)
+ {
+ case DF_REF_ORDER_BY_REG:
+ df_reorganize_refs_by_reg (&df->use_info, false, true, false);
+ break;
+
+ case DF_REF_ORDER_BY_REG_WITH_NOTES:
+ df_reorganize_refs_by_reg (&df->use_info, false, true, true);
+ break;
+
+ case DF_REF_ORDER_BY_INSN:
+ df_reorganize_refs_by_insn (&df->use_info, false, true, false);
+ break;
+
+ case DF_REF_ORDER_BY_INSN_WITH_NOTES:
+ df_reorganize_refs_by_insn (&df->use_info, false, true, true);
+ break;
+
+ case DF_REF_ORDER_NO_TABLE:
+ free (df->use_info.refs);
+ df->use_info.refs = NULL;
+ df->use_info.refs_size = 0;
+ break;
+
+ case DF_REF_ORDER_UNORDERED:
+ case DF_REF_ORDER_UNORDERED_WITH_NOTES:
+ gcc_unreachable ();
+ break;
+ }
+
+ df->use_info.ref_order = order;
+}
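+
+/* For example, a client that wants to scan the uses of each register
+ through the flat table would first call (sketch):
+
+    df_maybe_reorganize_use_refs (DF_REF_ORDER_BY_REG);
+
+ or ask for DF_REF_ORDER_BY_REG_WITH_NOTES if the REG_EQUAL and
+ REG_EQUIV uses are wanted as well. */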
+
+
+/* If the def refs in DF are not organized, reorganize them. */
+
+void
+df_maybe_reorganize_def_refs (enum df_ref_order order)
+{
+ if (order == df->def_info.ref_order)
+ return;
+
+ switch (order)
+ {
+ case DF_REF_ORDER_BY_REG:
+ df_reorganize_refs_by_reg (&df->def_info, true, false, false);
+ break;
+
+ case DF_REF_ORDER_BY_INSN:
+ df_reorganize_refs_by_insn (&df->def_info, true, false, false);
+ break;
+
+ case DF_REF_ORDER_NO_TABLE:
+ free (df->def_info.refs);
+ df->def_info.refs = NULL;
+ df->def_info.refs_size = 0;
+ break;
+
+ case DF_REF_ORDER_BY_INSN_WITH_NOTES:
+ case DF_REF_ORDER_BY_REG_WITH_NOTES:
+ case DF_REF_ORDER_UNORDERED:
+ case DF_REF_ORDER_UNORDERED_WITH_NOTES:
+ gcc_unreachable ();
+ break;
+ }
+
+ df->def_info.ref_order = order;
+}
+
+
+/* Change all of the basic block references in INSN to use the insn's
+ current basic block. This function is called from routines that move
+ instructions from one block to another. */
+
+void
+df_insn_change_bb (rtx insn, basic_block new_bb)
+{
+ basic_block old_bb = BLOCK_FOR_INSN (insn);
+ struct df_insn_info *insn_info;
+ unsigned int uid = INSN_UID (insn);
+
+ if (old_bb == new_bb)
+ return;
+
+ set_block_for_insn (insn, new_bb);
+
+ if (!df)
+ return;
+
+ if (dump_file)
+ fprintf (dump_file, "changing bb of uid %d\n", uid);
+
+ insn_info = DF_INSN_UID_SAFE_GET (uid);
+ if (insn_info == NULL)
+ {
+ if (dump_file)
+ fprintf (dump_file, " unscanned insn\n");
+ df_insn_rescan (insn);
+ return;
+ }
+
+ if (!INSN_P (insn))
+ return;
+
+ df_set_bb_dirty (new_bb);
+ if (old_bb)
+ {
+ if (dump_file)
+ fprintf (dump_file, " from %d to %d\n",
+ old_bb->index, new_bb->index);
+ df_set_bb_dirty (old_bb);
+ }
+ else
+ if (dump_file)
+ fprintf (dump_file, " to %d\n", new_bb->index);
+}
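+
+/* A sketch of a typical (hypothetical) caller: code that has already
+ spliced INSN into the insn list of NEW_BB then does
+
+    df_insn_change_bb (insn, new_bb);
+
+ which updates BLOCK_FOR_INSN and marks both the old and the new
+ block dirty so that the next df_analyze rescans them. */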
+
+
+/* Helper function for df_ref_change_reg_with_loc. */
+
+static void
+df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df,
+ struct df_reg_info *new_df,
+ int new_regno, rtx loc)
+{
+ df_ref the_ref = old_df->reg_chain;
+
+ while (the_ref)
+ {
+ if ((!DF_REF_IS_ARTIFICIAL (the_ref))
+ && (DF_REF_LOC (the_ref))
+ && (*DF_REF_LOC (the_ref) == loc))
+ {
+ df_ref next_ref = DF_REF_NEXT_REG (the_ref);
+ df_ref prev_ref = DF_REF_PREV_REG (the_ref);
+ df_ref *ref_vec, *ref_vec_t;
+ struct df_insn_info *insn_info = DF_REF_INSN_INFO (the_ref);
+ unsigned int count = 0;
+
+ DF_REF_REGNO (the_ref) = new_regno;
+ DF_REF_REG (the_ref) = regno_reg_rtx[new_regno];
+
+ /* Pull the_ref out of the old regno chain. */
+ if (prev_ref)
+ DF_REF_NEXT_REG (prev_ref) = next_ref;
+ else
+ old_df->reg_chain = next_ref;
+ if (next_ref)
+ DF_REF_PREV_REG (next_ref) = prev_ref;
+ old_df->n_refs--;
+
+ /* Put the ref into the new regno chain. */
+ DF_REF_PREV_REG (the_ref) = NULL;
+ DF_REF_NEXT_REG (the_ref) = new_df->reg_chain;
+ if (new_df->reg_chain)
+ DF_REF_PREV_REG (new_df->reg_chain) = the_ref;
+ new_df->reg_chain = the_ref;
+ new_df->n_refs++;
+ if (DF_REF_BB (the_ref))
+ df_set_bb_dirty (DF_REF_BB (the_ref));
+
+ /* Need to re-sort the record that the ref was in because the
+ regno is a sorting key. First, find the right
+ record. */
+ if (DF_REF_FLAGS (the_ref) & DF_REF_IN_NOTE)
+ ref_vec = insn_info->eq_uses;
+ else
+ ref_vec = insn_info->uses;
+ if (dump_file)
+ fprintf (dump_file, "changing reg in insn %d\n",
+ DF_REF_INSN_UID (the_ref));
+
+ ref_vec_t = ref_vec;
+
+ /* Find the length. */
+ while (*ref_vec_t)
+ {
+ count++;
+ ref_vec_t++;
+ }
+ qsort (ref_vec, count, sizeof (df_ref), df_ref_compare);
+
+ the_ref = next_ref;
+ }
+ else
+ the_ref = DF_REF_NEXT_REG (the_ref);
+ }
+}
+
+
+/* Change the regno of all refs that contained LOC from OLD_REGNO to
+ NEW_REGNO. Refs that do not match LOC are not changed which means
+ that artificial refs are not changed since they have no loc. This
+ call is to support the SET_REGNO macro. */
+
+void
+df_ref_change_reg_with_loc (int old_regno, int new_regno, rtx loc)
+{
+ if ((!df) || (old_regno == -1) || (old_regno == new_regno))
+ return;
+
+ df_grow_reg_info ();
+
+ df_ref_change_reg_with_loc_1 (DF_REG_DEF_GET (old_regno),
+ DF_REG_DEF_GET (new_regno), new_regno, loc);
+ df_ref_change_reg_with_loc_1 (DF_REG_USE_GET (old_regno),
+ DF_REG_USE_GET (new_regno), new_regno, loc);
+ df_ref_change_reg_with_loc_1 (DF_REG_EQ_USE_GET (old_regno),
+ DF_REG_EQ_USE_GET (new_regno), new_regno, loc);
+}
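+
+/* This function is what allows SET_REGNO to be used while df is
+ active. A sketch of the rtl.h definition (an assumption about this
+ vintage of the macro, not quoted verbatim):
+
+    #define SET_REGNO(RTX,N) \
+      (df_ref_change_reg_with_loc (REGNO (RTX), N, RTX), \
+       XCUINT (RTX, 0, REG) = N)
+
+ so every regno rewrite keeps the reg chains consistent. */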
+
+
+/* Delete the mw_hardregs that point into the eq_notes. */
+
+static unsigned int
+df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
+{
+ struct df_mw_hardreg **mw_vec = insn_info->mw_hardregs;
+ unsigned int deleted = 0;
+ unsigned int count = 0;
+ struct df_scan_problem_data *problem_data
+ = (struct df_scan_problem_data *) df_scan->problem_data;
+
+ if (!*mw_vec)
+ return 0;
+
+ while (*mw_vec)
+ {
+ if ((*mw_vec)->flags & DF_REF_IN_NOTE)
+ {
+ struct df_mw_hardreg **temp_vec = mw_vec;
+
+ pool_free (problem_data->mw_reg_pool, *mw_vec);
+ /* Shove the remaining ones down one to fill the gap. While
+ this looks n**2, it is highly unusual to have any mw regs
+ in eq_notes and the chances of more than one are almost
+ nonexistent. */
+ while (*temp_vec)
+ {
+ *temp_vec = *(temp_vec + 1);
+ temp_vec++;
+ }
+ deleted++;
+ }
+ else
+ {
+ mw_vec++;
+ count++;
+ }
+ }
+
+ if (count == 0)
+ {
+ df_scan_free_mws_vec (insn_info->mw_hardregs);
+ insn_info->mw_hardregs = df_null_mw_rec;
+ return 0;
+ }
+ return deleted;
+}
+
+
+/* Rescan only the REG_EQUIV/REG_EQUAL notes part of INSN. */
+
+void
+df_notes_rescan (rtx insn)
+{
+ struct df_insn_info *insn_info;
+ unsigned int uid = INSN_UID (insn);
+
+ if (!df)
+ return;
+
+ /* The client has disabled rescanning and plans to do it itself. */
+ if (df->changeable_flags & DF_NO_INSN_RESCAN)
+ return;
+
+ /* Do nothing if the insn hasn't been emitted yet. */
+ if (!BLOCK_FOR_INSN (insn))
+ return;
+
+ df_grow_bb_info (df_scan);
+ df_grow_reg_info ();
+
+ insn_info = DF_INSN_UID_SAFE_GET (uid);
+
+ /* The client has deferred rescanning. */
+ if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
+ {
+ if (!insn_info)
+ {
+ insn_info = df_insn_create_insn_record (insn);
+ insn_info->defs = df_null_ref_rec;
+ insn_info->uses = df_null_ref_rec;
+ insn_info->eq_uses = df_null_ref_rec;
+ insn_info->mw_hardregs = df_null_mw_rec;
+ }
+
+ bitmap_clear_bit (df->insns_to_delete, uid);
+ /* If the insn is set to be rescanned, it does not need to also
+ be notes rescanned. */
+ if (!bitmap_bit_p (df->insns_to_rescan, uid))
+ bitmap_set_bit (df->insns_to_notes_rescan, uid);
+ return;
+ }
+
+ bitmap_clear_bit (df->insns_to_delete, uid);
+ bitmap_clear_bit (df->insns_to_notes_rescan, uid);
+
+ if (insn_info)
+ {
+ basic_block bb = BLOCK_FOR_INSN (insn);
+ rtx note;
+ struct df_collection_rec collection_rec;
+ unsigned int num_deleted;
+ unsigned int mw_len;
+
+ memset (&collection_rec, 0, sizeof (struct df_collection_rec));
+ collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
+ collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
+
+ num_deleted = df_mw_hardreg_chain_delete_eq_uses (insn_info);
+ df_ref_chain_delete (insn_info->eq_uses);
+ insn_info->eq_uses = NULL;
+
+ /* Process REG_EQUIV/REG_EQUAL notes. */
+ for (note = REG_NOTES (insn); note;
+ note = XEXP (note, 1))
+ {
+ switch (REG_NOTE_KIND (note))
+ {
+ case REG_EQUIV:
+ case REG_EQUAL:
+ df_uses_record (DF_REF_REGULAR, &collection_rec,
+ &XEXP (note, 0), DF_REF_REG_USE,
+ bb, insn_info, DF_REF_IN_NOTE, -1, -1, VOIDmode);
+ default:
+ break;
+ }
+ }
+
+ /* Find some place to put any new mw_hardregs. */
+ df_canonize_collection_rec (&collection_rec);
+ mw_len = VEC_length (df_mw_hardreg_ptr, collection_rec.mw_vec);
+ if (mw_len)
+ {
+ unsigned int count = 0;
+ struct df_mw_hardreg **mw_rec = insn_info->mw_hardregs;
+ while (*mw_rec)
+ {
+ count++;
+ mw_rec++;
+ }
+
+ if (count)
+ {
+ /* Append to the end of the existing record after
+ expanding it if necessary. */
+ if (mw_len > num_deleted)
+ {
+ insn_info->mw_hardregs =
+ XRESIZEVEC (struct df_mw_hardreg *,
+ insn_info->mw_hardregs,
+ count + 1 + mw_len);
+ }
+ memcpy (&insn_info->mw_hardregs[count],
+ VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
+ mw_len * sizeof (struct df_mw_hardreg *));
+ insn_info->mw_hardregs[count + mw_len] = NULL;
+ qsort (insn_info->mw_hardregs, count + mw_len,
+ sizeof (struct df_mw_hardreg *), df_mw_compare);
+ }
+ else
+ {
+ /* No vector there. */
+ insn_info->mw_hardregs
+ = XNEWVEC (struct df_mw_hardreg*, 1 + mw_len);
+ memcpy (insn_info->mw_hardregs,
+ VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
+ mw_len * sizeof (struct df_mw_hardreg *));
+ insn_info->mw_hardregs[mw_len] = NULL;
+ }
+ }
+ /* Get rid of the mw_rec so that df_refs_add_to_chains will
+ ignore it. */
+ VEC_free (df_mw_hardreg_ptr, stack, collection_rec.mw_vec);
+ df_refs_add_to_chains (&collection_rec, bb, insn);
+ VEC_free (df_ref, stack, collection_rec.eq_use_vec);
+ }
+ else
+ df_insn_rescan (insn);
+
+}
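+
+/* Sketch of the intended use (hypothetical caller): after editing
+ only the REG_EQUAL/REG_EQUIV notes of INSN, e.g.
+
+    add_reg_note (insn, REG_EQUAL, src);
+    df_notes_rescan (insn);
+
+ this is cheaper than a full df_insn_rescan since the defs and the
+ plain uses of the insn are left untouched. */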
+
+\f
+/*----------------------------------------------------------------------------
+ Hard core instruction scanning code. No external interfaces here,
+ just a lot of routines that look inside insns.
+----------------------------------------------------------------------------*/
+
+
+/* Return true if the contents of two df_refs are identical,
+ ignoring the DF_REF_REG_MARKER and DF_REF_MW_HARDREG flags. */
+
+static bool
+df_ref_equal_p (df_ref ref1, df_ref ref2)
+{
+ if (!ref2)
+ return false;
+
+ if (ref1 == ref2)
+ return true;
+
+ if (DF_REF_CLASS (ref1) != DF_REF_CLASS (ref2)
+ || DF_REF_REGNO (ref1) != DF_REF_REGNO (ref2)
+ || DF_REF_REG (ref1) != DF_REF_REG (ref2)
+ || DF_REF_TYPE (ref1) != DF_REF_TYPE (ref2)
+ || ((DF_REF_FLAGS (ref1) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG))
+ != (DF_REF_FLAGS (ref2) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG)))
+ || DF_REF_BB (ref1) != DF_REF_BB (ref2)
+ || DF_REF_INSN_INFO (ref1) != DF_REF_INSN_INFO (ref2))
+ return false;
+
+ switch (DF_REF_CLASS (ref1))
+ {
+ case DF_REF_ARTIFICIAL:
+ case DF_REF_BASE:
+ return true;
+
+ case DF_REF_EXTRACT:
+ if ((DF_REF_EXTRACT_OFFSET (ref1) != DF_REF_EXTRACT_OFFSET (ref2))
+ || (DF_REF_EXTRACT_WIDTH (ref1) != DF_REF_EXTRACT_WIDTH (ref2))
+ || (DF_REF_EXTRACT_MODE (ref1) != DF_REF_EXTRACT_MODE (ref2)))
+ return false;
+ /* fallthru. */
+
+ case DF_REF_REGULAR:
+ return DF_REF_LOC (ref1) == DF_REF_LOC (ref2);
+
+ default:
+ gcc_unreachable ();
+ }
+ return false;
+}
+
+
+/* Compare REF1 and REF2 for sorting. This is only called from places
+ where all of the refs are in the same insn and have the same bb,
+ so those two fields are not checked. */
+
+static int
+df_ref_compare (const void *r1, const void *r2)
+{
+ const df_ref ref1 = *(const df_ref *)r1;
+ const df_ref ref2 = *(const df_ref *)r2;
+
+ if (ref1 == ref2)
+ return 0;
+
+ if (DF_REF_CLASS (ref1) != DF_REF_CLASS (ref2))
+ return (int)DF_REF_CLASS (ref1) - (int)DF_REF_CLASS (ref2);
+
+ if (DF_REF_REGNO (ref1) != DF_REF_REGNO (ref2))
+ return (int)DF_REF_REGNO (ref1) - (int)DF_REF_REGNO (ref2);
+
+ if (DF_REF_TYPE (ref1) != DF_REF_TYPE (ref2))
+ return (int)DF_REF_TYPE (ref1) - (int)DF_REF_TYPE (ref2);
+
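+ /* Two refs with the same regno can still have different REG rtxs,
+ e.g. a plain reg vs. a SUBREG of it; fall back on creation order
+ so the sort stays deterministic. */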
+ if (DF_REF_REG (ref1) != DF_REF_REG (ref2))
+ return (int)DF_REF_ORDER (ref1) - (int)DF_REF_ORDER (ref2);
+
+ /* Cannot look at the LOC field on artificial refs. */
+ if (DF_REF_CLASS (ref1) != DF_REF_ARTIFICIAL
+ && DF_REF_LOC (ref1) != DF_REF_LOC (ref2))
+ return (int)DF_REF_ORDER (ref1) - (int)DF_REF_ORDER (ref2);
+
+ if (DF_REF_FLAGS (ref1) != DF_REF_FLAGS (ref2))
+ {
+ /* If two refs are identical except that one of them is from a mw
+ and one is not, we need to have the one with the mw
+ first. */
+ if (DF_REF_FLAGS_IS_SET (ref1, DF_REF_MW_HARDREG) ==
+ DF_REF_FLAGS_IS_SET (ref2, DF_REF_MW_HARDREG))
+ return DF_REF_FLAGS (ref1) - DF_REF_FLAGS (ref2);
+ else if (DF_REF_FLAGS_IS_SET (ref1, DF_REF_MW_HARDREG))
+ return -1;
+ else
+ return 1;
+ }
+
+ /* The classes are the same at this point so it is safe to only look
+ at ref1. */
+ if (DF_REF_CLASS (ref1) == DF_REF_EXTRACT)
+ {
+ if (DF_REF_EXTRACT_OFFSET (ref1) != DF_REF_EXTRACT_OFFSET (ref2))
+ return DF_REF_EXTRACT_OFFSET (ref1) - DF_REF_EXTRACT_OFFSET (ref2);
+ if (DF_REF_EXTRACT_WIDTH (ref1) != DF_REF_EXTRACT_WIDTH (ref2))
+ return DF_REF_EXTRACT_WIDTH (ref1) - DF_REF_EXTRACT_WIDTH (ref2);
+ if (DF_REF_EXTRACT_MODE (ref1) != DF_REF_EXTRACT_MODE (ref2))
+ return DF_REF_EXTRACT_MODE (ref1) - DF_REF_EXTRACT_MODE (ref2);
+ }
+ return 0;
+}
+
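+/* Swap the refs at positions I and J of *REF_VEC. */
+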
+static void
+df_swap_refs (VEC(df_ref,stack) **ref_vec, int i, int j)
+{
+ df_ref tmp = VEC_index (df_ref, *ref_vec, i);
+ VEC_replace (df_ref, *ref_vec, i, VEC_index (df_ref, *ref_vec, j));
+ VEC_replace (df_ref, *ref_vec, j, tmp);
+}
+
+/* Sort and compress a set of refs. */
+
+static void
+df_sort_and_compress_refs (VEC(df_ref,stack) **ref_vec)
+{
+ unsigned int count;
+ unsigned int i;
+ unsigned int dist = 0;
+
+ count = VEC_length (df_ref, *ref_vec);
+
+ /* If there are 1 or 0 elements, there is nothing to do. */
+ if (count < 2)
+ return;
+ else if (count == 2)
+ {
+ df_ref r0 = VEC_index (df_ref, *ref_vec, 0);
+ df_ref r1 = VEC_index (df_ref, *ref_vec, 1);
+ if (df_ref_compare (&r0, &r1) > 0)
+ df_swap_refs (ref_vec, 0, 1);
+ }
+ else
+ {
+ for (i = 0; i < count - 1; i++)
+ {
+ df_ref r0 = VEC_index (df_ref, *ref_vec, i);
+ df_ref r1 = VEC_index (df_ref, *ref_vec, i + 1);
+ if (df_ref_compare (&r0, &r1) >= 0)
+ break;
+ }
+ /* If the array is already strictly ordered, which is the most
+ common case when COUNT is large (as it is for CALL insns),
+ there is no need to sort and filter out duplicates; simply
+ return. Make sure DF_GET_ADD_REFS adds refs in the
+ increasing order of DF_REF_COMPARE. */
+ if (i == count - 1)
+ return;
+ qsort (VEC_address (df_ref, *ref_vec), count, sizeof (df_ref),
+ df_ref_compare);
+ }
+
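+ /* Compact the sorted vector in place: DIST counts the duplicates
+ freed so far, and each surviving ref slides down by that
+ amount. */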
+ for (i = 0; i < count - dist; i++)
+ {
+ /* Find the next ref that is not equal to the current ref. */
+ while (i + dist + 1 < count
+ && df_ref_equal_p (VEC_index (df_ref, *ref_vec, i),
+ VEC_index (df_ref, *ref_vec, i + dist + 1)))
+ {
+ df_free_ref (VEC_index (df_ref, *ref_vec, i + dist + 1));
+ dist++;
+ }
+ /* Copy it down to the next position. */
+ if (dist && i + dist + 1 < count)
+ VEC_replace (df_ref, *ref_vec, i + 1,
+ VEC_index (df_ref, *ref_vec, i + dist + 1));
+ }
+
+ count -= dist;
+ VEC_truncate (df_ref, *ref_vec, count);
+}
+
+
+/* Return true if the contents of two df_mw_hardregs are
+ identical. */
+
+static bool
+df_mw_equal_p (struct df_mw_hardreg *mw1, struct df_mw_hardreg *mw2)
+{
+ if (!mw2)
+ return false;
+ return (mw1 == mw2) ||
+ (mw1->mw_reg == mw2->mw_reg
+ && mw1->type == mw2->type
+ && mw1->flags == mw2->flags
+ && mw1->start_regno == mw2->start_regno
+ && mw1->end_regno == mw2->end_regno);
+}
+
+
+/* Compare MW1 and MW2 for sorting. */
+
+static int
+df_mw_compare (const void *m1, const void *m2)
+{
+ const struct df_mw_hardreg *const mw1 = *(const struct df_mw_hardreg *const*)m1;
+ const struct df_mw_hardreg *const mw2 = *(const struct df_mw_hardreg *const*)m2;
+
+ if (mw1 == mw2)
+ return 0;
+
+ if (mw1->type != mw2->type)
+ return mw1->type - mw2->type;
+
+ if (mw1->flags != mw2->flags)
+ return mw1->flags - mw2->flags;
+
+ if (mw1->start_regno != mw2->start_regno)
+ return mw1->start_regno - mw2->start_regno;
+
+ if (mw1->end_regno != mw2->end_regno)
+ return mw1->end_regno - mw2->end_regno;
+
+ if (mw1->mw_reg != mw2->mw_reg)
+ return mw1->mw_order - mw2->mw_order;
+
+ return 0;
+}
+
+
+/* Sort and compress a set of mws. */
+
+static void
+df_sort_and_compress_mws (VEC(df_mw_hardreg_ptr,stack) **mw_vec)
+{
+ unsigned int count;
+ struct df_scan_problem_data *problem_data
+ = (struct df_scan_problem_data *) df_scan->problem_data;
+ unsigned int i;
+ unsigned int dist = 0;
+
+ count = VEC_length (df_mw_hardreg_ptr, *mw_vec);
+ if (count < 2)
+ return;
+ else if (count == 2)
+ {
+ struct df_mw_hardreg *m0 = VEC_index (df_mw_hardreg_ptr, *mw_vec, 0);
+ struct df_mw_hardreg *m1 = VEC_index (df_mw_hardreg_ptr, *mw_vec, 1);
+ if (df_mw_compare (&m0, &m1) > 0)
+ {
+ struct df_mw_hardreg *tmp = VEC_index (df_mw_hardreg_ptr,
+ *mw_vec, 0);
+ VEC_replace (df_mw_hardreg_ptr, *mw_vec, 0,
+ VEC_index (df_mw_hardreg_ptr, *mw_vec, 1));
+ VEC_replace (df_mw_hardreg_ptr, *mw_vec, 1, tmp);
+ }
+ }
+ else
+ qsort (VEC_address (df_mw_hardreg_ptr, *mw_vec), count,
+ sizeof (struct df_mw_hardreg *), df_mw_compare);
+
+ for (i = 0; i < count - dist; i++)
+ {
+ /* Find the next ref that is not equal to the current ref. */
+ while (i + dist + 1 < count
+ && df_mw_equal_p (VEC_index (df_mw_hardreg_ptr, *mw_vec, i),
+ VEC_index (df_mw_hardreg_ptr, *mw_vec,
+ i + dist + 1)))
+ {
+ pool_free (problem_data->mw_reg_pool,
+ VEC_index (df_mw_hardreg_ptr, *mw_vec, i + dist + 1));
+ dist++;
+ }
+ /* Copy it down to the next position. */
+ if (dist && i + dist + 1 < count)
+ VEC_replace (df_mw_hardreg_ptr, *mw_vec, i + 1,
+ VEC_index (df_mw_hardreg_ptr, *mw_vec, i + dist + 1));
+ }
+
+ count -= dist;
+ VEC_truncate (df_mw_hardreg_ptr, *mw_vec, count);
+}
+
+
+/* Sort and remove duplicates from the COLLECTION_REC. */
+
+static void
+df_canonize_collection_rec (struct df_collection_rec *collection_rec)
+{
+ df_sort_and_compress_refs (&collection_rec->def_vec);
+ df_sort_and_compress_refs (&collection_rec->use_vec);
+ df_sort_and_compress_refs (&collection_rec->eq_use_vec);
+ df_sort_and_compress_mws (&collection_rec->mw_vec);
+}
+
+
+/* Add the new df_ref to appropriate reg_info/ref_info chains. */
+
+static void
+df_install_ref (df_ref this_ref,
+ struct df_reg_info *reg_info,
+ struct df_ref_info *ref_info,
+ bool add_to_table)
+{
+ unsigned int regno = DF_REF_REGNO (this_ref);
+ /* Add the ref to the reg_{def,use,eq_use} chain. */
+ df_ref head = reg_info->reg_chain;
+
+ reg_info->reg_chain = this_ref;
+ reg_info->n_refs++;
+
+ if (DF_REF_FLAGS_IS_SET (this_ref, DF_HARD_REG_LIVE))
+ {
+ gcc_assert (regno < FIRST_PSEUDO_REGISTER);
+ df->hard_regs_live_count[regno]++;
+ }
+
+ gcc_assert (DF_REF_NEXT_REG (this_ref) == NULL
+ && DF_REF_PREV_REG (this_ref) == NULL);
+
+ DF_REF_NEXT_REG (this_ref) = head;
+
+ /* We cannot actually link to the head of the chain. */
+ DF_REF_PREV_REG (this_ref) = NULL;
+
+ if (head)
+ DF_REF_PREV_REG (head) = this_ref;
+
+ if (add_to_table)
+ {
+ gcc_assert (ref_info->ref_order != DF_REF_ORDER_NO_TABLE);
+ df_check_and_grow_ref_info (ref_info, 1);
+ DF_REF_ID (this_ref) = ref_info->table_size;
+ /* Add the ref to the big array of defs. */
+ ref_info->refs[ref_info->table_size] = this_ref;
+ ref_info->table_size++;
+ }
+ else
+ DF_REF_ID (this_ref) = -1;
+
+ ref_info->total_size++;
+}
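+
+/* After df_install_ref, the new ref sits at the head of the doubly
+ linked chain for its register (sketch):
+
+    reg_info->reg_chain --> this_ref <--> old head <--> ...
+
+ with DF_REF_PREV_REG of the new head being NULL. */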
+
+
+/* This function takes one of the groups of refs (defs, uses or
+ eq_uses) and installs the entire group into the insn. It also adds
+ each of these refs into the appropriate chains. */
+
+static df_ref *
+df_install_refs (basic_block bb,
+ VEC(df_ref,stack)* old_vec,
+ struct df_reg_info **reg_info,
+ struct df_ref_info *ref_info,
+ bool is_notes)
+{
+ unsigned int count;
+
+ count = VEC_length (df_ref, old_vec);
+ if (count)
+ {
+ df_ref *new_vec = XNEWVEC (df_ref, count + 1);
+ bool add_to_table;
+ df_ref this_ref;
+ unsigned int ix;
+
+ switch (ref_info->ref_order)
+ {
+ case DF_REF_ORDER_UNORDERED_WITH_NOTES:
+ case DF_REF_ORDER_BY_REG_WITH_NOTES:
+ case DF_REF_ORDER_BY_INSN_WITH_NOTES:
+ ref_info->ref_order = DF_REF_ORDER_UNORDERED_WITH_NOTES;
+ add_to_table = true;
+ break;
+ case DF_REF_ORDER_UNORDERED:
+ case DF_REF_ORDER_BY_REG:
+ case DF_REF_ORDER_BY_INSN:
+ ref_info->ref_order = DF_REF_ORDER_UNORDERED;
+ add_to_table = !is_notes;
+ break;
+ default:
+ add_to_table = false;
+ break;
+ }
+
+ /* Do not add if ref is not in the right blocks. */
+ if (add_to_table && df->analyze_subset)
+ add_to_table = bitmap_bit_p (df->blocks_to_analyze, bb->index);
+
+ for (ix = 0; VEC_iterate (df_ref, old_vec, ix, this_ref); ++ix)
+ {
+ new_vec[ix] = this_ref;
+ df_install_ref (this_ref, reg_info[DF_REF_REGNO (this_ref)],
+ ref_info, add_to_table);
+ }
+
+ new_vec[count] = NULL;
+ return new_vec;
+ }
+ else
+ return df_null_ref_rec;
+}
+
+
+/* This function takes the mws and installs the entire group into the
+ insn. */
+
+static struct df_mw_hardreg **
+df_install_mws (VEC(df_mw_hardreg_ptr,stack) *old_vec)
+{
+ unsigned int count;
+
+ count = VEC_length (df_mw_hardreg_ptr, old_vec);
+ if (count)
+ {
+ struct df_mw_hardreg **new_vec
+ = XNEWVEC (struct df_mw_hardreg*, count + 1);
+ memcpy (new_vec, VEC_address (df_mw_hardreg_ptr, old_vec),
+ sizeof (struct df_mw_hardreg*) * count);
+ new_vec[count] = NULL;
+ return new_vec;
+ }
+ else
+ return df_null_mw_rec;
+}
+
+
+/* Add a chain of df_refs to appropriate ref chain/reg_info/ref_info
+ chains and update other necessary information. */
+
+static void
+df_refs_add_to_chains (struct df_collection_rec *collection_rec,
+ basic_block bb, rtx insn)
+{
+ if (insn)
+ {
+ struct df_insn_info *insn_rec = DF_INSN_INFO_GET (insn);
+ /* If there is a vector in the collection rec, add it to the
+ insn. A null rec is a signal that the caller will handle the
+ chain specially. */
+ if (collection_rec->def_vec)
+ {
+ df_scan_free_ref_vec (insn_rec->defs);
+ insn_rec->defs
+ = df_install_refs (bb, collection_rec->def_vec,
+ df->def_regs,
+ &df->def_info, false);
+ }
+ if (collection_rec->use_vec)
+ {
+ df_scan_free_ref_vec (insn_rec->uses);
+ insn_rec->uses
+ = df_install_refs (bb, collection_rec->use_vec,
+ df->use_regs,
+ &df->use_info, false);
+ }
+ if (collection_rec->eq_use_vec)
+ {
+ df_scan_free_ref_vec (insn_rec->eq_uses);
+ insn_rec->eq_uses
+ = df_install_refs (bb, collection_rec->eq_use_vec,
+ df->eq_use_regs,
+ &df->use_info, true);
+ }
+ if (collection_rec->mw_vec)
+ {
+ df_scan_free_mws_vec (insn_rec->mw_hardregs);
+ insn_rec->mw_hardregs
+ = df_install_mws (collection_rec->mw_vec);
+ }
+ }
+ else
+ {
+ struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);
+
+ df_scan_free_ref_vec (bb_info->artificial_defs);
+ bb_info->artificial_defs
+ = df_install_refs (bb, collection_rec->def_vec,
+ df->def_regs,
+ &df->def_info, false);
+ df_scan_free_ref_vec (bb_info->artificial_uses);
+ bb_info->artificial_uses
+ = df_install_refs (bb, collection_rec->use_vec,
+ df->use_regs,
+ &df->use_info, false);
+ }
+}
+
+
+/* Allocate a ref and initialize its fields.
+
+ If the REF_FLAGS field contains DF_REF_SIGN_EXTRACT or
+ DF_REF_ZERO_EXTRACT, then WIDTH, OFFSET and MODE describe the
+ extracted part when the extract operands were constants; otherwise
+ WIDTH and OFFSET should be -1 and MODE should be VOIDmode even
+ though those flags are set. */
+
+static df_ref
+df_ref_create_structure (enum df_ref_class cl,
+ struct df_collection_rec *collection_rec,
+ rtx reg, rtx *loc,
+ basic_block bb, struct df_insn_info *info,
+ enum df_ref_type ref_type,
+ int ref_flags,
+ int width, int offset, enum machine_mode mode)
+{
+ df_ref this_ref = NULL;
+ int regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
+ struct df_scan_problem_data *problem_data
+ = (struct df_scan_problem_data *) df_scan->problem_data;
+
+ switch (cl)
+ {
+ case DF_REF_BASE:
+ this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
+ gcc_assert (loc == NULL);
+ break;
+
+ case DF_REF_ARTIFICIAL:
+ this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
+ this_ref->artificial_ref.bb = bb;
+ gcc_assert (loc == NULL);
+ break;
+
+ case DF_REF_REGULAR:
+ this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
+ this_ref->regular_ref.loc = loc;
+ gcc_assert (loc);
+ break;
+
+ case DF_REF_EXTRACT:
+ this_ref = (df_ref) pool_alloc (problem_data->ref_extract_pool);
+ DF_REF_EXTRACT_WIDTH (this_ref) = width;
+ DF_REF_EXTRACT_OFFSET (this_ref) = offset;
+ DF_REF_EXTRACT_MODE (this_ref) = mode;
+ this_ref->regular_ref.loc = loc;
+ gcc_assert (loc);
+ break;
+ }
+
+ DF_REF_CLASS (this_ref) = cl;
+ DF_REF_ID (this_ref) = -1;
+ DF_REF_REG (this_ref) = reg;
+ DF_REF_REGNO (this_ref) = regno;
+ DF_REF_TYPE (this_ref) = ref_type;
+ DF_REF_INSN_INFO (this_ref) = info;
+ DF_REF_CHAIN (this_ref) = NULL;
+ DF_REF_FLAGS (this_ref) = ref_flags;
+ DF_REF_NEXT_REG (this_ref) = NULL;
+ DF_REF_PREV_REG (this_ref) = NULL;
+ DF_REF_ORDER (this_ref) = df->ref_order++;
+
+ /* We need to clear this bit because fwprop, and in the future
+ possibly other optimizations, sometimes create new refs using old
+ refs as the model. */
+ DF_REF_FLAGS_CLEAR (this_ref, DF_HARD_REG_LIVE);
+
+ /* See if this ref needs to have DF_HARD_REG_LIVE bit set. */
+ if ((regno < FIRST_PSEUDO_REGISTER)
+ && (!DF_REF_IS_ARTIFICIAL (this_ref)))
+ {
+ if (DF_REF_REG_DEF_P (this_ref))
+ {
+ if (!DF_REF_FLAGS_IS_SET (this_ref, DF_REF_MAY_CLOBBER))
+ DF_REF_FLAGS_SET (this_ref, DF_HARD_REG_LIVE);
+ }
+ else if (!(TEST_HARD_REG_BIT (elim_reg_set, regno)
+ && (regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM)))
+ DF_REF_FLAGS_SET (this_ref, DF_HARD_REG_LIVE);
+ }
+
+ if (collection_rec)
+ {
+ if (DF_REF_REG_DEF_P (this_ref))
+ VEC_safe_push (df_ref, stack, collection_rec->def_vec, this_ref);
+ else if (DF_REF_FLAGS (this_ref) & DF_REF_IN_NOTE)
+ VEC_safe_push (df_ref, stack, collection_rec->eq_use_vec, this_ref);
+ else
+ VEC_safe_push (df_ref, stack, collection_rec->use_vec, this_ref);
+ }
+
+ return this_ref;
+}
+
+
+/* Create new references of type DF_REF_TYPE for each part of register REG
+ at address LOC within INSN of BB.
+
+ If the REF_FLAGS field contains DF_REF_SIGN_EXTRACT or
+ DF_REF_ZERO_EXTRACT, then WIDTH, OFFSET and MODE describe the
+ extracted part when the extract operands were constants; otherwise
+ WIDTH and OFFSET should be -1 and MODE should be VOIDmode even
+ though those flags are set. */
+
+static void
+df_ref_record (enum df_ref_class cl,
+ struct df_collection_rec *collection_rec,
+ rtx reg, rtx *loc,
+ basic_block bb, struct df_insn_info *insn_info,
+ enum df_ref_type ref_type,
+ int ref_flags,
+ int width, int offset, enum machine_mode mode)
+{
+ unsigned int regno;
+
+ gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);
+
+ regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ struct df_mw_hardreg *hardreg = NULL;
+ struct df_scan_problem_data *problem_data
+ = (struct df_scan_problem_data *) df_scan->problem_data;
+ unsigned int i;
+ unsigned int endregno;
+ df_ref ref;
+
+ if (GET_CODE (reg) == SUBREG)
+ {
+ regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
+ SUBREG_BYTE (reg), GET_MODE (reg));
+ endregno = regno + subreg_nregs (reg);
+ }
+ else
+ endregno = END_HARD_REGNO (reg);
+
+ /* If this is a multiword hardreg, we create some extra
+ datastructures that will enable us to easily build REG_DEAD
+ and REG_UNUSED notes. */
+ if ((endregno != regno + 1) && insn_info)
+ {
+ /* Sets to a subreg of a multiword register are partial.
+ Sets to a non-subreg of a multiword register are not. */
+ if (GET_CODE (reg) == SUBREG)
+ ref_flags |= DF_REF_PARTIAL;
+ ref_flags |= DF_REF_MW_HARDREG;
+
+ hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
+ hardreg->type = ref_type;
+ hardreg->flags = ref_flags;
+ hardreg->mw_reg = reg;
+ hardreg->start_regno = regno;
+ hardreg->end_regno = endregno - 1;
+ hardreg->mw_order = df->ref_order++;
+ VEC_safe_push (df_mw_hardreg_ptr, stack, collection_rec->mw_vec,
+ hardreg);
+ }
+
+ for (i = regno; i < endregno; i++)
+ {
+ ref = df_ref_create_structure (cl, collection_rec, regno_reg_rtx[i], loc,
+ bb, insn_info, ref_type, ref_flags,
+ width, offset, mode);
+
+ gcc_assert (ORIGINAL_REGNO (DF_REF_REG (ref)) == i);
+ }
+ }
+ else
+ {
+ df_ref_create_structure (cl, collection_rec, reg, loc, bb, insn_info,
+ ref_type, ref_flags, width, offset, mode);
+ }
+}
+
+
+/* A set to a non-paradoxical SUBREG for which the number of word_mode
+ units covered by the outer mode is smaller than that covered by the
+ inner mode is a read-modify-write operation. This function returns
+ true iff the SUBREG X is such a SUBREG. */
+
+bool
+df_read_modify_subreg_p (rtx x)
+{
+ unsigned int isize, osize;
+ if (GET_CODE (x) != SUBREG)
+ return false;
+ isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
+ osize = GET_MODE_SIZE (GET_MODE (x));
+ return isize > osize
+ && isize > REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
+}
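+
+/* Example (hypothetical, for a target whose REGMODE_NATURAL_SIZE is 4
+ for DImode): a store to (subreg:SI (reg:DI 100) 0) rewrites only
+ half of reg 100, so the insn implicitly reads the rest first and the
+ predicate returns true; a paradoxical subreg, or one whose outer
+ mode covers the whole inner reg, returns false. */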
+
+
+/* Process all the registers defined in the rtx, X.
+ Autoincrement/decrement definitions will be picked up by
+ df_uses_record. */
+
+static void
+df_def_record_1 (struct df_collection_rec *collection_rec,
+ rtx x, basic_block bb, struct df_insn_info *insn_info,
+ int flags)
+{
+ rtx *loc;
+ rtx dst;
+ int offset = -1;
+ int width = -1;
+ enum machine_mode mode = VOIDmode;
+ enum df_ref_class cl = DF_REF_REGULAR;
+
+ /* We may recursively call ourselves on EXPR_LIST when dealing with a
+ PARALLEL construct. */
+ if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER)
+ loc = &XEXP (x, 0);
+ else
+ loc = &SET_DEST (x);
+ dst = *loc;
+
+ /* It is legal to have a set destination be a parallel. */
+ if (GET_CODE (dst) == PARALLEL)
+ {
+ int i;
+
+ for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
+ {
+ rtx temp = XVECEXP (dst, 0, i);
+ if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER
+ || GET_CODE (temp) == SET)
+ df_def_record_1 (collection_rec,
+ temp, bb, insn_info,
+ GET_CODE (temp) == CLOBBER
+ ? flags | DF_REF_MUST_CLOBBER : flags);
+ }
+ return;
+ }
+
+ if (GET_CODE (dst) == STRICT_LOW_PART)
+ {
+ flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL | DF_REF_STRICT_LOW_PART;
+
+ loc = &XEXP (dst, 0);
+ dst = *loc;
+ }
+
+ if (GET_CODE (dst) == ZERO_EXTRACT)
+ {
+ flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL | DF_REF_ZERO_EXTRACT;
+
+ if (CONST_INT_P (XEXP (dst, 1))
+ && CONST_INT_P (XEXP (dst, 2)))
+ {
+ width = INTVAL (XEXP (dst, 1));
+ offset = INTVAL (XEXP (dst, 2));
+ mode = GET_MODE (dst);
+ cl = DF_REF_EXTRACT;
+ }
+
+ loc = &XEXP (dst, 0);
+ dst = *loc;
+ }
+
+ /* At this point if we do not have a reg or a subreg, just return. */
+ if (REG_P (dst))
+ {
+ df_ref_record (cl, collection_rec,
+ dst, loc, bb, insn_info, DF_REF_REG_DEF, flags,
+ width, offset, mode);
+
+ /* We want to keep sp alive everywhere, by making all
+ writes to sp also uses of sp. */
+ if (REGNO (dst) == STACK_POINTER_REGNUM)
+ df_ref_record (DF_REF_BASE, collection_rec,
+ dst, NULL, bb, insn_info, DF_REF_REG_USE, flags,
+ width, offset, mode);
+ }
+ else if (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst)))
+ {
+ if (df_read_modify_subreg_p (dst))
+ flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL;
+
+ flags |= DF_REF_SUBREG;
+
+ df_ref_record (cl, collection_rec,
+ dst, loc, bb, insn_info, DF_REF_REG_DEF, flags,
+ width, offset, mode);
+ }
+}
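+
+/* For example (sketch), given
+
+    (set (zero_extract:SI (reg:SI 60) (const_int 8) (const_int 4))
+         (reg:SI 61))
+
+ the code above records a DF_REF_EXTRACT def of reg 60 with WIDTH 8,
+ OFFSET 4 and SImode, flagged DF_REF_READ_WRITE | DF_REF_PARTIAL
+ | DF_REF_ZERO_EXTRACT, since only part of reg 60 is written. */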
+
+
+/* Process all the registers defined in the pattern rtx, X. */
+
+static void
+df_defs_record (struct df_collection_rec *collection_rec,
+ rtx x, basic_block bb, struct df_insn_info *insn_info,
+ int flags)
+{
+ RTX_CODE code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ {
+ /* Mark the single def within the pattern. */
+ int clobber_flags = flags;
+ clobber_flags |= (code == CLOBBER) ? DF_REF_MUST_CLOBBER : 0;
+ df_def_record_1 (collection_rec, x, bb, insn_info, clobber_flags);
+ }
+ else if (code == COND_EXEC)
+ {
+ df_defs_record (collection_rec, COND_EXEC_CODE (x),
+ bb, insn_info, DF_REF_CONDITIONAL);
+ }
+ else if (code == PARALLEL)
+ {
+ int i;
+
+ /* Mark the multiple defs within the pattern. */
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ df_defs_record (collection_rec, XVECEXP (x, 0, i), bb, insn_info, flags);
+ }
+}
+
+
+/* Process all the registers used in the rtx at address LOC.
+
+ If the REF_FLAGS field contains DF_REF_SIGN_EXTRACT or
+ DF_REF_ZERO_EXTRACT, then WIDTH, OFFSET and MODE describe the
+ extracted part when the extract operands were constants; otherwise
+ WIDTH and OFFSET should be -1 and MODE should be VOIDmode even
+ though those flags are set. */
+
+static void
+df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
+ rtx *loc, enum df_ref_type ref_type,
+ basic_block bb, struct df_insn_info *insn_info,
+ int flags,
+ int width, int offset, enum machine_mode mode)
+{
+ RTX_CODE code;
+ rtx x;
+
+ retry:
+ x = *loc;
+ if (!x)
+ return;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST: