+/*----------------------------------------------------------------------------
+ The following three functions are used only for FORWARDS scanning:
+ i.e. they process the defs and the REG_DEAD and REG_UNUSED notes.
+ Thus it is important to add the DF_NOTES problem to the stack of
+ problems computed before using these functions.
+
+ df_simulate_initialize_forwards should be called first with a
+ bitvector copied from the DF_LIVE_IN or DF_LR_IN set of the basic
+ block. Then df_simulate_one_insn_forwards should be called for each
+ insn in the block, starting with the first one.
+ ----------------------------------------------------------------------------*/
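+
+/* A minimal usage sketch (illustrative only, not part of this file's
+ interface; it assumes the DF_LR and DF_NOTE problems are up to date):
+
+ bitmap live = BITMAP_ALLOC (NULL);
+ rtx insn;
+ bitmap_copy (live, DF_LR_IN (bb));
+ df_simulate_initialize_forwards (bb, live);
+ FOR_BB_INSNS (bb, insn)
+ df_simulate_one_insn_forwards (bb, insn, live);
+ BITMAP_FREE (live);
+
+ After each call, LIVE approximates the set of registers live just
+ after INSN. */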
+
+/* Initialize the LIVE bitmap, which should be copied from DF_LIVE_IN or
+ DF_LR_IN for basic block BB, for forward scanning by marking artificial
+ defs live. */
+
+void
+df_simulate_initialize_forwards (basic_block bb, bitmap live)
+{
+ df_ref *def_rec;
+ int bb_index = bb->index;
+
+ for (def_rec = df_get_artificial_defs (bb_index); *def_rec; def_rec++)
+ {
+ df_ref def = *def_rec;
+ if (DF_REF_FLAGS (def) & DF_REF_AT_TOP)
+ bitmap_set_bit (live, DF_REF_REGNO (def));
+ }
+}
+
+/* Simulate the forwards effects of INSN on the bitmap LIVE. */
+
+void
+df_simulate_one_insn_forwards (basic_block bb, rtx insn, bitmap live)
+{
+ rtx link;
+ if (! INSN_P (insn))
+ return;
+
+ /* Make sure that DF_NOTE really is an active df problem. */
+ gcc_assert (df_note);
+
+ /* Note that this is the opposite of how the problem is defined, because
+ in the LR problem defs _kill_ liveness. However, they do so backwards,
+ while here the scan is performed forwards! So, first assume that the
+ def is live, and if this is not true REG_UNUSED notes will rectify the
+ situation. */
+ df_simulate_find_noclobber_defs (insn, live);
+
+ /* Clear all of the registers that go dead. */
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ switch (REG_NOTE_KIND (link))
+ {
+ case REG_DEAD:
+ case REG_UNUSED:
+ {
+ rtx reg = XEXP (link, 0);
+ int regno = REGNO (reg);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = hard_regno_nregs[regno][GET_MODE (reg)];
+ while (--n >= 0)
+ bitmap_clear_bit (live, regno + n);
+ }
+ else
+ bitmap_clear_bit (live, regno);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ df_simulate_fixup_sets (bb, live);
+}
+\f
+/* Used by the next two functions to encode information about the
+ memory references we found. */
+#define MEMREF_NORMAL 1
+#define MEMREF_VOLATILE 2
+
+/* A subroutine of can_move_insns_across_p called through for_each_rtx.
+ Return either MEMREF_NORMAL or MEMREF_VOLATILE if a memory reference
+ is found. */
+
+static int
+find_memory (rtx *px, void *data ATTRIBUTE_UNUSED)
+{
+ rtx x = *px;
+
+ if (GET_CODE (x) == ASM_OPERANDS && MEM_VOLATILE_P (x))
+ return MEMREF_VOLATILE;
+
+ if (!MEM_P (x))
+ return 0;
+ if (MEM_VOLATILE_P (x))
+ return MEMREF_VOLATILE;
+ if (MEM_READONLY_P (x))
+ return 0;
+
+ return MEMREF_NORMAL;
+}
+
+/* A subroutine of can_move_insns_across_p called through note_stores.
+ DATA points to an integer in which we set either the bit for
+ MEMREF_NORMAL or the bit for MEMREF_VOLATILE if we find a MEM
+ of either kind. */
+
+static void
+find_memory_stores (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
+ void *data)
+{
+ int *pflags = (int *)data;
+ if (GET_CODE (x) == SUBREG)
+ x = XEXP (x, 0);
+ /* Treat stores to SP as stores to memory; this prevents problems
+ when there are references to the stack frame. */
+ if (x == stack_pointer_rtx)
+ *pflags |= MEMREF_VOLATILE;
+ if (!MEM_P (x))
+ return;
+ *pflags |= MEM_VOLATILE_P (x) ? MEMREF_VOLATILE : MEMREF_NORMAL;
+}
+
+/* Scan BB backwards, using df_simulate functions to keep track of
+ lifetimes, up to insn POINT. The result is stored in LIVE. */
+
+void
+simulate_backwards_to_point (basic_block bb, regset live, rtx point)
+{
+ rtx insn;
+ bitmap_copy (live, df_get_live_out (bb));
+ df_simulate_initialize_backwards (bb, live);
+
+ /* Scan and update life information until we reach the point we're
+ interested in. */
+ for (insn = BB_END (bb); insn != point; insn = PREV_INSN (insn))
+ df_simulate_one_insn_backwards (bb, insn, live);
+}
+
+/* Return true if it is safe to move a group of insns, described by
+ the range FROM to TO, backwards across another group of insns,
+ described by ACROSS_FROM to ACROSS_TO. It is assumed that there
+ are no insns between ACROSS_TO and FROM, but they may be in
+ different basic blocks; MERGE_BB is the block from which the
+ insns will be moved. The caller must pass in a regset MERGE_LIVE
+ which specifies the registers live after TO.
+
+ This function may be called in one of two cases: either we try to
+ move identical instructions from all successor blocks into their
+ predecessor, or we try to move from only one successor block. If
+ OTHER_BRANCH_LIVE is nonnull, it indicates that we're dealing with
+ the second case. It should contain a set of registers live at the
+ end of ACROSS_TO which must not be clobbered by moving the insns.
+ In that case, we're also more careful about moving memory references
+ and trapping insns.
+
+ We return false if it is not safe to move the entire group, but it
+ may still be possible to move a subgroup. PMOVE_UPTO, if nonnull,
+ is set to point at the last moveable insn in such a case. */
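+
+/* For instance (a hedged sketch, not code from this file; TEST_BB and
+ MERGE_BB stand for hypothetical caller variables), moving all of
+ MERGE_BB backwards across the insns of TEST_BB might look like:
+
+ regset live = BITMAP_ALLOC (&reg_obstack);
+ rtx move_upto;
+ bitmap_copy (live, df_get_live_out (merge_bb));
+ if (can_move_insns_across (BB_HEAD (merge_bb), BB_END (merge_bb),
+ BB_HEAD (test_bb), BB_END (test_bb),
+ merge_bb, live, NULL, &move_upto))
+ ... move the whole range ...
+ else if (move_upto != NULL_RTX)
+ ... move only the insns up to and including move_upto ...
+ BITMAP_FREE (live);
+
+ Passing NULL for OTHER_BRANCH_LIVE selects the first case described
+ above, moving identical insns from all successors. */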
+
+bool
+can_move_insns_across (rtx from, rtx to, rtx across_from, rtx across_to,
+ basic_block merge_bb, regset merge_live,
+ regset other_branch_live, rtx *pmove_upto)
+{
+ rtx insn, next, max_to;
+ bitmap merge_set, merge_use, local_merge_live;
+ bitmap test_set, test_use;
+ unsigned i, fail = 0;
+ bitmap_iterator bi;
+ int memrefs_in_across = 0;
+ int mem_sets_in_across = 0;
+ bool trapping_insns_in_across = false;
+
+ if (pmove_upto != NULL)
+ *pmove_upto = NULL_RTX;
+
+ /* Find real bounds, ignoring debug insns. */
+ while (!NONDEBUG_INSN_P (from) && from != to)
+ from = NEXT_INSN (from);
+ while (!NONDEBUG_INSN_P (to) && from != to)
+ to = PREV_INSN (to);
+
+ for (insn = across_to; ; insn = next)
+ {
+ if (NONDEBUG_INSN_P (insn))
+ {
+ memrefs_in_across |= for_each_rtx (&PATTERN (insn), find_memory,
+ NULL);
+ note_stores (PATTERN (insn), find_memory_stores,
+ &mem_sets_in_across);
+ /* This is used just to find sets of the stack pointer. */
+ memrefs_in_across |= mem_sets_in_across;
+ trapping_insns_in_across |= may_trap_p (PATTERN (insn));
+ }
+ next = PREV_INSN (insn);
+ if (insn == across_from)
+ break;
+ }
+
+ /* Collect:
+ MERGE_SET = set of registers set in MERGE_BB
+ MERGE_USE = set of registers used in MERGE_BB and live at its top
+ MERGE_LIVE = set of registers live at the point inside the MERGE
+ range that we've reached during scanning
+ TEST_SET = set of registers set between ACROSS_FROM and ACROSS_TO.
+ TEST_USE = set of registers used between ACROSS_FROM and ACROSS_TO,
+ and live before ACROSS_FROM. */
+
+ merge_set = BITMAP_ALLOC (&reg_obstack);
+ merge_use = BITMAP_ALLOC (&reg_obstack);
+ local_merge_live = BITMAP_ALLOC (&reg_obstack);
+ test_set = BITMAP_ALLOC (&reg_obstack);
+ test_use = BITMAP_ALLOC (&reg_obstack);
+
+ /* Compute the set of registers set and used in the ACROSS range. */
+ if (other_branch_live != NULL)
+ bitmap_copy (test_use, other_branch_live);
+ df_simulate_initialize_backwards (merge_bb, test_use);
+ for (insn = across_to; ; insn = next)
+ {
+ if (CALL_P (insn))
+ {
+ if (RTL_CONST_OR_PURE_CALL_P (insn))
+ /* Pure functions can read from memory. Const functions can
+ read from arguments that the ABI has forced onto the stack.
+ Neither sort of read can be volatile. */
+ memrefs_in_across |= MEMREF_NORMAL;
+ else
+ {
+ memrefs_in_across |= MEMREF_VOLATILE;
+ mem_sets_in_across |= MEMREF_VOLATILE;
+ }
+ }
+ if (NONDEBUG_INSN_P (insn))
+ {
+ df_simulate_find_defs (insn, test_set);
+ df_simulate_defs (insn, test_use);
+ df_simulate_uses (insn, test_use);
+ }
+ next = PREV_INSN (insn);
+ if (insn == across_from)
+ break;
+ }
+
+ /* Compute an upper bound for the number of insns moved, by finding
+ the first insn in MERGE that sets a register in TEST_USE, or uses
+ a register in TEST_SET. We also check for calls, trapping operations,
+ and memory references. */
+ max_to = NULL_RTX;
+ for (insn = from; ; insn = next)
+ {
+ if (CALL_P (insn))
+ break;
+ if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_EPILOGUE_BEG)
+ break;
+ if (NONDEBUG_INSN_P (insn))
+ {
+ if (may_trap_or_fault_p (PATTERN (insn))
+ && (trapping_insns_in_across || other_branch_live != NULL))
+ break;
+
+ /* We cannot move memory stores past each other, or move memory
+ reads past stores, at least not without tracking them and
+ calling true_dependence on every pair.
+
+ If there is no other branch and no memory references or
+ sets in the ACROSS range, we can move memory references
+ freely, even volatile ones.
+
+ Otherwise, the rules are as follows: volatile memory
+ references and stores can't be moved at all, and any type
+ of memory reference can't be moved if there are volatile
+ accesses or stores in the ACROSS range. That leaves
+ normal reads, which can be moved, as the trapping case is
+ dealt with elsewhere. */
+ if (other_branch_live != NULL || memrefs_in_across != 0)
+ {
+ int mem_ref_flags = 0;
+ int mem_set_flags = 0;
+ note_stores (PATTERN (insn), find_memory_stores, &mem_set_flags);
+ mem_ref_flags = for_each_rtx (&PATTERN (insn), find_memory,
+ NULL);
+ /* Catch sets of the stack pointer. */
+ mem_ref_flags |= mem_set_flags;
+
+ if ((mem_ref_flags | mem_set_flags) & MEMREF_VOLATILE)
+ break;
+ if ((memrefs_in_across & MEMREF_VOLATILE) && mem_ref_flags != 0)
+ break;
+ if (mem_set_flags != 0
+ || (mem_sets_in_across != 0 && mem_ref_flags != 0))
+ break;
+ }
+ df_simulate_find_uses (insn, merge_use);
+ /* We're only interested in uses which use a value live at
+ the top, not one previously set in this block. */
+ bitmap_and_compl_into (merge_use, merge_set);
+ df_simulate_find_defs (insn, merge_set);
+ if (bitmap_intersect_p (merge_set, test_use)
+ || bitmap_intersect_p (merge_use, test_set))
+ break;
+ max_to = insn;
+ }
+ next = NEXT_INSN (insn);
+ if (insn == to)
+ break;
+ }
+ if (max_to != to)
+ fail = 1;
+
+ if (max_to == NULL_RTX || (fail && pmove_upto == NULL))
+ goto out;
+
+ /* Now, lower this upper bound by also taking into account that
+ a range of insns moved across ACROSS must not leave a register
+ live at the end that will be clobbered in ACROSS. We need to
+ find a point where TEST_SET & LIVE == 0.
+
+ Insns in the MERGE range that set registers which are also set
+ in the ACROSS range may still be moved as long as we also move
+ later insns which use the results of the set, and make the
+ register dead again. This is verified by the condition stated
+ above. We only need to test it for registers that are set in
+ the moved region.
+
+ MERGE_LIVE is provided by the caller and holds live registers after
+ TO. */
+ bitmap_copy (local_merge_live, merge_live);
+ for (insn = to; insn != max_to; insn = PREV_INSN (insn))
+ df_simulate_one_insn_backwards (merge_bb, insn, local_merge_live);
+
+ /* We're not interested in registers that aren't set in the moved
+ region at all. */
+ bitmap_and_into (local_merge_live, merge_set);
+ for (;;)
+ {
+ if (NONDEBUG_INSN_P (insn))
+ {
+ if (!bitmap_intersect_p (test_set, local_merge_live))
+ {
+ max_to = insn;
+ break;
+ }
+
+ df_simulate_one_insn_backwards (merge_bb, insn,
+ local_merge_live);
+ }
+ if (insn == from)
+ {
+ fail = 1;
+ goto out;
+ }
+ insn = PREV_INSN (insn);
+ }
+
+ if (max_to != to)
+ fail = 1;
+
+ if (pmove_upto)
+ *pmove_upto = max_to;
+
+ /* For small register class machines, don't lengthen lifetimes of
+ hard registers before reload. */
+ if (! reload_completed
+ && targetm.small_register_classes_for_mode_p (VOIDmode))
+ {
+ EXECUTE_IF_SET_IN_BITMAP (merge_set, 0, i, bi)
+ {
+ if (i < FIRST_PSEUDO_REGISTER
+ && ! fixed_regs[i]
+ && ! global_regs[i])
+ fail = 1;
+ }
+ }
+
+ out:
+ BITMAP_FREE (merge_set);
+ BITMAP_FREE (merge_use);
+ BITMAP_FREE (local_merge_live);
+ BITMAP_FREE (test_set);
+ BITMAP_FREE (test_use);
+
+ return !fail;
+}
+
+\f
+/*----------------------------------------------------------------------------
+ MULTIPLE DEFINITIONS
+
+ Find the locations in the function reached by multiple definition sites
+ for a live pseudo. In and out bitvectors are built for each basic
+ block. They are restricted for efficiency to live registers.
+
+ The gen and kill sets for the problem are obvious. Together they
+ include all defined registers in a basic block; the gen set includes
+ registers where a partial or conditional or may-clobber definition is
+ last in the BB, while the kill set includes registers with a complete
+ definition coming last. However, the computation of the dataflow
+ itself is interesting.
+
+ The idea behind it comes from SSA form's iterated dominance frontier
+ criterion for inserting PHI functions. Just like in that case, we can use
+ the dominance frontier to find places where multiple definitions meet;
+ a register X defined in a basic block BB1 has multiple definitions in
+ basic blocks in BB1's dominance frontier.
+
+ So, the in-set of a basic block BB2 is not just the union of the
+ out-sets of BB2's predecessors, but includes some more bits that come
+ from the basic blocks of whose dominance frontier BB2 is part (BB1 in
+ the previous paragraph). I called this set the init-set of BB2.
+
+ (Note: I actually use the kill-set only to build the init-set;
+ gen bits are propagated from BB1 to BB2 by the dataflow iteration
+ anyway.)
+
+ For example, if you have
+
+ BB1 : r10 = 0
+ r11 = 0
+ if <...> goto BB2 else goto BB3;
+
+ BB2 : r10 = 1
+ r12 = 1
+ goto BB3;
+
+ BB3 :
+
+ you have BB3 in BB2's dominance frontier but not in BB1's, so that the
+ init-set of BB3 includes r10 and r12, but not r11. Note that we do
+ not need to iterate the dominance frontier, because we do not insert
+ anything like PHI functions there! Instead, dataflow will take care of
+ propagating the information to BB3's successors.
+ ---------------------------------------------------------------------------*/
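+
+/* In equation form, writing LIVE_IN/LIVE_OUT for the DF_LR solution,
+ the problem computed below is
+
+ IN(BB) = INIT(BB) union (union over preds P of OUT(P))
+ OUT(BB) = (GEN(BB) inter LIVE_OUT(BB))
+ union ((IN(BB) inter LIVE_IN(BB)) minus KILL(BB))
+
+ i.e. an ordinary forward gen/kill problem trimmed at every step to
+ the live registers; on EH edges the registers invalidated by calls
+ are also removed. See df_md_confluence_n and df_md_transfer_function
+ below. */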
+
+/* Private data used to verify the solution for this problem. */
+struct df_md_problem_data
+{
+ /* An obstack for the bitmaps we need for this problem. */
+ bitmap_obstack md_bitmaps;
+};
+
+/* Scratch var used by transfer functions. This is used to do md analysis
+ only for live registers. */
+static bitmap_head df_md_scratch;
+
+
+static void
+df_md_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
+ void *vbb_info)
+{
+ struct df_md_bb_info *bb_info = (struct df_md_bb_info *) vbb_info;
+ if (bb_info)
+ {
+ bitmap_clear (&bb_info->kill);
+ bitmap_clear (&bb_info->gen);
+ bitmap_clear (&bb_info->init);
+ bitmap_clear (&bb_info->in);
+ bitmap_clear (&bb_info->out);
+ }
+}
+
+
+/* Allocate or reset bitmaps for DF_MD. The solution bits are
+ not touched unless the block is new. */
+
+static void
+df_md_alloc (bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+ struct df_md_problem_data *problem_data;
+
+ df_grow_bb_info (df_md);
+ if (df_md->problem_data)
+ problem_data = (struct df_md_problem_data *) df_md->problem_data;
+ else
+ {
+ problem_data = XNEW (struct df_md_problem_data);
+ df_md->problem_data = problem_data;
+ bitmap_obstack_initialize (&problem_data->md_bitmaps);
+ }
+ bitmap_initialize (&df_md_scratch, &problem_data->md_bitmaps);
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ /* When bitmaps are already initialized, just clear them. */
+ if (bb_info->init.obstack)
+ {
+ bitmap_clear (&bb_info->init);
+ bitmap_clear (&bb_info->gen);
+ bitmap_clear (&bb_info->kill);
+ bitmap_clear (&bb_info->in);
+ bitmap_clear (&bb_info->out);
+ }
+ else
+ {
+ bitmap_initialize (&bb_info->init, &problem_data->md_bitmaps);
+ bitmap_initialize (&bb_info->gen, &problem_data->md_bitmaps);
+ bitmap_initialize (&bb_info->kill, &problem_data->md_bitmaps);
+ bitmap_initialize (&bb_info->in, &problem_data->md_bitmaps);
+ bitmap_initialize (&bb_info->out, &problem_data->md_bitmaps);
+ }
+ }
+
+ df_md->optional_p = true;
+}
+
+/* Add the effect of the top artificial defs of BB to the multiple definitions
+ bitmap LOCAL_MD. */
+
+void
+df_md_simulate_artificial_defs_at_top (basic_block bb, bitmap local_md)
+{
+ int bb_index = bb->index;
+ df_ref *def_rec;
+ for (def_rec = df_get_artificial_defs (bb_index); *def_rec; def_rec++)
+ {
+ df_ref def = *def_rec;
+ if (DF_REF_FLAGS (def) & DF_REF_AT_TOP)
+ {
+ unsigned int dregno = DF_REF_REGNO (def);
+ if (DF_REF_FLAGS (def)
+ & (DF_REF_PARTIAL | DF_REF_CONDITIONAL | DF_REF_MAY_CLOBBER))
+ bitmap_set_bit (local_md, dregno);
+ else
+ bitmap_clear_bit (local_md, dregno);
+ }
+ }
+}
+
+
+/* Add the effect of the defs of INSN to the multiple definitions bitmap
+ LOCAL_MD. */
+
+void
+df_md_simulate_one_insn (basic_block bb ATTRIBUTE_UNUSED, rtx insn,
+ bitmap local_md)
+{
+ unsigned uid = INSN_UID (insn);
+ df_ref *def_rec;
+
+ for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
+ {
+ df_ref def = *def_rec;
+ unsigned int dregno = DF_REF_REGNO (def);
+ if ((!(df->changeable_flags & DF_NO_HARD_REGS))
+ || (dregno >= FIRST_PSEUDO_REGISTER))
+ {
+ if (DF_REF_FLAGS (def)
+ & (DF_REF_PARTIAL | DF_REF_CONDITIONAL | DF_REF_MAY_CLOBBER))
+ bitmap_set_bit (local_md, dregno);
+ else
+ bitmap_clear_bit (local_md, dregno);
+ }
+ }
+}
+
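+/* Helper for df_md_bb_local_compute. Process the defs in DEF_REC into
+ the gen and kill sets of BB_INFO, considering only defs whose
+ DF_REF_AT_TOP flag matches TOP_FLAG. Partial, conditional and
+ may-clobber defs go into the gen set; must defs go into the kill set
+ and, via SEEN_IN_INSN, win over any clobber of the same register
+ later in the record. */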
+static void
+df_md_bb_local_compute_process_def (struct df_md_bb_info *bb_info,
+ df_ref *def_rec,
+ int top_flag)
+{
+ df_ref def;
+ bitmap_clear (&seen_in_insn);
+
+ while ((def = *def_rec++) != NULL)
+ {
+ unsigned int dregno = DF_REF_REGNO (def);
+ if (((!(df->changeable_flags & DF_NO_HARD_REGS))
+ || (dregno >= FIRST_PSEUDO_REGISTER))
+ && top_flag == (DF_REF_FLAGS (def) & DF_REF_AT_TOP))
+ {
+ if (!bitmap_bit_p (&seen_in_insn, dregno))
+ {
+ if (DF_REF_FLAGS (def)
+ & (DF_REF_PARTIAL | DF_REF_CONDITIONAL | DF_REF_MAY_CLOBBER))
+ {
+ bitmap_set_bit (&bb_info->gen, dregno);
+ bitmap_clear_bit (&bb_info->kill, dregno);
+ }
+ else
+ {
+ /* When we find a clobber and a regular def,
+ make sure the regular def wins. */
+ bitmap_set_bit (&seen_in_insn, dregno);
+ bitmap_set_bit (&bb_info->kill, dregno);
+ bitmap_clear_bit (&bb_info->gen, dregno);
+ }
+ }
+ }
+ }
+}
+
+
+/* Compute local multiple def info for basic block BB. */
+
+static void
+df_md_bb_local_compute (unsigned int bb_index)
+{
+ basic_block bb = BASIC_BLOCK (bb_index);
+ struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ rtx insn;
+
+ /* Artificials are only hard regs. */
+ if (!(df->changeable_flags & DF_NO_HARD_REGS))
+ df_md_bb_local_compute_process_def (bb_info,
+ df_get_artificial_defs (bb_index),
+ DF_REF_AT_TOP);
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+ if (!INSN_P (insn))
+ continue;
+
+ df_md_bb_local_compute_process_def (bb_info, DF_INSN_UID_DEFS (uid), 0);
+ }
+
+ if (!(df->changeable_flags & DF_NO_HARD_REGS))
+ df_md_bb_local_compute_process_def (bb_info,
+ df_get_artificial_defs (bb_index),
+ 0);
+}
+
+/* Compute local multiple def info for each basic block within ALL_BLOCKS. */
+
+static void
+df_md_local_compute (bitmap all_blocks)
+{
+ unsigned int bb_index, df_bb_index;
+ bitmap_iterator bi1, bi2;
+ basic_block bb;
+ bitmap_head *frontiers;
+
+ bitmap_initialize (&seen_in_insn, &bitmap_default_obstack);
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi1)
+ {
+ df_md_bb_local_compute (bb_index);
+ }
+
+ bitmap_clear (&seen_in_insn);
+
+ frontiers = XNEWVEC (bitmap_head, last_basic_block);
+ FOR_ALL_BB (bb)
+ bitmap_initialize (&frontiers[bb->index], &bitmap_default_obstack);
+
+ compute_dominance_frontiers (frontiers);
+
+ /* Add each basic block's kills to the nodes in the frontier of the BB. */
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi1)
+ {
+ bitmap kill = &df_md_get_bb_info (bb_index)->kill;
+ EXECUTE_IF_SET_IN_BITMAP (&frontiers[bb_index], 0, df_bb_index, bi2)
+ {
+ basic_block bb = BASIC_BLOCK (df_bb_index);
+ if (bitmap_bit_p (all_blocks, df_bb_index))
+ bitmap_ior_and_into (&df_md_get_bb_info (df_bb_index)->init, kill,
+ df_get_live_in (bb));
+ }
+ }
+
+ FOR_ALL_BB (bb)
+ bitmap_clear (&frontiers[bb->index]);
+ free (frontiers);
+}
+
+
+/* Reset the global solution for recalculation. */
+
+static void
+df_md_reset (bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ gcc_assert (bb_info);
+ bitmap_clear (&bb_info->in);
+ bitmap_clear (&bb_info->out);
+ }
+}
+
+static bool
+df_md_transfer_function (int bb_index)
+{
+ basic_block bb = BASIC_BLOCK (bb_index);
+ struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ bitmap in = &bb_info->in;
+ bitmap out = &bb_info->out;
+ bitmap gen = &bb_info->gen;
+ bitmap kill = &bb_info->kill;
+
+ /* We need to use a scratch set here so that the value returned from this
+ function invocation properly reflects whether the sets changed in a
+ significant way; i.e. not just because the live set was anded in. */
+ bitmap_and (&df_md_scratch, gen, df_get_live_out (bb));
+
+ /* Multiple definitions of a register are not relevant if it is not
+ live. Thus we trim the result to the places where it is live. */
+ bitmap_and_into (in, df_get_live_in (bb));
+
+ return bitmap_ior_and_compl (out, &df_md_scratch, in, kill);
+}
+
+/* Initialize the solution bit vectors for the problem. */
+
+static void
+df_md_init (bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+
+ bitmap_copy (&bb_info->in, &bb_info->init);
+ df_md_transfer_function (bb_index);
+ }
+}
+
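+/* Confluence function for the entry of a basic block: the IN set starts
+ out as the INIT set, i.e. the kills contributed (restricted to live
+ registers) by the blocks having this block in their dominance
+ frontier. */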
+static void
+df_md_confluence_0 (basic_block bb)
+{
+ struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ bitmap_copy (&bb_info->in, &bb_info->init);
+}
+
+/* The IN set of the destination gets the OR of the OUT set of the
+ source. EH edges drop registers invalidated by calls; fake edges
+ contribute nothing. */
+
+static bool
+df_md_confluence_n (edge e)
+{
+ bitmap op1 = &df_md_get_bb_info (e->dest->index)->in;
+ bitmap op2 = &df_md_get_bb_info (e->src->index)->out;
+
+ if (e->flags & EDGE_FAKE)
+ return false;
+
+ if (e->flags & EDGE_EH)
+ return bitmap_ior_and_compl_into (op1, op2,
+ regs_invalidated_by_call_regset);
+ else
+ return bitmap_ior_into (op1, op2);
+}
+
+/* Free all storage associated with the problem. */
+
+static void
+df_md_free (void)
+{
+ struct df_md_problem_data *problem_data
+ = (struct df_md_problem_data *) df_md->problem_data;
+
+ bitmap_obstack_release (&problem_data->md_bitmaps);
+ free (problem_data);
+ df_md->problem_data = NULL;
+
+ df_md->block_info_size = 0;
+ free (df_md->block_info);
+ df_md->block_info = NULL;
+ free (df_md);
+}
+
+
+/* Debugging info at top of bb. */
+
+static void
+df_md_top_dump (basic_block bb, FILE *file)
+{
+ struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ if (!bb_info)
+ return;
+
+ fprintf (file, ";; md in \t");
+ df_print_regset (file, &bb_info->in);
+ fprintf (file, ";; md init \t");
+ df_print_regset (file, &bb_info->init);
+ fprintf (file, ";; md gen \t");
+ df_print_regset (file, &bb_info->gen);
+ fprintf (file, ";; md kill \t");
+ df_print_regset (file, &bb_info->kill);
+}
+
+/* Debugging info at bottom of bb. */
+
+static void
+df_md_bottom_dump (basic_block bb, FILE *file)
+{
+ struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ if (!bb_info)
+ return;
+
+ fprintf (file, ";; md out \t");
+ df_print_regset (file, &bb_info->out);
+}
+
+static struct df_problem problem_MD =
+{
+ DF_MD, /* Problem id. */
+ DF_FORWARD, /* Direction. */
+ df_md_alloc, /* Allocate the problem specific data. */
+ df_md_reset, /* Reset global information. */
+ df_md_free_bb_info, /* Free basic block info. */
+ df_md_local_compute, /* Local compute function. */
+ df_md_init, /* Init the solution specific data. */
+ df_worklist_dataflow, /* Worklist solver. */
+ df_md_confluence_0, /* Confluence operator 0. */
+ df_md_confluence_n, /* Confluence operator n. */
+ df_md_transfer_function, /* Transfer function. */
+ NULL, /* Finalize function. */
+ df_md_free, /* Free all of the problem information. */
+ df_md_free, /* Remove this problem from the stack of dataflow problems. */
+ NULL, /* Debugging. */
+ df_md_top_dump, /* Debugging start block. */
+ df_md_bottom_dump, /* Debugging end block. */
+ NULL, /* Incremental solution verify start. */
+ NULL, /* Incremental solution verify end. */
+ NULL, /* Dependent problem. */
+ sizeof (struct df_md_bb_info),/* Size of entry of block_info array. */
+ TV_DF_MD, /* Timing variable. */
+ false /* Reset blocks on dropping out of blocks_to_analyze. */
+};
+
+/* Create a new MD instance and add it to the existing instance
+ of DF. */
+
+void
+df_md_add_problem (void)
+{
+ df_add_problem (&problem_MD);
+}
+
+
+