+/* A subroutine of mark_target_live_regs. Search forward from TARGET
+ looking for registers that are set before they are used. These are dead.
+ Stop after passing a few conditional jumps, and/or a small
+ number of unconditional branches. */
+
+static rtx
+find_dead_or_set_registers (target, res, jump_target, jump_count, set, needed)
+ rtx target;
+ struct resources *res;
+ rtx *jump_target;
+ int jump_count;
+ struct resources set, needed;
+{
+ HARD_REG_SET scratch;
+ rtx insn, next;
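+  /* The first unconditional jump or return insn we follow, if any;
+     this is what we ultimately return to the caller.  */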
+ rtx jump_insn = 0;
+ int i;
+
+ for (insn = target; insn; insn = next)
+ {
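+      /* The jump we will examine below: INSN itself, or the JUMP_INSN
+         found inside it when INSN is a SEQUENCE (see the INSN case).  */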
+ rtx this_jump_insn = insn;
+
+ next = NEXT_INSN (insn);
+ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ /* After a label, any pending dead registers that weren't yet
+ used can be made dead. */
+ AND_COMPL_HARD_REG_SET (pending_dead_regs, needed.regs);
+ AND_COMPL_HARD_REG_SET (res->regs, pending_dead_regs);
+ CLEAR_HARD_REG_SET (pending_dead_regs);
+
+ if (CODE_LABEL_NUMBER (insn) < max_label_num_after_reload)
+ {
+              /* All spill registers are dead at a label, so also kill
+                 any of them that are not needed.  */
+ COPY_HARD_REG_SET (scratch, used_spill_regs);
+ AND_COMPL_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (res->regs, scratch);
+ }
+ continue;
+
+ case BARRIER:
+ case NOTE:
+ continue;
+
+ case INSN:
+ if (GET_CODE (PATTERN (insn)) == USE)
+ {
+ /* If INSN is a USE made by update_block, we care about the
+ underlying insn. Any registers set by the underlying insn
+ are live since the insn is being done somewhere else. */
+ if (GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i')
+ mark_set_resources (XEXP (PATTERN (insn), 0), res, 0, 1);
+
+ /* All other USE insns are to be ignored. */
+ continue;
+ }
+ else if (GET_CODE (PATTERN (insn)) == CLOBBER)
+ continue;
+ else if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ /* An unconditional jump can be used to fill the delay slot
+ of a call, so search for a JUMP_INSN in any position. */
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ this_jump_insn = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (this_jump_insn) == JUMP_INSN)
+ break;
+ }
+ }
+ }
+
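+      /* If we found a jump, either INSN itself or one inside a SEQUENCE,
+         follow it as far as the jump count allows.  */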
+ if (GET_CODE (this_jump_insn) == JUMP_INSN)
+ {
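+          /* An unconditional jump costs 1 and a conditional jump costs 5
+             (1 here plus 4 below); give up once the total reaches 10.  */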
+ if (jump_count++ < 10)
+ {
+ if (simplejump_p (this_jump_insn)
+ || GET_CODE (PATTERN (this_jump_insn)) == RETURN)
+ {
+ next = JUMP_LABEL (this_jump_insn);
+ if (jump_insn == 0)
+ {
+ jump_insn = insn;
+ if (jump_target)
+ *jump_target = JUMP_LABEL (this_jump_insn);
+ }
+ }
+ else if (condjump_p (this_jump_insn)
+ || condjump_in_parallel_p (this_jump_insn))
+ {
+ struct resources target_set, target_res;
+ struct resources fallthrough_res;
+
+                  /* We can handle conditional branches here by following
+                     both paths and then ORing together the live register
+                     sets found on each path: a register is removed from
+                     RES only if it is dead on both paths.  Since this is
+                     expensive, we give conditional branches a much higher
+                     cost than unconditional ones.  The cost was chosen so
+                     that we will follow at most 1 conditional branch.  */
+
+ jump_count += 4;
+ if (jump_count >= 10)
+ break;
+
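+                  /* Whatever the branch (and any filled delay slots)
+                     references is needed on both paths below.  */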
+ mark_referenced_resources (insn, &needed, 1);
+
+ /* For an annulled branch, mark_set_resources ignores slots
+ filled by instructions from the target. This is correct
+ if the branch is not taken. Since we are following both
+ paths from the branch, we must also compute correct info
+ if the branch is taken. We do this by inverting all of
+ the INSN_FROM_TARGET_P bits, calling mark_set_resources,
+ and then inverting the INSN_FROM_TARGET_P bits again. */
+
+ if (GET_CODE (PATTERN (insn)) == SEQUENCE
+ && INSN_ANNULLED_BRANCH_P (this_jump_insn))
+ {
+ for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i))
+ = ! INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i));
+
+ target_set = set;
+ mark_set_resources (insn, &target_set, 0, 1);
+
+ for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i))
+ = ! INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i));
+
+ mark_set_resources (insn, &set, 0, 1);
+ }
+ else
+ {
+ mark_set_resources (insn, &set, 0, 1);
+ target_set = set;
+ }
+
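+                  /* On each path, registers that are set and not needed
+                     are dead; drop them from that path's copy of RES.  */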
+ target_res = *res;
+ COPY_HARD_REG_SET (scratch, target_set.regs);
+ AND_COMPL_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (target_res.regs, scratch);
+
+ fallthrough_res = *res;
+ COPY_HARD_REG_SET (scratch, set.regs);
+ AND_COMPL_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (fallthrough_res.regs, scratch);
+
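+                  /* Follow both arms of the branch, then keep in RES
+                     only registers still live on at least one of them.  */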
+ find_dead_or_set_registers (JUMP_LABEL (this_jump_insn),
+ &target_res, 0, jump_count,
+ target_set, needed);
+ find_dead_or_set_registers (next,
+ &fallthrough_res, 0, jump_count,
+ set, needed);
+ IOR_HARD_REG_SET (fallthrough_res.regs, target_res.regs);
+ AND_HARD_REG_SET (res->regs, fallthrough_res.regs);
+ break;
+ }
+ else
+ break;
+ }
+ else
+ {
+ /* Don't try this optimization if we expired our jump count
+ above, since that would mean there may be an infinite loop
+ in the function being compiled. */
+ jump_insn = 0;
+ break;
+ }
+ }
+
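+      /* Note what this insn references and sets; any register set before
+         it is used on this path is dead at TARGET, so drop it from RES.  */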
+ mark_referenced_resources (insn, &needed, 1);
+ mark_set_resources (insn, &set, 0, 1);
+
+ COPY_HARD_REG_SET (scratch, set.regs);
+ AND_COMPL_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (res->regs, scratch);
+ }
+
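+  /* Return the first unconditional jump we followed, or zero if we
+     followed none or gave up because the jump count expired.  */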
+ return jump_insn;
+}
+