+/* Subroutine of reload_combine_split_ruids, called to fix up a single
+ ruid pointed to by *PRUID if it is higher than SPLIT_RUID. */
+
+static inline void
+reload_combine_split_one_ruid (int *pruid, int split_ruid)
+{
+  int ruid = *pruid;
+
+  /* Only ruids above the split point shift; SPLIT_RUID itself and
+     everything below it keep their numbering.  */
+  if (ruid > split_ruid)
+    *pruid = ruid + 1;
+}
+
+/* Called when we insert a new insn in a position we've already passed in
+ the scan. Examine all our state, increasing all ruids that are higher
+ than SPLIT_RUID by one in order to make room for a new insn. */
+
+static void
+reload_combine_split_ruids (int split_ruid)
+{
+  unsigned i;
+
+  /* Fix up the global scan state first.  */
+  reload_combine_split_one_ruid (&reload_combine_ruid, split_ruid);
+  reload_combine_split_one_ruid (&last_label_ruid, split_ruid);
+  reload_combine_split_one_ruid (&last_jump_ruid, split_ruid);
+
+  /* Then every per-hard-register timestamp, including the recorded uses.  */
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    {
+      int j, idx = reg_state[i].use_index;
+      reload_combine_split_one_ruid (&reg_state[i].use_ruid, split_ruid);
+      reload_combine_split_one_ruid (&reg_state[i].store_ruid, split_ruid);
+      reload_combine_split_one_ruid (&reg_state[i].real_store_ruid,
+				     split_ruid);
+      /* A negative use_index means no uses are recorded for this reg.  */
+      if (idx < 0)
+	continue;
+      /* Valid uses occupy reg_use[idx .. RELOAD_COMBINE_MAX_USES - 1].  */
+      for (j = idx; j < RELOAD_COMBINE_MAX_USES; j++)
+	{
+	  reload_combine_split_one_ruid (&reg_state[i].reg_use[j].ruid,
+					 split_ruid);
+	}
+    }
+}
+
+/* Called when we are about to rescan a previously encountered insn with
+ reload_combine_note_use after modifying some part of it. This clears all
+ information about uses in that particular insn. */
+
+static void
+reload_combine_purge_insn_uses (rtx insn)
+{
+ unsigned i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int j, k, idx = reg_state[i].use_index;
+ /* A negative use_index means no uses are recorded for this reg. */
+ if (idx < 0)
+ continue;
+ /* Recorded uses occupy reg_use[idx .. RELOAD_COMBINE_MAX_USES - 1].
+ Walk them from the top of the array downwards, compacting towards
+ the top every entry that belongs to an insn other than INSN; the
+ entries for INSN itself are thereby dropped. */
+ j = k = RELOAD_COMBINE_MAX_USES;
+ while (j-- > idx)
+ {
+ if (reg_state[i].reg_use[j].insn != insn)
+ {
+ k--;
+ if (k != j)
+ reg_state[i].reg_use[k] = reg_state[i].reg_use[j];
+ }
+ }
+ /* K is now the lowest index still holding a valid use. */
+ reg_state[i].use_index = k;
+ }
+}
+
+/* Called when we need to forget about all uses of REGNO after an insn
+ which is identified by RUID. */
+
+static void
+reload_combine_purge_reg_uses_after_ruid (unsigned regno, int ruid)
+{
+ int j, k, idx = reg_state[regno].use_index;
+ /* A negative use_index means no uses are recorded for REGNO. */
+ if (idx < 0)
+ return;
+ /* Compact the recorded uses (reg_use[idx .. RELOAD_COMBINE_MAX_USES - 1])
+ towards the top of the array, keeping only entries whose ruid is at
+ least RUID. NOTE(review): the scan appears to assign decreasing ruids
+ to insns that come later in the insn stream, so "uses after RUID"
+ corresponds to entries with ruids smaller than RUID -- confirm against
+ the main reload_combine scan loop. */
+ j = k = RELOAD_COMBINE_MAX_USES;
+ while (j-- > idx)
+ {
+ if (reg_state[regno].reg_use[j].ruid >= ruid)
+ {
+ k--;
+ if (k != j)
+ reg_state[regno].reg_use[k] = reg_state[regno].reg_use[j];
+ }
+ }
+ /* K is now the lowest index still holding a valid use. */
+ reg_state[regno].use_index = k;
+}
+
+/* Find the use of REGNO with the ruid that is highest among those
+ lower than RUID_LIMIT, and return it if it is the only use of this
+ reg in the insn. Return NULL otherwise. */
+
+static struct reg_use *
+reload_combine_closest_single_use (unsigned regno, int ruid_limit)
+{
+ int i, best_ruid = 0;
+ int use_idx = reg_state[regno].use_index;
+ struct reg_use *retval;
+
+ /* A negative use_index means no uses are recorded for REGNO. */
+ if (use_idx < 0)
+ return NULL;
+ retval = NULL;
+ for (i = use_idx; i < RELOAD_COMBINE_MAX_USES; i++)
+ {
+ struct reg_use *use = reg_state[regno].reg_use + i;
+ int this_ruid = use->ruid;
+ /* Ignore uses at or beyond the limit. */
+ if (this_ruid >= ruid_limit)
+ continue;
+ if (this_ruid > best_ruid)
+ {
+ best_ruid = this_ruid;
+ retval = use;
+ }
+ else if (this_ruid == best_ruid)
+ /* A second use with the same ruid means the insn uses REGNO more
+ than once, so it is not a single use; discard the candidate. */
+ retval = NULL;
+ }
+ /* Give up if a label separates the best use from the current insn;
+ we must not combine across labels. */
+ if (last_label_ruid >= best_ruid)
+ return NULL;
+ return retval;
+}
+
+/* After we've moved an add insn, fix up any debug insns that occur
+ between the old location of the add and the new location. REG is
+ the destination register of the add insn; REPLACEMENT is the
+ SET_SRC of the add. FROM and TO specify the range in which we
+ should make this change on debug insns. */
+
+static void
+fixup_debug_insns (rtx reg, rtx replacement, rtx from, rtx to)
+{
+  rtx cur;
+
+  for (cur = from; cur != to; cur = NEXT_INSN (cur))
+    {
+      if (DEBUG_INSN_P (cur))
+	{
+	  /* Substitute REPLACEMENT for REG in the variable location
+	     expression and install the simplified result.  */
+	  rtx loc = simplify_replace_rtx (INSN_VAR_LOCATION_LOC (cur),
+					  reg, replacement);
+	  validate_change (cur, &INSN_VAR_LOCATION_LOC (cur), loc, 0);
+	}
+    }
+}
+
+/* Subroutine of reload_combine_recognize_const_pattern. Try to replace REG
+ with SRC in the insn described by USE, taking costs into account. Return
+ true if we made the replacement. */
+
+static bool
+try_replace_in_use (struct reg_use *use, rtx reg, rtx src)
+{
+ rtx use_insn = use->insn;
+ rtx mem = use->containing_mem;
+ bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (use_insn));
+
+ if (mem != NULL_RTX)
+ {
+ /* The use is inside the address of a MEM; try substituting SRC for
+ REG in that address. */
+ addr_space_t as = MEM_ADDR_SPACE (mem);
+ rtx oldaddr = XEXP (mem, 0);
+ rtx newaddr = NULL_RTX;
+ int old_cost = address_cost (oldaddr, GET_MODE (mem), as, speed);
+ int new_cost;
+
+ newaddr = simplify_replace_rtx (oldaddr, reg, src);
+ if (memory_address_addr_space_p (GET_MODE (mem), newaddr, as))
+ {
+ /* Temporarily install the new address only to compute its cost,
+ then restore the old one; validate_change performs the real
+ (verified) replacement below. */
+ XEXP (mem, 0) = newaddr;
+ new_cost = address_cost (newaddr, GET_MODE (mem), as, speed);
+ XEXP (mem, 0) = oldaddr;
+ if (new_cost <= old_cost
+ && validate_change (use_insn,
+ &XEXP (mem, 0), newaddr, 0))
+ return true;
+ }
+ }
+ else
+ {
+ /* The use is in the source of another reg = reg + constant add;
+ try folding SRC into it. */
+ rtx new_set = single_set (use_insn);
+ if (new_set
+ && REG_P (SET_DEST (new_set))
+ && GET_CODE (SET_SRC (new_set)) == PLUS
+ && REG_P (XEXP (SET_SRC (new_set), 0))
+ && CONSTANT_P (XEXP (SET_SRC (new_set), 1)))
+ {
+ rtx new_src;
+ int old_cost = set_src_cost (SET_SRC (new_set), speed);
+
+ gcc_assert (rtx_equal_p (XEXP (SET_SRC (new_set), 0), reg));
+ new_src = simplify_replace_rtx (SET_SRC (new_set), reg, src);
+
+ /* Only substitute if it does not make the insn costlier. */
+ if (set_src_cost (new_src, speed) <= old_cost
+ && validate_change (use_insn, &SET_SRC (new_set),
+ new_src, 0))
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Called by reload_combine when scanning INSN. This function tries to detect
+ patterns where a constant is added to a register, and the result is used
+ in an address.
+ Return true if no further processing is needed on INSN; false if it wasn't
+ recognized and should be handled normally. */
+
+static bool
+reload_combine_recognize_const_pattern (rtx insn)
+{
+ int from_ruid = reload_combine_ruid;
+ rtx set, pat, reg, src, addreg;
+ unsigned int regno;
+ struct reg_use *use;
+ bool must_move_add;
+ rtx add_moved_after_insn = NULL_RTX;
+ int add_moved_after_ruid = 0;
+ int clobbered_regno = -1;
+
+ set = single_set (insn);
+ if (set == NULL_RTX)
+ return false;
+
+ reg = SET_DEST (set);
+ src = SET_SRC (set);
+ /* Only single-hard-register destinations in Pmode can be merged into
+ addresses; the stack pointer must not be touched. */
+ if (!REG_P (reg)
+ || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1
+ || GET_MODE (reg) != Pmode
+ || reg == stack_pointer_rtx)
+ return false;
+
+ regno = REGNO (reg);
+
+ /* We look for a REG1 = REG2 + CONSTANT insn, followed by either
+ uses of REG1 inside an address, or inside another add insn. If
+ possible and profitable, merge the addition into subsequent
+ uses. */
+ if (GET_CODE (src) != PLUS
+ || !REG_P (XEXP (src, 0))
+ || !CONSTANT_P (XEXP (src, 1)))
+ return false;
+
+ addreg = XEXP (src, 0);
+ /* If the add uses its own destination (REG = REG + CONST), we cannot
+ simply delete it after merging it into a use; it must instead be
+ moved below the last use we manage to merge it into. */
+ must_move_add = rtx_equal_p (reg, addreg);
+
+ pat = PATTERN (insn);
+ if (must_move_add && set != pat)
+ {
+ /* We have to be careful when moving the add; apart from the
+ single_set there may also be clobbers. Recognize one special
+ case, that of one clobber alongside the set (likely a clobber
+ of the CC register). */
+ gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
+ if (XVECLEN (pat, 0) != 2 || XVECEXP (pat, 0, 0) != set
+ || GET_CODE (XVECEXP (pat, 0, 1)) != CLOBBER
+ || !REG_P (XEXP (XVECEXP (pat, 0, 1), 0)))
+ return false;
+ clobbered_regno = REGNO (XEXP (XVECEXP (pat, 0, 1), 0));
+ }
+
+ /* Walk the recorded uses of REG, closest one first, trying to merge
+ the addition into each in turn. */
+ do
+ {
+ use = reload_combine_closest_single_use (regno, from_ruid);
+
+ if (use)
+ /* Start the search for the next use from here. */
+ from_ruid = use->ruid;
+
+ /* Only Pmode uses (i.e. addresses or address arithmetic) are
+ candidates for merging. */
+ if (use && GET_MODE (*use->usep) == Pmode)
+ {
+ bool delete_add = false;
+ rtx use_insn = use->insn;
+ int use_ruid = use->ruid;
+
+ /* Avoid moving the add insn past a jump. */
+ if (must_move_add && use_ruid <= last_jump_ruid)
+ break;
+
+ /* If the add clobbers another hard reg in parallel, don't move
+ it past a real set of this hard reg. */
+ if (must_move_add && clobbered_regno >= 0
+ && reg_state[clobbered_regno].real_store_ruid >= use_ruid)
+ break;
+
+#ifdef HAVE_cc0
+ /* Do not separate cc0 setter and cc0 user on HAVE_cc0 targets. */
+ if (must_move_add && sets_cc0_p (PATTERN (use_insn)))
+ break;
+#endif
+
+ gcc_assert (reg_state[regno].store_ruid <= use_ruid);
+ /* Avoid moving a use of ADDREG past a point where it is stored. */
+ if (reg_state[REGNO (addreg)].store_ruid > use_ruid)
+ break;
+
+ /* We also must not move the addition past an insn that sets
+ the same register, unless we can combine two add insns. */
+ if (must_move_add && reg_state[regno].store_ruid == use_ruid)
+ {
+ if (use->containing_mem == NULL_RTX)
+ delete_add = true;
+ else
+ break;
+ }
+
+ if (try_replace_in_use (use, reg, src))
+ {
+ /* USE_INSN was modified, so its recorded uses are stale;
+ recompute them. */
+ reload_combine_purge_insn_uses (use_insn);
+ reload_combine_note_use (&PATTERN (use_insn), use_insn,
+ use_ruid, NULL_RTX);
+
+ if (delete_add)
+ {
+ fixup_debug_insns (reg, src, insn, use_insn);
+ delete_insn (insn);
+ return true;
+ }
+ if (must_move_add)
+ {
+ /* Remember the last use we merged into; the add will be
+ moved right after it below. */
+ add_moved_after_insn = use_insn;
+ add_moved_after_ruid = use_ruid;
+ }
+ continue;
+ }
+ }
+ /* If we get here, we couldn't handle this use. */
+ if (must_move_add)
+ break;
+ }
+ while (use);
+
+ if (!must_move_add || add_moved_after_insn == NULL_RTX)
+ /* Process the add normally. */
+ return false;
+
+ fixup_debug_insns (reg, src, insn, add_moved_after_insn);
+
+ /* Move the add after the last use we merged it into, and bring all
+ scan state (ruid numbering, recorded uses, store times) in line
+ with its new position. */
+ reorder_insns (insn, insn, add_moved_after_insn);
+ reload_combine_purge_reg_uses_after_ruid (regno, add_moved_after_ruid);
+ reload_combine_split_ruids (add_moved_after_ruid - 1);
+ reload_combine_note_use (&PATTERN (insn), insn,
+ add_moved_after_ruid, NULL_RTX);
+ reg_state[regno].store_ruid = add_moved_after_ruid;
+
+ return true;
+}
+
+/* Called by reload_combine when scanning INSN. Try to detect a pattern we
+ can handle and improve. Return true if no further processing is needed on
+ INSN; false if it wasn't recognized and should be handled normally. */
+
+static bool
+reload_combine_recognize_pattern (rtx insn)
+{
+ rtx set, reg, src;
+ unsigned int regno;
+
+ set = single_set (insn);
+ if (set == NULL_RTX)
+ return false;
+
+ reg = SET_DEST (set);
+ src = SET_SRC (set);
+ /* Only single-hard-register destinations are handled. */
+ if (!REG_P (reg)
+ || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1)
+ return false;
+
+ regno = REGNO (reg);
+
+ /* Look for (set (REGX) (CONST_INT))
+ (set (REGX) (PLUS (REGX) (REGY)))
+ ...
+ ... (MEM (REGX)) ...
+ and convert it to
+ (set (REGZ) (CONST_INT))
+ ...
+ ... (MEM (PLUS (REGZ) (REGY)))... .
+
+ First, check that we have (set (REGX) (PLUS (REGX) (REGY)))
+ and that we know all uses of REGX before it dies.
+ Also, explicitly check that REGX != REGY; our life information
+ does not yet show whether REGY changes in this insn. */
+
+ /* NOTE(review): all_offsets_match presumably means every recorded use
+ saw REGX with the same constant offset (held in reg_state's offset
+ field) -- confirm against reload_combine_note_use. */
+ if (GET_CODE (src) == PLUS
+ && reg_state[regno].all_offsets_match
+ && last_index_reg != -1
+ && REG_P (XEXP (src, 1))
+ && rtx_equal_p (XEXP (src, 0), reg)
+ && !rtx_equal_p (XEXP (src, 1), reg)
+ && reg_state[regno].use_index >= 0
+ && reg_state[regno].use_index < RELOAD_COMBINE_MAX_USES
+ && last_label_ruid < reg_state[regno].use_ruid)
+ {
+ rtx base = XEXP (src, 1);
+ rtx prev = prev_nonnote_nondebug_insn (insn);
+ rtx prev_set = prev ? single_set (prev) : NULL_RTX;
+ rtx index_reg = NULL_RTX;
+ rtx reg_sum = NULL_RTX;
+ int i;
+
+ /* Now we need to set INDEX_REG to an index register (denoted as
+ REGZ in the illustration above) and REG_SUM to the expression
+ register+register that we want to use to substitute uses of REG
+ (typically in MEMs) with. First check REG and BASE for being
+ index registers; we can use them even if they are not dead. */
+ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno)
+ || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
+ REGNO (base)))
+ {
+ index_reg = reg;
+ reg_sum = src;
+ }
+ else
+ {
+ /* Otherwise, look for a free index register. Since we have
+ checked above that neither REG nor BASE are index registers,
+ if we find anything at all, it will be different from these
+ two registers. A candidate must have no recorded uses, must
+ not have been stored into since the last recorded use of REGX,
+ and must be usable as a scratch here. NOTE(review): the
+ call_used_regs || df_regs_ever_live_p test presumably avoids
+ creating a brand-new prologue save requirement -- confirm. */
+ for (i = first_index_reg; i <= last_index_reg; i++)
+ {
+ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], i)
+ && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES
+ && reg_state[i].store_ruid <= reg_state[regno].use_ruid
+ && (call_used_regs[i] || df_regs_ever_live_p (i))
+ && (!frame_pointer_needed || i != HARD_FRAME_POINTER_REGNUM)
+ && !fixed_regs[i] && !global_regs[i]
+ && hard_regno_nregs[i][GET_MODE (reg)] == 1
+ && targetm.hard_regno_scratch_ok (i))
+ {
+ index_reg = gen_rtx_REG (GET_MODE (reg), i);
+ reg_sum = gen_rtx_PLUS (GET_MODE (reg), index_reg, base);
+ break;
+ }
+ }
+ }
+
+ /* Check that PREV_SET is indeed (set (REGX) (CONST_INT)) and that
+ (REGY), i.e. BASE, is not clobbered before the last use we'll
+ create. */
+ if (reg_sum
+ && prev_set
+ && CONST_INT_P (SET_SRC (prev_set))
+ && rtx_equal_p (SET_DEST (prev_set), reg)
+ && (reg_state[REGNO (base)].store_ruid
+ <= reg_state[regno].use_ruid))
+ {
+ /* Change destination register and, if necessary, the constant
+ value in PREV, the constant loading instruction. */
+ validate_change (prev, &SET_DEST (prev_set), index_reg, 1);
+ if (reg_state[regno].offset != const0_rtx)
+ validate_change (prev,
+ &SET_SRC (prev_set),
+ GEN_INT (INTVAL (SET_SRC (prev_set))
+ + INTVAL (reg_state[regno].offset)),
+ 1);
+
+ /* Now for every use of REG that we have recorded, replace REG
+ with REG_SUM. */
+ for (i = reg_state[regno].use_index;
+ i < RELOAD_COMBINE_MAX_USES; i++)
+ validate_unshare_change (reg_state[regno].reg_use[i].insn,
+ reg_state[regno].reg_use[i].usep,
+ /* Each change must have its own
+ replacement. */
+ reg_sum, 1);
+
+ /* All the changes above were queued; commit or abandon them
+ as a group. */
+ if (apply_change_group ())
+ {
+ struct reg_use *lowest_ruid = NULL;
+
+ /* For every new use of REG_SUM, we have to record the use
+ of BASE therein, i.e. operand 1. */
+ for (i = reg_state[regno].use_index;
+ i < RELOAD_COMBINE_MAX_USES; i++)
+ {
+ struct reg_use *use = reg_state[regno].reg_use + i;
+ reload_combine_note_use (&XEXP (*use->usep, 1), use->insn,
+ use->ruid, use->containing_mem);
+ if (lowest_ruid == NULL || use->ruid < lowest_ruid->ruid)
+ lowest_ruid = use;
+ }
+
+ fixup_debug_insns (reg, reg_sum, insn, lowest_ruid->insn);
+
+ /* Delete the reg-reg addition. */
+ delete_insn (insn);
+
+ if (reg_state[regno].offset != const0_rtx)
+ /* Previous REG_EQUIV / REG_EQUAL notes for PREV
+ are now invalid. */
+ remove_reg_equal_equiv_notes (prev);
+
+ /* All recorded uses of REGX have been handled; mark it as
+ having none. */
+ reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+