+/* Pointer that iterates through the list of unscheduled insns if we
+ have a dbg_cnt enabled. It always points at an insn prior to the
+ first unscheduled one. */
+static rtx nonscheduled_insns_begin;
+
+/* Cached cost of the instruction. Use insn_cost () below to get the
+ cost of an insn. -1 here means that the field is not initialized. */
+#define INSN_COST(INSN) (HID (INSN)->cost)
+
+/* Compute cost of executing INSN.
+ This is the number of cycles between instruction issue and
+ instruction results. */
+int
+insn_cost (rtx insn)
+{
+ int cost;
+
+ if (sel_sched_p ())
+ {
+ if (recog_memoized (insn) < 0)
+ return 0;
+
+ cost = insn_default_latency (insn);
+ if (cost < 0)
+ cost = 0;
+
+ return cost;
+ }
+
+ cost = INSN_COST (insn);
+
+ if (cost < 0)
+ {
+ /* A USE insn, or something else we don't need to
+ understand. We can't pass these directly to
+ result_ready_cost or insn_default_latency because doing
+ so will trigger a fatal error for unrecognizable insns. */
+ if (recog_memoized (insn) < 0)
+ {
+ INSN_COST (insn) = 0;
+ return 0;
+ }
+ else
+ {
+ cost = insn_default_latency (insn);
+ if (cost < 0)
+ cost = 0;
+
+ INSN_COST (insn) = cost;
+ }
+ }
+
+ return cost;
+}
+
+/* Compute cost of dependence LINK.
+ This is the number of cycles between instruction issue and
+ instruction results.
+ ??? We also use this function to call recog_memoized on all insns. */
+int
+dep_cost_1 (dep_t link, dw_t dw)
+{
+ rtx insn = DEP_PRO (link);
+ rtx used = DEP_CON (link);
+ int cost;
+
+ if (DEP_COST (link) != UNKNOWN_DEP_COST)
+ return DEP_COST (link);
+
+ if (delay_htab)
+ {
+ struct delay_pair *delay_entry;
+ delay_entry
+ = (struct delay_pair *) htab_find_with_hash (delay_htab_i2, used,
+ htab_hash_pointer (used));
+ if (delay_entry)
+ {
+ if (delay_entry->i1 == insn)
+ {
+ DEP_COST (link) = pair_delay (delay_entry);
+ return DEP_COST (link);
+ }
+ }
+ }
+
+ /* A USE insn should never require the value used to be computed.
+ This allows the computation of a function's result and parameter
+ values to overlap the return and call. We don't care about the
+ dependence cost when only decreasing register pressure. */
+ if (recog_memoized (used) < 0)
+ {
+ cost = 0;
+ recog_memoized (insn);
+ }
+ else
+ {
+ enum reg_note dep_type = DEP_TYPE (link);
+
+ cost = insn_cost (insn);
+
+ if (INSN_CODE (insn) >= 0)
+ {
+ if (dep_type == REG_DEP_ANTI)
+ cost = 0;
+ else if (dep_type == REG_DEP_OUTPUT)
+ {
+ cost = (insn_default_latency (insn)
+ - insn_default_latency (used));
+ if (cost <= 0)
+ cost = 1;
+ }
+ else if (bypass_p (insn))
+ cost = insn_latency (insn, used);
+ }
+
+ if (targetm.sched.adjust_cost_2)
+ cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
+ dw);
+ else if (targetm.sched.adjust_cost != NULL)
+ {
+ /* This variable is used for backward compatibility with the
+ targets. */
+ rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
+
+ /* Make it self-cycled, so that anyone who tries to walk over this
+ incomplete list will be caught in an endless loop. */
+ XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
+
+ /* Targets use only REG_NOTE_KIND of the link. */
+ PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
+
+ cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
+ insn, cost);
+
+ free_INSN_LIST_node (dep_cost_rtx_link);
+ }
+
+ if (cost < 0)
+ cost = 0;
+ }
+
+ DEP_COST (link) = cost;
+ return cost;
+}
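+
+/* To summarize the defaults above: a true dependence costs the
+ producer's insn_cost (possibly refined by a bypass), an anti
+ dependence costs 0, and an output dependence costs the difference
+ of the two default latencies, but at least 1; targets may then
+ adjust the result via their cost hooks. */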
+
+/* Compute cost of dependence LINK.
+ This is the number of cycles between instruction issue and
+ instruction results. */
+int
+dep_cost (dep_t link)
+{
+ return dep_cost_1 (link, 0);
+}
+
+/* Use this sel-sched.c-friendly function in reorder2 instead of
+ increasing INSN_PRIORITY explicitly. */
+void
+increase_insn_priority (rtx insn, int amount)
+{
+ if (!sel_sched_p ())
+ {
+ /* We're dealing with haifa-sched.c INSN_PRIORITY. */
+ if (INSN_PRIORITY_KNOWN (insn))
+ INSN_PRIORITY (insn) += amount;
+ }
+ else
+ {
+ /* In sel-sched.c INSN_PRIORITY is not kept up to date.
+ Use EXPR_PRIORITY instead. */
+ sel_add_to_insn_priority (insn, amount);
+ }
+}
+
+/* Return 'true' if DEP should be included in priority calculations. */
+static bool
+contributes_to_priority_p (dep_t dep)
+{
+ if (DEBUG_INSN_P (DEP_CON (dep))
+ || DEBUG_INSN_P (DEP_PRO (dep)))
+ return false;
+
+ /* The critical path is meaningful within block boundaries only. */
+ if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
+ DEP_PRO (dep)))
+ return false;
+
+ /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set, then speculative
+ instructions will be less likely to be scheduled. That is because
+ the priority of their producers will increase, and, thus, the
+ producers will be more likely to be scheduled, thus resolving the
+ dependence. */
+ if (sched_deps_info->generate_spec_deps
+ && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
+ && (DEP_STATUS (dep) & SPECULATIVE))
+ return false;
+
+ return true;
+}
+
+/* Compute the number of nondebug forward deps of an insn. */
+
+static int
+dep_list_size (rtx insn)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+ int dbgcount = 0, nodbgcount = 0;
+
+ if (!MAY_HAVE_DEBUG_INSNS)
+ return sd_lists_size (insn, SD_LIST_FORW);
+
+ FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ {
+ if (DEBUG_INSN_P (DEP_CON (dep)))
+ dbgcount++;
+ else if (!DEBUG_INSN_P (DEP_PRO (dep)))
+ nodbgcount++;
+ }
+
+ gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
+
+ return nodbgcount;
+}
+
+/* Compute the priority number for INSN. */
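+/* In effect, for an ordinary insn the priority computed below is the
+ length in cycles of the longest path from INSN through its forward
+ dependencies to the end of the region: insn_cost (INSN) when no
+ forward dep contributes, and otherwise the maximum of
+ dep_cost (DEP) + priority (DEP_CON (DEP)) over the contributing
+ forward deps. */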
+static int
+priority (rtx insn)
+{
+ if (! INSN_P (insn))
+ return 0;
+
+ /* We should not be interested in priority of an already scheduled insn. */
+ gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
+
+ if (!INSN_PRIORITY_KNOWN (insn))
+ {
+ int this_priority = -1;
+
+ if (dep_list_size (insn) == 0)
+ /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
+ some forward deps but all of them are ignored by the
+ contributes_to_priority hook. At the moment we set the priority
+ of such an insn to 0. */
+ this_priority = insn_cost (insn);
+ else
+ {
+ rtx prev_first, twin;
+ basic_block rec;
+
+ /* For recovery check instructions we calculate priority slightly
+ differently than for normal instructions. Instead of walking
+ through the INSN_FORW_DEPS (check) list, we walk through the
+ INSN_FORW_DEPS list of each instruction in the corresponding
+ recovery block. */
+
+ /* Selective scheduling does not define RECOVERY_BLOCK macro. */
+ rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
+ if (!rec || rec == EXIT_BLOCK_PTR)
+ {
+ prev_first = PREV_INSN (insn);
+ twin = insn;
+ }
+ else
+ {
+ prev_first = NEXT_INSN (BB_HEAD (rec));
+ twin = PREV_INSN (BB_END (rec));
+ }
+
+ do
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
+ {
+ rtx next;
+ int next_priority;
+
+ next = DEP_CON (dep);
+
+ if (BLOCK_FOR_INSN (next) != rec)
+ {
+ int cost;
+
+ if (!contributes_to_priority_p (dep))
+ continue;
+
+ if (twin == insn)
+ cost = dep_cost (dep);
+ else
+ {
+ struct _dep _dep1, *dep1 = &_dep1;
+
+ init_dep (dep1, insn, next, REG_DEP_ANTI);
+
+ cost = dep_cost (dep1);
+ }
+
+ next_priority = cost + priority (next);
+
+ if (next_priority > this_priority)
+ this_priority = next_priority;
+ }
+ }
+
+ twin = PREV_INSN (twin);
+ }
+ while (twin != prev_first);
+ }
+
+ if (this_priority < 0)
+ {
+ gcc_assert (this_priority == -1);
+
+ this_priority = insn_cost (insn);
+ }
+
+ INSN_PRIORITY (insn) = this_priority;
+ INSN_PRIORITY_STATUS (insn) = 1;
+ }
+
+ return INSN_PRIORITY (insn);
+}
+\f
+/* Macros and functions for keeping the priority queue sorted, and
+ dealing with queuing and dequeuing of instructions. */
+
+#define SCHED_SORT(READY, N_READY) \
+do { if ((N_READY) == 2) \
+ swap_sort (READY, N_READY); \
+ else if ((N_READY) > 2) \
+ qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
+while (0)
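+
+/* For example, ready_sort () below invokes this as
+ SCHED_SORT (first, ready->n_ready) with FIRST pointing at the bottom
+ of the ready list; a two-element array takes the cheaper swap_sort ()
+ path instead of a full qsort. */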
+
+/* Set up info about the current register pressure impact of scheduling
+ INSN at the current scheduling point. */
+static void
+setup_insn_reg_pressure_info (rtx insn)
+{
+ int i, change, before, after, hard_regno;
+ int excess_cost_change;
+ enum machine_mode mode;
+ enum reg_class cl;
+ struct reg_pressure_data *pressure_info;
+ int *max_reg_pressure;
+ struct reg_use_data *use;
+ static int death[N_REG_CLASSES];
+
+ gcc_checking_assert (!DEBUG_INSN_P (insn));
+
+ excess_cost_change = 0;
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ death[ira_pressure_classes[i]] = 0;
+ for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
+ if (dying_use_p (use))
+ {
+ cl = sched_regno_pressure_class[use->regno];
+ if (use->regno < FIRST_PSEUDO_REGISTER)
+ death[cl]++;
+ else
+ death[cl]
+ += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
+ }
+ pressure_info = INSN_REG_PRESSURE (insn);
+ max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
+ gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ {
+ cl = ira_pressure_classes[i];
+ gcc_assert (curr_reg_pressure[cl] >= 0);
+ change = (int) pressure_info[i].set_increase - death[cl];
+ before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
+ after = MAX (0, max_reg_pressure[i] + change
+ - ira_available_class_regs[cl]);
+ hard_regno = ira_class_hard_regs[cl][0];
+ gcc_assert (hard_regno >= 0);
+ mode = reg_raw_mode[hard_regno];
+ excess_cost_change += ((after - before)
+ * (ira_memory_move_cost[mode][cl][0]
+ + ira_memory_move_cost[mode][cl][1]));
+ }
+ INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
+}
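+
+/* A hypothetical worked example of the computation above: with a single
+ pressure class CL where ira_available_class_regs[CL] == 4, a cached
+ maximum pressure of 6, and an insn whose deaths exceed its sets by
+ one register (CHANGE == -1), BEFORE is 2 and AFTER is 1, so the
+ excess cost change is -1 times the sum of the store and load costs
+ for CL, i.e. scheduling the insn is expected to save one spill
+ pair. */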
+
+/* Returns a positive value if x is preferred; returns a negative value if
+ y is preferred. Should never return 0, since that will make the sort
+ unstable. */
+
+static int
+rank_for_schedule (const void *x, const void *y)
+{
+ rtx tmp = *(const rtx *) y;
+ rtx tmp2 = *(const rtx *) x;
+ int tmp_class, tmp2_class;
+ int val, priority_val, info_val;
+
+ if (MAY_HAVE_DEBUG_INSNS)
+ {
+ /* Schedule debug insns as early as possible. */
+ if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
+ return -1;
+ else if (DEBUG_INSN_P (tmp2))
+ return 1;
+ }
+
+ /* An insn in a schedule group should be issued first. */
+ if (flag_sched_group_heuristic
+ && SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
+ return SCHED_GROUP_P (tmp2) ? 1 : -1;
+
+ /* Make sure that the priorities of TMP and TMP2 are initialized. */
+ gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
+
+ if (sched_pressure_p)
+ {
+ int diff;
+
+ /* Prefer insn whose scheduling results in the smallest register
+ pressure excess. */
+ if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
+ + (INSN_TICK (tmp) > clock_var
+ ? INSN_TICK (tmp) - clock_var : 0)
+ - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
+ - (INSN_TICK (tmp2) > clock_var
+ ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
+ return diff;
+ }
+
+ if (sched_pressure_p
+ && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
+ {
+ if (INSN_TICK (tmp) <= clock_var)
+ return -1;
+ else if (INSN_TICK (tmp2) <= clock_var)
+ return 1;
+ else
+ return INSN_TICK (tmp) - INSN_TICK (tmp2);
+ }
+
+ /* If we are doing backtracking in this schedule, prefer insns that
+ have forward dependencies with negative cost against an insn that
+ was already scheduled. */
+ if (current_sched_info->flags & DO_BACKTRACKING)
+ {
+ priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
+ if (priority_val)
+ return priority_val;
+ }
+
+ /* Prefer insn with higher priority. */
+ priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
+
+ if (flag_sched_critical_path_heuristic && priority_val)
+ return priority_val;
+
+ /* Prefer the speculative insn with the greater dependency weakness. */
+ if (flag_sched_spec_insn_heuristic && spec_info)
+ {
+ ds_t ds1, ds2;
+ dw_t dw1, dw2;
+ int dw;
+
+ ds1 = TODO_SPEC (tmp) & SPECULATIVE;
+ if (ds1)
+ dw1 = ds_weak (ds1);
+ else
+ dw1 = NO_DEP_WEAK;
+
+ ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
+ if (ds2)
+ dw2 = ds_weak (ds2);
+ else
+ dw2 = NO_DEP_WEAK;
+
+ dw = dw2 - dw1;
+ if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
+ return dw;
+ }
+
+ info_val = (*current_sched_info->rank) (tmp, tmp2);
+ if (flag_sched_rank_heuristic && info_val)
+ return info_val;
+
+ /* Compare insns based on their relation to the last scheduled
+ non-debug insn. */
+ if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
+ {
+ dep_t dep1;
+ dep_t dep2;
+ rtx last = last_nondebug_scheduled_insn;
+
+ /* Classify the instructions into three classes:
+ 1) Data dependent on the last scheduled insn.
+ 2) Anti/Output dependent on the last scheduled insn.
+ 3) Independent of the last scheduled insn, or has a latency of one.
+ Choose the insn from the highest numbered class if different. */
+ dep1 = sd_find_dep_between (last, tmp, true);
+
+ if (dep1 == NULL || dep_cost (dep1) == 1)
+ tmp_class = 3;
+ else if (/* Data dependence. */
+ DEP_TYPE (dep1) == REG_DEP_TRUE)
+ tmp_class = 1;
+ else
+ tmp_class = 2;
+
+ dep2 = sd_find_dep_between (last, tmp2, true);
+
+ if (dep2 == NULL || dep_cost (dep2) == 1)
+ tmp2_class = 3;
+ else if (/* Data dependence. */
+ DEP_TYPE (dep2) == REG_DEP_TRUE)
+ tmp2_class = 1;
+ else
+ tmp2_class = 2;
+
+ if ((val = tmp2_class - tmp_class))
+ return val;
+ }
+
+ /* Prefer the insn which has more later insns that depend on it.
+ This gives the scheduler more freedom when scheduling later
+ instructions at the expense of added register pressure. */
+
+ val = (dep_list_size (tmp2) - dep_list_size (tmp));
+
+ if (flag_sched_dep_count_heuristic && val != 0)
+ return val;
+
+ /* If insns are equally good, sort by INSN_LUID (original insn order),
+ so that we make the sort stable. This minimizes instruction movement,
+ thus minimizing sched's effect on debugging and cross-jumping. */
+ return INSN_LUID (tmp) - INSN_LUID (tmp2);
+}
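+
+/* To recap the ordering above: debug insns first, then schedule
+ groups, register-pressure excess, insns feeding backtrack insns,
+ critical-path priority, speculation weakness, the target rank hook,
+ the relation to the last scheduled insn, forward dependence counts,
+ and finally INSN_LUID as a stable tie-break. */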
+
+/* Resort the array A of length N, in which only the last element
+ may be out of order. */
+
+HAIFA_INLINE static void
+swap_sort (rtx *a, int n)
+{
+ rtx insn = a[n - 1];
+ int i = n - 2;
+
+ while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
+ {
+ a[i + 1] = a[i];
+ i -= 1;
+ }
+ a[i + 1] = insn;
+}
+
+/* Add INSN to the insn queue so that it can be executed at least
+ N_CYCLES after the currently executing insn. Preserve the insn
+ chain for debugging purposes. REASON will be printed in the
+ debugging output. */
+
+HAIFA_INLINE static void
+queue_insn (rtx insn, int n_cycles, const char *reason)
+{
+ int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
+ rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
+ int new_tick;
+
+ gcc_assert (n_cycles <= max_insn_queue_index);
+ gcc_assert (!DEBUG_INSN_P (insn));
+
+ insn_queue[next_q] = link;
+ q_size += 1;
+
+ if (sched_verbose >= 2)
+ {
+ fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
+ (*current_sched_info->print_insn) (insn, 0));
+
+ fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
+ }
+
+ QUEUE_INDEX (insn) = next_q;
+
+ if (current_sched_info->flags & DO_BACKTRACKING)
+ {
+ new_tick = clock_var + n_cycles;
+ if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
+ INSN_TICK (insn) = new_tick;
+
+ if (INSN_EXACT_TICK (insn) != INVALID_TICK
+ && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
+ {
+ must_backtrack = true;
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
+ }
+ }
+}
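+
+/* Note that the insn queue is a circular buffer: NEXT_Q_AFTER (q_ptr, N)
+ names the slot that becomes current N clock cycles from now, so the
+ insns queued there return to the ready list after N cycles. */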
+
+/* Remove INSN from queue. */
+static void
+queue_remove (rtx insn)
+{
+ gcc_assert (QUEUE_INDEX (insn) >= 0);
+ remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
+ q_size--;
+ QUEUE_INDEX (insn) = QUEUE_NOWHERE;
+}
+
+/* Return a pointer to the bottom of the ready list, i.e. the insn
+ with the lowest priority. */
+
+rtx *
+ready_lastpos (struct ready_list *ready)
+{
+ gcc_assert (ready->n_ready >= 1);
+ return ready->vec + ready->first - ready->n_ready + 1;
+}
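+
+/* The ready list lives in VEC with the highest-priority insn at
+ VEC[FIRST] and priorities decreasing towards lower indices, so its
+ N_READY entries occupy VEC[FIRST - N_READY + 1] through VEC[FIRST];
+ ready_lastpos () thus returns the lowest-priority end. */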
+
+/* Add an element INSN to the ready list so that it ends up with the
+ lowest/highest priority depending on FIRST_P. */
+
+HAIFA_INLINE static void
+ready_add (struct ready_list *ready, rtx insn, bool first_p)
+{
+ if (!first_p)
+ {
+ if (ready->first == ready->n_ready)
+ {
+ memmove (ready->vec + ready->veclen - ready->n_ready,
+ ready_lastpos (ready),
+ ready->n_ready * sizeof (rtx));
+ ready->first = ready->veclen - 1;
+ }
+ ready->vec[ready->first - ready->n_ready] = insn;
+ }
+ else
+ {
+ if (ready->first == ready->veclen - 1)
+ {
+ if (ready->n_ready)
+ /* ready_lastpos() fails when called with (ready->n_ready == 0). */
+ memmove (ready->vec + ready->veclen - ready->n_ready - 1,
+ ready_lastpos (ready),
+ ready->n_ready * sizeof (rtx));
+ ready->first = ready->veclen - 2;
+ }
+ ready->vec[++(ready->first)] = insn;
+ }
+
+ ready->n_ready++;
+ if (DEBUG_INSN_P (insn))
+ ready->n_debug++;
+
+ gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
+ QUEUE_INDEX (insn) = QUEUE_READY;
+
+ if (INSN_EXACT_TICK (insn) != INVALID_TICK
+ && INSN_EXACT_TICK (insn) < clock_var)
+ {
+ must_backtrack = true;
+ }
+}
+
+/* Remove the element with the highest priority from the ready list and
+ return it. */
+
+HAIFA_INLINE static rtx
+ready_remove_first (struct ready_list *ready)
+{
+ rtx t;
+
+ gcc_assert (ready->n_ready);
+ t = ready->vec[ready->first--];
+ ready->n_ready--;
+ if (DEBUG_INSN_P (t))
+ ready->n_debug--;
+ /* If the queue becomes empty, reset it. */
+ if (ready->n_ready == 0)
+ ready->first = ready->veclen - 1;
+
+ gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
+ QUEUE_INDEX (t) = QUEUE_NOWHERE;
+
+ return t;
+}
+
+/* The following code implements multi-pass scheduling for the first
+ cycle. In other words, we try to choose the ready insn which
+ permits starting the maximum number of insns on the same cycle. */
+
+/* Return a pointer to the element at INDEX in the ready list. The
+ insn with the highest priority has INDEX 0, and the one with the
+ lowest priority has INDEX N_READY - 1. */
+
+rtx
+ready_element (struct ready_list *ready, int index)
+{
+ gcc_assert (ready->n_ready && index < ready->n_ready);
+
+ return ready->vec[ready->first - index];
+}
+
+/* Remove the element at INDEX from the ready list and return it. The
+ insn with the highest priority has INDEX 0, and the one with the
+ lowest priority has INDEX N_READY - 1. */
+
+HAIFA_INLINE static rtx
+ready_remove (struct ready_list *ready, int index)
+{
+ rtx t;
+ int i;
+
+ if (index == 0)
+ return ready_remove_first (ready);
+ gcc_assert (ready->n_ready && index < ready->n_ready);
+ t = ready->vec[ready->first - index];
+ ready->n_ready--;
+ if (DEBUG_INSN_P (t))
+ ready->n_debug--;
+ for (i = index; i < ready->n_ready; i++)
+ ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
+ QUEUE_INDEX (t) = QUEUE_NOWHERE;
+ return t;
+}
+
+/* Remove INSN from the ready list. */
+static void
+ready_remove_insn (rtx insn)
+{
+ int i;
+
+ for (i = 0; i < readyp->n_ready; i++)
+ if (ready_element (readyp, i) == insn)
+ {
+ ready_remove (readyp, i);
+ return;
+ }
+ gcc_unreachable ();
+}
+
+/* Sort the ready list READY by ascending priority, using the SCHED_SORT
+ macro. */
+
+void
+ready_sort (struct ready_list *ready)
+{
+ int i;
+ rtx *first = ready_lastpos (ready);
+
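+ /* Refresh the pressure info first: rank_for_schedule () reads
+ INSN_REG_PRESSURE_EXCESS_COST_CHANGE, which
+ setup_insn_reg_pressure_info () computes for the current
+ scheduling point. */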
+ if (sched_pressure_p)
+ {
+ for (i = 0; i < ready->n_ready; i++)
+ if (!DEBUG_INSN_P (first[i]))
+ setup_insn_reg_pressure_info (first[i]);
+ }
+ SCHED_SORT (first, ready->n_ready);
+}
+
+/* PREV is an insn that is ready to execute. Adjust its priority if that
+ will help shorten or lengthen register lifetimes as appropriate. Also
+ provide a hook for the target to tweak itself. */
+
+HAIFA_INLINE static void
+adjust_priority (rtx prev)
+{
+ /* ??? There used to be code here to try and estimate how an insn
+ affected register lifetimes, but it did it by looking at REG_DEAD
+ notes, which we removed in schedule_region. Nor did it try to
+ take into account register pressure or anything useful like that.
+
+ Revisit when we have a machine model to work with and not before. */
+
+ if (targetm.sched.adjust_priority)
+ INSN_PRIORITY (prev) =
+ targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
+}
+
+/* Advance DFA state STATE on one cycle. */
+void
+advance_state (state_t state)
+{
+ if (targetm.sched.dfa_pre_advance_cycle)
+ targetm.sched.dfa_pre_advance_cycle ();
+
+ if (targetm.sched.dfa_pre_cycle_insn)
+ state_transition (state,
+ targetm.sched.dfa_pre_cycle_insn ());
+
+ state_transition (state, NULL);
+
+ if (targetm.sched.dfa_post_cycle_insn)
+ state_transition (state,
+ targetm.sched.dfa_post_cycle_insn ());
+
+ if (targetm.sched.dfa_post_advance_cycle)
+ targetm.sched.dfa_post_advance_cycle ();
+}
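+
+/* Passing NULL to state_transition () advances STATE to the start of the
+ next cycle; the optional dfa_pre_cycle_insn / dfa_post_cycle_insn hooks
+ let a target model special reservations at the cycle boundary. */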
+
+/* Advance time on one cycle. */
+HAIFA_INLINE static void
+advance_one_cycle (void)
+{
+ advance_state (curr_state);
+ if (sched_verbose >= 6)
+ fprintf (sched_dump, ";;\tAdvanced a state.\n");
+}
+
+/* Update register pressure after scheduling INSN. */
+static void
+update_register_pressure (rtx insn)
+{
+ struct reg_use_data *use;
+ struct reg_set_data *set;
+
+ gcc_checking_assert (!DEBUG_INSN_P (insn));
+
+ for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
+ if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
+ mark_regno_birth_or_death (use->regno, false);
+ for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
+ mark_regno_birth_or_death (set->regno, true);
+}
+
+/* Set up or update (if UPDATE_P) max register pressure (see its
+ meaning in sched-int.h::_haifa_insn_data) for all current BB insns
+ after insn AFTER. */
+static void
+setup_insn_max_reg_pressure (rtx after, bool update_p)
+{
+ int i, p;
+ bool eq_p;
+ rtx insn;
+ static int max_reg_pressure[N_REG_CLASSES];
+
+ save_reg_pressure ();
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ max_reg_pressure[ira_pressure_classes[i]]
+ = curr_reg_pressure[ira_pressure_classes[i]];
+ for (insn = NEXT_INSN (after);
+ insn != NULL_RTX && ! BARRIER_P (insn)
+ && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
+ insn = NEXT_INSN (insn))
+ if (NONDEBUG_INSN_P (insn))
+ {
+ eq_p = true;
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ {
+ p = max_reg_pressure[ira_pressure_classes[i]];
+ if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
+ {
+ eq_p = false;
+ INSN_MAX_REG_PRESSURE (insn)[i]
+ = max_reg_pressure[ira_pressure_classes[i]];
+ }
+ }
+ if (update_p && eq_p)
+ break;
+ update_register_pressure (insn);
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ if (max_reg_pressure[ira_pressure_classes[i]]
+ < curr_reg_pressure[ira_pressure_classes[i]])
+ max_reg_pressure[ira_pressure_classes[i]]
+ = curr_reg_pressure[ira_pressure_classes[i]];
+ }
+ restore_reg_pressure ();
+}
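+
+/* Note that when updating, the walk above stops as soon as an insn's
+ cached maxima already match the running maxima, since the values
+ recorded downstream of that point cannot change either. */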
+
+/* Update the current register pressure after scheduling INSN. Also
+ update the max register pressure for the unscheduled insns of the
+ current BB. */
+static void
+update_reg_and_insn_max_reg_pressure (rtx insn)
+{
+ int i;
+ int before[N_REG_CLASSES];
+
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ before[i] = curr_reg_pressure[ira_pressure_classes[i]];
+ update_register_pressure (insn);
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
+ break;
+ if (i < ira_pressure_classes_num)
+ setup_insn_max_reg_pressure (insn, true);
+}
+
+/* Set up register pressure at the beginning of basic block BB, whose
+ insns start after insn AFTER. Also set up the max register pressure
+ for all insns of the basic block. */
+void
+sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
+{
+ gcc_assert (sched_pressure_p);
+ initiate_bb_reg_pressure_info (bb);
+ setup_insn_max_reg_pressure (after, false);
+}
+\f
+/* If doing predication while scheduling, verify whether INSN, which
+ has just been scheduled, clobbers the conditions of any
+ instructions that must be predicated in order to break their
+ dependencies. If so, remove them from the queues so that they will
+ only be scheduled once their control dependency is resolved. */
+
+static void
+check_clobbered_conditions (rtx insn)
+{
+ HARD_REG_SET t;
+ int i;
+
+ if ((current_sched_info->flags & DO_PREDICATION) == 0)
+ return;
+
+ find_all_hard_reg_sets (insn, &t);
+
+ restart:
+ for (i = 0; i < ready.n_ready; i++)
+ {
+ rtx x = ready_element (&ready, i);
+ if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
+ {
+ ready_remove_insn (x);
+ goto restart;
+ }
+ }
+ for (i = 0; i <= max_insn_queue_index; i++)
+ {
+ rtx link;
+ int q = NEXT_Q_AFTER (q_ptr, i);
+
+ restart_queue:
+ for (link = insn_queue[q]; link; link = XEXP (link, 1))
+ {
+ rtx x = XEXP (link, 0);
+ if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
+ {
+ queue_remove (x);
+ goto restart_queue;
+ }
+ }
+ }
+}
+\f
+/* A structure that holds local state for the loop in schedule_block. */
+struct sched_block_state
+{
+ /* True if no real insns have been scheduled in the current cycle. */
+ bool first_cycle_insn_p;
+ /* True if a shadow insn has been scheduled in the current cycle, which
+ means that no more normal insns can be issued. */
+ bool shadows_only_p;
+ /* True if we're winding down a modulo schedule, which means that we only
+ issue insns with INSN_EXACT_TICK set. */
+ bool modulo_epilogue;
+ /* Initialized with the machine's issue rate every cycle, and updated
+ by calls to the variable_issue hook. */
+ int can_issue_more;
+};
+
+/* INSN is the "currently executing insn". Launch each insn which was
+ waiting on INSN. READY is the ready list which contains the insns
+ that are ready to fire. CLOCK is the current cycle. The function
+ returns necessary cycle advance after issuing the insn (it is not
+ zero for insns in a schedule group). */
+
+static int
+schedule_insn (rtx insn)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+ int i;
+ int advance = 0;
+
+ if (sched_verbose >= 1)
+ {
+ struct reg_pressure_data *pressure_info;
+ char buf[2048];
+
+ print_insn (buf, insn, 0);
+ buf[40] = 0;
+ fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
+
+ if (recog_memoized (insn) < 0)
+ fprintf (sched_dump, "nothing");
+ else
+ print_reservation (sched_dump, insn);
+ pressure_info = INSN_REG_PRESSURE (insn);
+ if (pressure_info != NULL)
+ {
+ fputc (':', sched_dump);
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ fprintf (sched_dump, "%s%+d(%d)",
+ reg_class_names[ira_pressure_classes[i]],
+ pressure_info[i].set_increase, pressure_info[i].change);
+ }
+ fputc ('\n', sched_dump);
+ }
+
+ if (sched_pressure_p && !DEBUG_INSN_P (insn))
+ update_reg_and_insn_max_reg_pressure (insn);
+
+ /* An instruction being scheduled should have all its dependencies
+ resolved and should have been removed from the ready list. */
+ gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
+
+ /* Reset debug insns invalidated by moving this insn. */
+ if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
+ for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx dbg = DEP_PRO (dep);
+ struct reg_use_data *use, *next;
+
+ if (DEP_STATUS (dep) & DEP_CANCELLED)
+ {
+ sd_iterator_next (&sd_it);
+ continue;
+ }
+
+ gcc_assert (DEBUG_INSN_P (dbg));
+
+ if (sched_verbose >= 6)
+ fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
+ INSN_UID (dbg));
+
+ /* ??? Rather than resetting the debug insn, we might be able
+ to emit a debug temp before the just-scheduled insn, but
+ this would involve checking that the expression at the
+ point of the debug insn is equivalent to the expression
+ before the just-scheduled insn. They might not be: the
+ expression in the debug insn may depend on other insns not
+ yet scheduled that set MEMs, REGs or even other debug
+ insns. It's not clear that attempting to preserve debug
+ information in these cases is worth the effort, given how
+ uncommon these resets are and the likelihood that the debug
+ temps introduced won't survive the schedule change. */
+ INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
+ df_insn_rescan (dbg);
+
+ /* Unknown location doesn't use any registers. */
+ for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
+ {
+ struct reg_use_data *prev = use;
+
+ /* Remove use from the cyclic next_regno_use chain first. */
+ while (prev->next_regno_use != use)
+ prev = prev->next_regno_use;
+ prev->next_regno_use = use->next_regno_use;
+ next = use->next_insn_use;
+ free (use);
+ }
+ INSN_REG_USE_LIST (dbg) = NULL;
+
+ /* We delete rather than resolve these deps, otherwise we
+ crash in sched_free_deps(), because forward deps are
+ expected to be released before backward deps. */
+ sd_delete_dep (sd_it);
+ }
+
+ gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
+ QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
+
+ gcc_assert (INSN_TICK (insn) >= MIN_TICK);
+ if (INSN_TICK (insn) > clock_var)
+ /* INSN has been prematurely moved from the queue to the ready list.
+ This is possible only if the following flag is set. */
+ gcc_assert (flag_sched_stalled_insns);
+
+ /* ??? Probably, if INSN is scheduled prematurely, we should leave
+ INSN_TICK untouched. This is a machine-dependent issue, actually. */
+ INSN_TICK (insn) = clock_var;
+
+ check_clobbered_conditions (insn);
+
+ /* Update dependent instructions. */
+ for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx next = DEP_CON (dep);
+ bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
+
+ /* Resolve the dependence between INSN and NEXT.
+ sd_resolve_dep () moves current dep to another list thus
+ advancing the iterator. */
+ sd_resolve_dep (sd_it);
+
+ if (cancelled)
+ {
+ if (QUEUE_INDEX (next) != QUEUE_SCHEDULED)
+ {
+ int tick = INSN_TICK (next);
+ gcc_assert (ORIG_PAT (next) != NULL_RTX);
+ haifa_change_pattern (next, ORIG_PAT (next));
+ INSN_TICK (next) = tick;
+ if (sd_lists_empty_p (next, SD_LIST_BACK))
+ TODO_SPEC (next) = 0;
+ else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+ TODO_SPEC (next) = HARD_DEP;
+ }
+ continue;
+ }
+
+ /* Don't bother trying to mark next as ready if insn is a debug
+ insn. If insn is the last hard dependency, it will have
+ already been discounted. */
+ if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
+ continue;
+
+ if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
+ {
+ int effective_cost;
+
+ effective_cost = try_ready (next);
+
+ if (effective_cost >= 0
+ && SCHED_GROUP_P (next)
+ && advance < effective_cost)
+ advance = effective_cost;
+ }
+ else
+ /* A check always has exactly one forward dependence (to the first
+ insn in the recovery block), so this will be executed only once. */
+ {
+ gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
+ fix_recovery_deps (RECOVERY_BLOCK (insn));
+ }
+ }
+
+ /* Annotate the instruction with issue information -- TImode
+ indicates that the instruction is expected not to be able
+ to issue on the same cycle as the previous insn. A machine
+ may use this information to decide how the instruction should
+ be aligned. */
+ if (issue_rate > 1
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && !DEBUG_INSN_P (insn))
+ {
+ if (reload_completed)
+ PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
+ last_clock_var = clock_var;
+ }