+
+ regstat_free_calls_crossed ();
+
+ dfa_finish ();
+
+#ifdef ENABLE_CHECKING
+  /* After reload the ia64 backend clobbers the CFG, so we can't check anything.  */
+ if (!reload_completed)
+ check_cfg (0, 0);
+#endif
+}
+
+/* Fix INSN_TICKs of the instructions in the current block as well as
+ INSN_TICKs of their dependents.
+   HEAD and TAIL are the beginning and the end of the current scheduled block.  */
+static void
+fix_inter_tick (rtx head, rtx tail)
+{
+ /* Set of instructions with corrected INSN_TICK. */
+ bitmap_head processed;
+  /* ??? It is doubtful if we should assume that cycle advance happens on
+     basic block boundaries.  Basically insns that are unconditionally ready
+     at the start of the block are preferable to those which have
+     a one cycle dependency on an insn from the previous block.  */
+ int next_clock = clock_var + 1;
+
+ bitmap_initialize (&processed, 0);
+
+  /* Iterate over scheduled instructions and fix their INSN_TICKs and the
+     INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
+     across different blocks.  */
+ for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
+ {
+ if (INSN_P (head))
+ {
+ int tick;
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ tick = INSN_TICK (head);
+ gcc_assert (tick >= MIN_TICK);
+
+	  /* Fix INSN_TICK of an instruction from the just-scheduled block.  */
+ if (!bitmap_bit_p (&processed, INSN_LUID (head)))
+ {
+ bitmap_set_bit (&processed, INSN_LUID (head));
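+	      /* Rebase the tick relative to the start of the next block.  */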
+ tick -= next_clock;
+
+ if (tick < MIN_TICK)
+ tick = MIN_TICK;
+
+ INSN_TICK (head) = tick;
+ }
+
+ FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
+ {
+ rtx next;
+
+ next = DEP_CON (dep);
+ tick = INSN_TICK (next);
+
+ if (tick != INVALID_TICK
+ /* If NEXT has its INSN_TICK calculated, fix it.
+		     If not, it will be properly calculated from
+ scratch later in fix_tick_ready. */
+ && !bitmap_bit_p (&processed, INSN_LUID (next)))
+ {
+ bitmap_set_bit (&processed, INSN_LUID (next));
+ tick -= next_clock;
+
+ if (tick < MIN_TICK)
+ tick = MIN_TICK;
+
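+		  /* INTER_TICK records the largest tick this insn has been
+		     assigned in any block seen so far; never let INSN_TICK
+		     drop below that value.  */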
+ if (tick > INTER_TICK (next))
+ INTER_TICK (next) = tick;
+ else
+ tick = INTER_TICK (next);
+
+ INSN_TICK (next) = tick;
+ }
+ }
+ }
+ }
+ bitmap_clear (&processed);
+}
+
+static int haifa_speculate_insn (rtx, ds_t, rtx *);
+
+/* Check if NEXT is ready to be added to the ready or queue list.
+ If "yes", add it to the proper list.
+ Returns:
+ -1 - is not ready yet,
+ 0 - added to the ready list,
+ 0 < N - queued for N cycles. */
+int
+try_ready (rtx next)
+{
+ ds_t old_ts, *ts;
+
+ ts = &TODO_SPEC (next);
+ old_ts = *ts;
+
+ gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
+ && ((old_ts & HARD_DEP)
+ || (old_ts & SPECULATIVE)));
+
+ if (sd_lists_empty_p (next, SD_LIST_BACK))
+ /* NEXT has all its dependencies resolved. */
+ {
+ /* Remove HARD_DEP bit from NEXT's status. */
+ *ts &= ~HARD_DEP;
+
+ if (current_sched_info->flags & DO_SPECULATION)
+ /* Remove all speculative bits from NEXT's status. */
+ *ts &= ~SPECULATIVE;
+ }
+ else
+ {
+      /* One of NEXT's dependencies has been resolved.
+ Recalculate NEXT's status. */
+
+ *ts &= ~SPECULATIVE & ~HARD_DEP;
+
+ if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+ /* Now we've got NEXT with speculative deps only.
+ 1. Look at the deps to see what we have to do.
+ 2. Check if we can do 'todo'. */
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+ bool first_p = true;
+
+ FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
+ {
+ ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
+
+ if (DEBUG_INSN_P (DEP_PRO (dep))
+ && !DEBUG_INSN_P (next))
+ continue;
+
+ if (first_p)
+ {
+ first_p = false;
+
+ *ts = ds;
+ }
+ else
+ *ts = ds_merge (*ts, ds);
+ }
+
+ if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
+ /* Too few points. */
+ *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
+ }
+ else
+ *ts |= HARD_DEP;
+ }
+
+ if (*ts & HARD_DEP)
+ gcc_assert (*ts == old_ts
+ && QUEUE_INDEX (next) == QUEUE_NOWHERE);
+ else if (current_sched_info->new_ready)
+ *ts = current_sched_info->new_ready (next, *ts);
+
+  /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
+     have either its original pattern or a changed (speculative) one.  This
+     is due to changing the ebb in region scheduling.
+     * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
+     has a speculative pattern.
+
+ We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
+ control-speculative NEXT could have been discarded by sched-rgn.c
+ (the same case as when discarded by can_schedule_ready_p ()). */
+
+ if ((*ts & SPECULATIVE)
+ /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
+ need to change anything. */
+ && *ts != old_ts)
+ {
+ int res;
+ rtx new_pat;
+
+ gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
+
+ res = haifa_speculate_insn (next, *ts, &new_pat);
+
+ switch (res)
+ {
+ case -1:
+ /* It would be nice to change DEP_STATUS of all dependences,
+ which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
+ so we won't reanalyze anything. */
+ *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
+ break;
+
+ case 0:
+	  /* We follow the rule that every speculative insn
+	     has a non-null ORIG_PAT.  */
+ if (!ORIG_PAT (next))
+ ORIG_PAT (next) = PATTERN (next);
+ break;
+
+ case 1:
+ if (!ORIG_PAT (next))
+	    /* If we are going to overwrite the original pattern of the insn,
+	       save it.  */
+ ORIG_PAT (next) = PATTERN (next);
+
+ haifa_change_pattern (next, new_pat);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+  /* We need to restore the pattern only if (*ts == 0), because otherwise it
+     is either correct (*ts & SPECULATIVE),
+     or we simply don't care (*ts & HARD_DEP).  */
+
+ gcc_assert (!ORIG_PAT (next)
+ || !IS_SPECULATION_BRANCHY_CHECK_P (next));
+
+ if (*ts & HARD_DEP)
+ {
+ /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
+ control-speculative NEXT could have been discarded by sched-rgn.c
+ (the same case as when discarded by can_schedule_ready_p ()). */
+ /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
+
+ change_queue_index (next, QUEUE_NOWHERE);
+ return -1;
+ }
+ else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
+    /* We should change the pattern of every previously speculative
+       instruction - and we determine whether NEXT was speculative by using
+       its ORIG_PAT field.  There is one exception - speculation checks have
+       a non-null ORIG_PAT too, so skip them.  */
+ {
+ haifa_change_pattern (next, ORIG_PAT (next));
+ ORIG_PAT (next) = 0;
+ }
+
+ if (sched_verbose >= 2)
+ {
+ int s = TODO_SPEC (next);
+
+ fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
+ (*current_sched_info->print_insn) (next, 0));
+
+ if (spec_info && spec_info->dump)
+ {
+ if (s & BEGIN_DATA)
+ fprintf (spec_info->dump, "; data-spec;");
+ if (s & BEGIN_CONTROL)
+ fprintf (spec_info->dump, "; control-spec;");
+ if (s & BE_IN_CONTROL)
+ fprintf (spec_info->dump, "; in-control-spec;");
+ }
+
+ fprintf (sched_dump, "\n");
+ }
+
+ adjust_priority (next);
+
+ return fix_tick_ready (next);
+}
+
+/* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
+static int
+fix_tick_ready (rtx next)
+{
+ int tick, delay;
+
+ if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
+ {
+ int full_p;
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ tick = INSN_TICK (next);
+      /* If TICK is not equal to INVALID_TICK, then update
+	 INSN_TICK of NEXT with the most recent resolved dependence
+	 cost.  Otherwise, recalculate it from scratch.  */
+ full_p = (tick == INVALID_TICK);
+
+ FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
+ {
+ rtx pro = DEP_PRO (dep);
+ int tick1;
+
+ gcc_assert (INSN_TICK (pro) >= MIN_TICK);
+
+ tick1 = INSN_TICK (pro) + dep_cost (dep);
+ if (tick1 > tick)
+ tick = tick1;
+
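+	  /* The first dependence in the list is the most recently
+	     resolved one, so one iteration is enough unless we are
+	     recalculating from scratch.  */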
+ if (!full_p)
+ break;
+ }
+ }
+ else
+ tick = -1;
+
+ INSN_TICK (next) = tick;
+
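+  /* An insn whose tick does not exceed the current cycle is ready now.
+     When register-pressure scheduling is enabled, insns are never
+     queued by tick.  */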
+ delay = tick - clock_var;
+ if (delay <= 0 || sched_pressure_p)
+ delay = QUEUE_READY;
+
+ change_queue_index (next, delay);
+
+ return delay;
+}
+
+/* Move NEXT to the proper queue list with (DELAY >= 1),
+ or add it to the ready list (DELAY == QUEUE_READY),
+   or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE).  */
+static void
+change_queue_index (rtx next, int delay)
+{
+ int i = QUEUE_INDEX (next);
+
+ gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
+ && delay != 0);
+ gcc_assert (i != QUEUE_SCHEDULED);
+
+ if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
+ || (delay < 0 && delay == i))
+ /* We have nothing to do. */
+ return;
+
+ /* Remove NEXT from wherever it is now. */
+ if (i == QUEUE_READY)
+ ready_remove_insn (next);
+ else if (i >= 0)
+ queue_remove (next);
+
+ /* Add it to the proper place. */
+ if (delay == QUEUE_READY)
+ ready_add (readyp, next, false);
+ else if (delay >= 1)
+ queue_insn (next, delay);
+
+ if (sched_verbose >= 2)
+ {
+ fprintf (sched_dump, ";;\t\ttick updated: insn %s",
+ (*current_sched_info->print_insn) (next, 0));
+
+ if (delay == QUEUE_READY)
+ fprintf (sched_dump, " into ready\n");
+ else if (delay >= 1)
+ fprintf (sched_dump, " into queue with cost=%d\n", delay);
+ else
+ fprintf (sched_dump, " removed from ready or queue lists\n");
+ }
+}
+
+static int sched_ready_n_insns = -1;
+
+/* Initialize per region data structures. */
+void
+sched_extend_ready_list (int new_sched_ready_n_insns)
+{
+ int i;
+
+ if (sched_ready_n_insns == -1)
+ /* At the first call we need to initialize one more choice_stack
+ entry. */
+ {
+ i = 0;
+ sched_ready_n_insns = 0;
+ }
+ else
+ i = sched_ready_n_insns + 1;
+
+ ready.veclen = new_sched_ready_n_insns + issue_rate;
+ ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
+
+ gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
+
+ ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
+ sched_ready_n_insns, sizeof (*ready_try));
+
+ /* We allocate +1 element to save initial state in the choice_stack[0]
+ entry. */
+ choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
+ new_sched_ready_n_insns + 1);
+
+ for (; i <= new_sched_ready_n_insns; i++)
+ choice_stack[i].state = xmalloc (dfa_state_size);
+
+ sched_ready_n_insns = new_sched_ready_n_insns;
+}
+
+/* Free per region data structures. */
+void
+sched_finish_ready_list (void)
+{
+ int i;
+
+ free (ready.vec);
+ ready.vec = NULL;
+ ready.veclen = 0;
+
+ free (ready_try);
+ ready_try = NULL;
+
+ for (i = 0; i <= sched_ready_n_insns; i++)
+ free (choice_stack [i].state);
+ free (choice_stack);
+ choice_stack = NULL;
+
+ sched_ready_n_insns = -1;
+}
+
+static int
+haifa_luid_for_non_insn (rtx x)
+{
+ gcc_assert (NOTE_P (x) || LABEL_P (x));
+
+ return 0;
+}
+
+/* Generates recovery code for INSN. */
+static void
+generate_recovery_code (rtx insn)
+{
+ if (TODO_SPEC (insn) & BEGIN_SPEC)
+ begin_speculative_block (insn);
+
+ /* Here we have insn with no dependencies to
+     instructions other than CHECK_SPEC ones.  */
+
+ if (TODO_SPEC (insn) & BE_IN_SPEC)
+ add_to_speculative_block (insn);
+}
+
+/* Helper function.
+   Tries to add speculative dependencies of type FS between TWIN and
+   the consumers of INSN's forward dependencies.  */
+static void
+process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ {
+ ds_t ds;
+ rtx consumer;
+
+ consumer = DEP_CON (dep);
+
+ ds = DEP_STATUS (dep);
+
+ if (/* If we want to create speculative dep. */
+ fs
+ /* And we can do that because this is a true dep. */
+ && (ds & DEP_TYPES) == DEP_TRUE)
+ {
+ gcc_assert (!(ds & BE_IN_SPEC));
+
+ if (/* If this dep can be overcome with 'begin speculation'. */
+ ds & BEGIN_SPEC)
+ /* Then we have a choice: keep the dep 'begin speculative'
+ or transform it into 'be in speculative'. */
+ {
+	      if (/* In try_ready we assert that once an insn has become
+		     ready it can be removed from the ready (or queue) list
+		     only due to a backend decision.  Hence we can't let the
+		     probability of the speculative dep decrease.  */
+ ds_weak (ds) <= ds_weak (fs))
+ {
+ ds_t new_ds;
+
+ new_ds = (ds & ~BEGIN_SPEC) | fs;
+
+ if (/* consumer can 'be in speculative'. */
+ sched_insn_is_legitimate_for_speculation_p (consumer,
+ new_ds))
+ /* Transform it to be in speculative. */
+ ds = new_ds;
+ }
+ }
+ else
+ /* Mark the dep as 'be in speculative'. */
+ ds |= fs;
+ }
+
+ {
+ dep_def _new_dep, *new_dep = &_new_dep;
+
+ init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
+ sd_add_dep (new_dep, false);
+ }
+ }
+}
+
+/* Generates recovery code for BEGIN speculative INSN. */
+static void
+begin_speculative_block (rtx insn)
+{
+ if (TODO_SPEC (insn) & BEGIN_DATA)
+ nr_begin_data++;
+ if (TODO_SPEC (insn) & BEGIN_CONTROL)
+ nr_begin_control++;
+
+ create_check_block_twin (insn, false);
+
+ TODO_SPEC (insn) &= ~BEGIN_SPEC;
+}
+
+static void haifa_init_insn (rtx);
+
+/* Generates recovery code for BE_IN speculative INSN. */
+static void
+add_to_speculative_block (rtx insn)
+{
+ ds_t ts;
+ sd_iterator_def sd_it;
+ dep_t dep;
+ rtx twins = NULL;
+ rtx_vec_t priorities_roots;
+
+ ts = TODO_SPEC (insn);
+ gcc_assert (!(ts & ~BE_IN_SPEC));
+
+ if (ts & BE_IN_DATA)
+ nr_be_in_data++;
+ if (ts & BE_IN_CONTROL)
+ nr_be_in_control++;
+
+ TODO_SPEC (insn) &= ~BE_IN_SPEC;
+ gcc_assert (!TODO_SPEC (insn));
+
+ DONE_SPEC (insn) |= ts;
+
+  /* First we convert all simple checks to branchy ones.  */
+ for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx check = DEP_PRO (dep);
+
+ if (IS_SPECULATION_SIMPLE_CHECK_P (check))
+ {
+ create_check_block_twin (check, true);
+
+ /* Restart search. */
+ sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
+ }
+ else
+ /* Continue search. */
+ sd_iterator_next (&sd_it);
+ }
+
+ priorities_roots = NULL;
+ clear_priorities (insn, &priorities_roots);
+
+ while (1)
+ {
+ rtx check, twin;
+ basic_block rec;
+
+ /* Get the first backward dependency of INSN. */
+ sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
+ if (!sd_iterator_cond (&sd_it, &dep))
+ /* INSN has no backward dependencies left. */
+ break;
+
+ gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
+ && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
+ && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
+
+ check = DEP_PRO (dep);
+
+ gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
+ && QUEUE_INDEX (check) == QUEUE_NOWHERE);
+
+ rec = BLOCK_FOR_INSN (check);
+
+ twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
+ haifa_init_insn (twin);
+
+ sd_copy_back_deps (twin, insn, true);
+
+ if (sched_verbose && spec_info->dump)
+ /* INSN_BB (insn) isn't determined for twin insns yet.
+ So we can't use current_sched_info->print_insn. */
+ fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
+ INSN_UID (twin), rec->index);
+
+ twins = alloc_INSN_LIST (twin, twins);
+
+ /* Add dependences between TWIN and all appropriate
+ instructions from REC. */
+ FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
+ {
+ rtx pro = DEP_PRO (dep);
+
+ gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
+
+	  /* INSN might have dependencies on instructions from
+	     several recovery blocks.  At this iteration we process those
+	     producers that reside in REC.  */
+ if (BLOCK_FOR_INSN (pro) == rec)
+ {
+ dep_def _new_dep, *new_dep = &_new_dep;
+
+ init_dep (new_dep, pro, twin, REG_DEP_TRUE);
+ sd_add_dep (new_dep, false);
+ }
+ }
+
+ process_insn_forw_deps_be_in_spec (insn, twin, ts);
+
+ /* Remove all dependencies between INSN and insns in REC. */
+ for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx pro = DEP_PRO (dep);
+
+ if (BLOCK_FOR_INSN (pro) == rec)
+ sd_delete_dep (sd_it);
+ else
+ sd_iterator_next (&sd_it);
+ }
+ }
+
+ /* We couldn't have added the dependencies between INSN and TWINS earlier
+ because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
+ while (twins)
+ {
+ rtx twin;
+
+ twin = XEXP (twins, 0);
+
+ {
+ dep_def _new_dep, *new_dep = &_new_dep;
+
+ init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
+ sd_add_dep (new_dep, false);
+ }
+
+ twin = XEXP (twins, 1);
+ free_INSN_LIST_node (twins);
+ twins = twin;
+ }
+
+ calc_priorities (priorities_roots);
+ VEC_free (rtx, heap, priorities_roots);
+}
+
+/* Extends the array pointed to by P, filling only the new part with zeros.  */
+void *
+xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
+{
+ gcc_assert (new_nmemb >= old_nmemb);
+ p = XRESIZEVAR (void, p, new_nmemb * size);
+ memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
+ return p;
+}
+
+/* Helper function.
+ Find fallthru edge from PRED. */
+edge
+find_fallthru_edge (basic_block pred)
+{
+ edge e;
+ edge_iterator ei;
+ basic_block succ;
+
+ succ = pred->next_bb;
+ gcc_assert (succ->prev_bb == pred);
+
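+  /* Scan whichever of the two edge lists is shorter.  */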
+ if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
+ {
+ FOR_EACH_EDGE (e, ei, pred->succs)
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ gcc_assert (e->dest == succ);
+ return e;
+ }
+ }
+ else
+ {
+ FOR_EACH_EDGE (e, ei, succ->preds)
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ gcc_assert (e->src == pred);
+ return e;
+ }
+ }
+
+ return NULL;
+}
+
+/* Extend per basic block data structures. */
+static void
+sched_extend_bb (void)
+{
+ rtx insn;
+
+  /* The following is done to keep current_sched_info->next_tail non-null.  */
+ insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
+ if (NEXT_INSN (insn) == 0
+ || (!NOTE_P (insn)
+ && !LABEL_P (insn)
+ /* Don't emit a NOTE if it would end up before a BARRIER. */
+ && !BARRIER_P (NEXT_INSN (insn))))
+ {
+ rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
+      /* Make the note appear outside any BB.  */
+ set_block_for_insn (note, NULL);
+ BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
+ }
+}
+
+/* Init per basic block data structures. */
+void
+sched_init_bbs (void)
+{
+ sched_extend_bb ();
+}
+
+/* Initialize BEFORE_RECOVERY variable. */
+static void
+init_before_recovery (basic_block *before_recovery_ptr)
+{
+ basic_block last;
+ edge e;
+
+ last = EXIT_BLOCK_PTR->prev_bb;
+ e = find_fallthru_edge (last);
+
+ if (e)
+ {
+      /* We create two basic blocks:
+	 1. A single-instruction block that is inserted right after E->SRC
+	    and jumps to
+	 2. An empty block placed right before EXIT_BLOCK.
+	 Recovery blocks will be emitted between these two blocks.  */
+
+ basic_block single, empty;
+ rtx x, label;
+
+ /* If the fallthrough edge to exit we've found is from the block we've
+ created before, don't do anything more. */
+ if (last == after_recovery)
+ return;
+
+ adding_bb_to_current_region_p = false;
+
+ single = sched_create_empty_bb (last);
+ empty = sched_create_empty_bb (single);
+
+ /* Add new blocks to the root loop. */
+ if (current_loops != NULL)
+ {
+ add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
+ add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
+ }
+
+ single->count = last->count;
+ empty->count = last->count;
+ single->frequency = last->frequency;
+ empty->frequency = last->frequency;
+ BB_COPY_PARTITION (single, last);
+ BB_COPY_PARTITION (empty, last);
+
+ redirect_edge_succ (e, single);
+ make_single_succ_edge (single, empty, 0);
+ make_single_succ_edge (empty, EXIT_BLOCK_PTR,
+ EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
+
+ label = block_label (empty);
+ x = emit_jump_insn_after (gen_jump (label), BB_END (single));
+ JUMP_LABEL (x) = label;
+ LABEL_NUSES (label)++;
+ haifa_init_insn (x);
+
+ emit_barrier_after (x);
+
+ sched_init_only_bb (empty, NULL);
+ sched_init_only_bb (single, NULL);
+ sched_extend_bb ();
+
+ adding_bb_to_current_region_p = true;
+ before_recovery = single;
+ after_recovery = empty;
+
+ if (before_recovery_ptr)
+ *before_recovery_ptr = before_recovery;
+
+ if (sched_verbose >= 2 && spec_info->dump)
+ fprintf (spec_info->dump,
+ ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
+ last->index, single->index, empty->index);
+ }
+ else
+ before_recovery = last;
+}
+
+/* Returns the new recovery block.  */
+basic_block
+sched_create_recovery_block (basic_block *before_recovery_ptr)
+{
+ rtx label;
+ rtx barrier;
+ basic_block rec;
+
+ haifa_recovery_bb_recently_added_p = true;
+ haifa_recovery_bb_ever_added_p = true;
+
+ init_before_recovery (before_recovery_ptr);
+
+ barrier = get_last_bb_insn (before_recovery);
+ gcc_assert (BARRIER_P (barrier));
+
+ label = emit_label_after (gen_label_rtx (), barrier);
+
+ rec = create_basic_block (label, label, before_recovery);
+
+ /* A recovery block always ends with an unconditional jump. */
+ emit_barrier_after (BB_END (rec));
+
+ if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
+ BB_SET_PARTITION (rec, BB_COLD_PARTITION);
+
+ if (sched_verbose && spec_info->dump)
+ fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
+ rec->index);
+
+ return rec;
+}
+
+/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
+ and emit necessary jumps. */
+void
+sched_create_recovery_edges (basic_block first_bb, basic_block rec,
+ basic_block second_bb)
+{
+ rtx label;
+ rtx jump;
+ int edge_flags;
+
+  /* This fixes up the incoming edge.  */
+ /* ??? Which other flags should be specified? */
+ if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
+    /* The partition types are the same if both are "unpartitioned".  */
+ edge_flags = EDGE_CROSSING;
+ else
+ edge_flags = 0;
+
+ make_edge (first_bb, rec, edge_flags);
+ label = block_label (second_bb);
+ jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
+ JUMP_LABEL (jump) = label;
+ LABEL_NUSES (label)++;
+
+ if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
+    /* The partition types are the same if both are "unpartitioned".  */
+ {
+ /* Rewritten from cfgrtl.c. */
+ if (flag_reorder_blocks_and_partition
+ && targetm.have_named_sections)
+ {
+ /* We don't need the same note for the check because
+ any_condjump_p (check) == true. */
+ add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
+ }
+ edge_flags = EDGE_CROSSING;
+ }
+ else
+ edge_flags = 0;
+
+ make_single_succ_edge (rec, second_bb, edge_flags);
+}
+
+/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
+   INSN is a simple check that should be converted to a branchy one.  */
+static void
+create_check_block_twin (rtx insn, bool mutate_p)
+{
+ basic_block rec;
+ rtx label, check, twin;
+ ds_t fs;
+ sd_iterator_def sd_it;
+ dep_t dep;
+ dep_def _new_dep, *new_dep = &_new_dep;
+ ds_t todo_spec;
+
+ gcc_assert (ORIG_PAT (insn) != NULL_RTX);
+
+ if (!mutate_p)
+ todo_spec = TODO_SPEC (insn);
+ else
+ {
+ gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
+ && (TODO_SPEC (insn) & SPECULATIVE) == 0);
+
+ todo_spec = CHECK_SPEC (insn);
+ }
+
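+  /* Keep only the speculative bits of the status.  */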
+ todo_spec &= SPECULATIVE;
+
+ /* Create recovery block. */
+ if (mutate_p || targetm.sched.needs_block_p (todo_spec))
+ {
+ rec = sched_create_recovery_block (NULL);
+ label = BB_HEAD (rec);
+ }
+ else
+ {
+ rec = EXIT_BLOCK_PTR;
+ label = NULL_RTX;
+ }
+
+ /* Emit CHECK. */
+ check = targetm.sched.gen_spec_check (insn, label, todo_spec);
+
+ if (rec != EXIT_BLOCK_PTR)
+ {
+      /* To have mem_reg alive at the beginning of second_bb,
+	 we emit the check BEFORE insn, so that after splitting,
+	 insn will be at the beginning of second_bb, which will
+	 provide us with the correct life information.  */
+ check = emit_jump_insn_before (check, insn);
+ JUMP_LABEL (check) = label;
+ LABEL_NUSES (label)++;
+ }
+ else
+ check = emit_insn_before (check, insn);
+
+ /* Extend data structures. */
+ haifa_init_insn (check);
+
+ /* CHECK is being added to current region. Extend ready list. */
+ gcc_assert (sched_ready_n_insns != -1);
+ sched_extend_ready_list (sched_ready_n_insns + 1);
+
+ if (current_sched_info->add_remove_insn)
+ current_sched_info->add_remove_insn (insn, 0);
+
+ RECOVERY_BLOCK (check) = rec;
+
+ if (sched_verbose && spec_info->dump)
+ fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
+ (*current_sched_info->print_insn) (check, 0));
+
+ gcc_assert (ORIG_PAT (insn));
+
+  /* Initialize TWIN (twin is a duplicate of the original instruction
+     in the recovery block).  */
+ if (rec != EXIT_BLOCK_PTR)
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
+ if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
+ {
+ struct _dep _dep2, *dep2 = &_dep2;
+
+ init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
+
+ sd_add_dep (dep2, true);
+ }
+
+ twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
+ haifa_init_insn (twin);
+
+ if (sched_verbose && spec_info->dump)
+ /* INSN_BB (insn) isn't determined for twin insns yet.
+ So we can't use current_sched_info->print_insn. */
+ fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
+ INSN_UID (twin), rec->index);
+ }
+ else
+ {
+ ORIG_PAT (check) = ORIG_PAT (insn);
+ HAS_INTERNAL_DEP (check) = 1;
+ twin = check;
+ /* ??? We probably should change all OUTPUT dependencies to
+ (TRUE | OUTPUT). */
+ }
+
+ /* Copy all resolved back dependencies of INSN to TWIN. This will
+ provide correct value for INSN_TICK (TWIN). */
+ sd_copy_back_deps (twin, insn, true);
+
+ if (rec != EXIT_BLOCK_PTR)
+ /* In case of branchy check, fix CFG. */
+ {
+ basic_block first_bb, second_bb;
+ rtx jump;
+
+ first_bb = BLOCK_FOR_INSN (check);
+ second_bb = sched_split_block (first_bb, check);
+
+ sched_create_recovery_edges (first_bb, rec, second_bb);
+
+ sched_init_only_bb (second_bb, first_bb);
+ sched_init_only_bb (rec, EXIT_BLOCK_PTR);
+
+ jump = BB_END (rec);
+ haifa_init_insn (jump);
+ }
+
+ /* Move backward dependences from INSN to CHECK and
+ move forward dependences from INSN to TWIN. */
+
+ /* First, create dependencies between INSN's producers and CHECK & TWIN. */
+ FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
+ {
+ rtx pro = DEP_PRO (dep);
+ ds_t ds;
+
+ /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
+ check --TRUE--> producer ??? or ANTI ???
+ twin --TRUE--> producer
+ twin --ANTI--> check
+
+ If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
+ check --ANTI--> producer
+ twin --ANTI--> producer
+ twin --ANTI--> check
+
+ If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
+ check ~~TRUE~~> producer
+ twin ~~TRUE~~> producer
+ twin --ANTI--> check */
+
+ ds = DEP_STATUS (dep);
+
+ if (ds & BEGIN_SPEC)
+ {
+ gcc_assert (!mutate_p);
+ ds &= ~BEGIN_SPEC;
+ }
+
+ init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
+ sd_add_dep (new_dep, false);
+
+ if (rec != EXIT_BLOCK_PTR)
+ {
+ DEP_CON (new_dep) = twin;
+ sd_add_dep (new_dep, false);
+ }
+ }
+
+ /* Second, remove backward dependencies of INSN. */
+ for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ if ((DEP_STATUS (dep) & BEGIN_SPEC)
+ || mutate_p)
+ /* We can delete this dep because we overcome it with
+ BEGIN_SPECULATION. */
+ sd_delete_dep (sd_it);
+ else
+ sd_iterator_next (&sd_it);
+ }
+
+ /* Future Speculations. Determine what BE_IN speculations will be like. */
+ fs = 0;
+
+ /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
+ here. */
+
+ gcc_assert (!DONE_SPEC (insn));
+
+ if (!mutate_p)
+ {
+ ds_t ts = TODO_SPEC (insn);
+
+ DONE_SPEC (insn) = ts & BEGIN_SPEC;
+ CHECK_SPEC (check) = ts & BEGIN_SPEC;
+
+      /* The luckiness of future speculations depends solely upon the
+	 initial BEGIN speculation.  */
+ if (ts & BEGIN_DATA)
+ fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
+ if (ts & BEGIN_CONTROL)
+ fs = set_dep_weak (fs, BE_IN_CONTROL,
+ get_dep_weak (ts, BEGIN_CONTROL));
+ }
+ else
+ CHECK_SPEC (check) = CHECK_SPEC (insn);
+
+ /* Future speculations: call the helper. */
+ process_insn_forw_deps_be_in_spec (insn, twin, fs);
+
+ if (rec != EXIT_BLOCK_PTR)
+ {
+      /* Which types of dependencies we should use here is, generally,
+	 a machine-dependent question...  But, for now,
+	 it is not.  */
+
+ if (!mutate_p)
+ {
+ init_dep (new_dep, insn, check, REG_DEP_TRUE);
+ sd_add_dep (new_dep, false);
+
+ init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
+ sd_add_dep (new_dep, false);
+ }
+ else
+ {
+ if (spec_info->dump)
+ fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
+ (*current_sched_info->print_insn) (insn, 0));
+
+ /* Remove all dependencies of the INSN. */
+ {
+ sd_it = sd_iterator_start (insn, (SD_LIST_FORW
+ | SD_LIST_BACK
+ | SD_LIST_RES_BACK));
+ while (sd_iterator_cond (&sd_it, &dep))
+ sd_delete_dep (sd_it);
+ }
+
+	  /* If the former check (INSN) was already moved to the ready (or
+	     queue) list, add the new check (CHECK) there too.  */
+ if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
+ try_ready (check);
+
+ /* Remove old check from instruction stream and free its
+ data. */
+ sched_remove_insn (insn);
+ }
+
+ init_dep (new_dep, check, twin, REG_DEP_ANTI);
+ sd_add_dep (new_dep, false);
+ }
+ else
+ {
+ init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
+ sd_add_dep (new_dep, false);
+ }
+
+ if (!mutate_p)
+ /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
+ because it'll be done later in add_to_speculative_block. */
+ {
+ rtx_vec_t priorities_roots = NULL;
+
+ clear_priorities (twin, &priorities_roots);
+ calc_priorities (priorities_roots);
+ VEC_free (rtx, heap, priorities_roots);
+ }
+}
+
+/* Removes dependencies between instructions in the recovery block REC
+   and usual region instructions.  It keeps inner dependencies so it
+   won't be necessary to recompute them.  */
+static void
+fix_recovery_deps (basic_block rec)
+{
+ rtx note, insn, jump, ready_list = 0;
+ bitmap_head in_ready;
+ rtx link;
+
+ bitmap_initialize (&in_ready, 0);
+
+ /* NOTE - a basic block note. */
+ note = NEXT_INSN (BB_HEAD (rec));
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+ insn = BB_END (rec);
+ gcc_assert (JUMP_P (insn));
+ insn = PREV_INSN (insn);
+
+ do
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx consumer = DEP_CON (dep);
+
+ if (BLOCK_FOR_INSN (consumer) != rec)
+ {
+ sd_delete_dep (sd_it);
+
+ if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
+ {
+ ready_list = alloc_INSN_LIST (consumer, ready_list);
+ bitmap_set_bit (&in_ready, INSN_LUID (consumer));
+ }
+ }
+ else
+ {
+ gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
+
+ sd_iterator_next (&sd_it);
+ }
+ }
+
+ insn = PREV_INSN (insn);
+ }
+ while (insn != note);
+
+ bitmap_clear (&in_ready);
+
+ /* Try to add instructions to the ready or queue list. */
+ for (link = ready_list; link; link = XEXP (link, 1))
+ try_ready (XEXP (link, 0));
+ free_INSN_LIST_list (&ready_list);
+
+  /* Fix the jump's dependencies.  */
+ insn = BB_HEAD (rec);
+ jump = BB_END (rec);
+
+ gcc_assert (LABEL_P (insn));
+ insn = NEXT_INSN (insn);
+
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
+ add_jump_dependencies (insn, jump);
+}
+
+/* Change pattern of INSN to NEW_PAT. */
+void
+sched_change_pattern (rtx insn, rtx new_pat)
+{
+ int t;
+
+ t = validate_change (insn, &PATTERN (insn), new_pat, 0);
+ gcc_assert (t);
+ dfa_clear_single_insn_cache (insn);
+}
+
+/* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
+ instruction data. */
+static void
+haifa_change_pattern (rtx insn, rtx new_pat)
+{
+ sched_change_pattern (insn, new_pat);
+
+ /* Invalidate INSN_COST, so it'll be recalculated. */
+ INSN_COST (insn) = -1;
+ /* Invalidate INSN_TICK, so it'll be recalculated. */
+ INSN_TICK (insn) = INVALID_TICK;
+}
+
+/* -1 - can't speculate,
+ 0 - for speculation with REQUEST mode it is OK to use
+ current instruction pattern,
+ 1 - need to change pattern for *NEW_PAT to be speculative. */
+int
+sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
+{
+ gcc_assert (current_sched_info->flags & DO_SPECULATION
+ && (request & SPECULATIVE)
+ && sched_insn_is_legitimate_for_speculation_p (insn, request));
+
+ if ((request & spec_info->mask) != request)
+ return -1;
+
+ if (request & BE_IN_SPEC
+ && !(request & BEGIN_SPEC))
+ return 0;
+
+ return targetm.sched.speculate_insn (insn, request, new_pat);
+}
+
+static int
+haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
+{
+ gcc_assert (sched_deps_info->generate_spec_deps
+ && !IS_SPECULATION_CHECK_P (insn));
+
+ if (HAS_INTERNAL_DEP (insn)
+ || SCHED_GROUP_P (insn))
+ return -1;
+
+ return sched_speculate_insn (insn, request, new_pat);
+}
+
+/* Print some information about block BB, which starts with HEAD and
+ ends with TAIL, before scheduling it.
+   I is zero if the scheduler is about to start with a fresh ebb.  */
+static void
+dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
+{
+ if (!i)
+ fprintf (sched_dump,
+ ";; ======================================================\n");
+ else
+ fprintf (sched_dump,
+ ";; =====================ADVANCING TO=====================\n");
+ fprintf (sched_dump,
+ ";; -- basic block %d from %d to %d -- %s reload\n",
+ bb->index, INSN_UID (head), INSN_UID (tail),
+ (reload_completed ? "after" : "before"));
+ fprintf (sched_dump,
+ ";; ======================================================\n");
+ fprintf (sched_dump, "\n");
+}
+
+/* Unlink basic block notes and labels and save them, so they
+   can be easily restored.  We unlink basic block notes in the EBB to
+   provide backward compatibility with the previous code, as target backends
+   assume that there'll be only instructions between
+   current_sched_info->{head and tail}.  We restore these notes as soon
+   as we can.
+   FIRST (LAST) is the first (last) basic block in the ebb.
+   NB: In the usual case (FIRST == LAST) nothing is really done.  */
+void
+unlink_bb_notes (basic_block first, basic_block last)
+{
+ /* We DON'T unlink basic block notes of the first block in the ebb. */
+ if (first == last)
+ return;
+
+ bb_header = XNEWVEC (rtx, last_basic_block);
+
+ /* Make a sentinel. */
+ if (last->next_bb != EXIT_BLOCK_PTR)
+ bb_header[last->next_bb->index] = 0;
+
+ first = first->next_bb;
+ do
+ {
+ rtx prev, label, note, next;
+
+ label = BB_HEAD (last);
+ if (LABEL_P (label))
+ note = NEXT_INSN (label);
+ else
+ note = label;
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+
+ prev = PREV_INSN (label);
+ next = NEXT_INSN (note);
+ gcc_assert (prev && next);
+
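+      /* Splice the label and note out of the insn chain.  */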
+ NEXT_INSN (prev) = next;
+ PREV_INSN (next) = prev;
+
+ bb_header[last->index] = label;
+
+ if (last == first)
+ break;
+
+ last = last->prev_bb;
+ }
+ while (1);
+}
+
+/* Restore basic block notes.
+ FIRST is the first basic block in the ebb. */
+static void
+restore_bb_notes (basic_block first)
+{
+ if (!bb_header)
+ return;
+
+ /* We DON'T unlink basic block notes of the first block in the ebb. */
+ first = first->next_bb;
+  /* Remember: FIRST is actually the second basic block in the ebb.  */
+
+ while (first != EXIT_BLOCK_PTR
+ && bb_header[first->index])
+ {
+ rtx prev, label, note, next;
+
+ label = bb_header[first->index];
+ prev = PREV_INSN (label);
+ next = NEXT_INSN (prev);
+
+ if (LABEL_P (label))
+ note = NEXT_INSN (label);
+ else
+ note = label;
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+
+ bb_header[first->index] = 0;
+
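+      /* Splice the label and note back into the insn chain.  */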
+ NEXT_INSN (prev) = label;
+ NEXT_INSN (note) = next;
+ PREV_INSN (next) = note;
+
+ first = first->next_bb;
+ }
+
+ free (bb_header);
+ bb_header = 0;
+}
+
+/* Helper function.
+ Fix CFG after both in- and inter-block movement of
+ control_flow_insn_p JUMP. */
+static void
+fix_jump_move (rtx jump)
+{
+ basic_block bb, jump_bb, jump_bb_next;
+
+ bb = BLOCK_FOR_INSN (PREV_INSN (jump));
+ jump_bb = BLOCK_FOR_INSN (jump);
+ jump_bb_next = jump_bb->next_bb;
+
+ gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
+ || IS_SPECULATION_BRANCHY_CHECK_P (jump));
+
+ if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
+    /* If jump_bb_next is not empty.  */
+ BB_END (jump_bb) = BB_END (jump_bb_next);
+
+ if (BB_END (bb) != PREV_INSN (jump))
+    /* Then there are instructions after jump that should be placed
+       in jump_bb_next.  */
+ BB_END (jump_bb_next) = BB_END (bb);
+ else
+ /* Otherwise jump_bb_next is empty. */
+ BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
+
+  /* To make the assertion in move_insn happy.  */
+ BB_END (bb) = PREV_INSN (jump);
+
+ update_bb_for_insn (jump_bb_next);
+}
+
+/* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
+static void
+move_block_after_check (rtx jump)
+{
+ basic_block bb, jump_bb, jump_bb_next;
+ VEC(edge,gc) *t;
+
+ bb = BLOCK_FOR_INSN (PREV_INSN (jump));
+ jump_bb = BLOCK_FOR_INSN (jump);
+ jump_bb_next = jump_bb->next_bb;
+
+ update_bb_for_insn (jump_bb);
+
+ gcc_assert (IS_SPECULATION_CHECK_P (jump)
+ || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
+
+ unlink_block (jump_bb_next);
+ link_block (jump_bb_next, bb);
+
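+  /* Rotate the successor lists: BB takes JUMP_BB's successors,
+     JUMP_BB takes JUMP_BB_NEXT's, and JUMP_BB_NEXT takes BB's
+     original ones (saved in T).  */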
+ t = bb->succs;
+ bb->succs = 0;
+ move_succs (&(jump_bb->succs), bb);
+ move_succs (&(jump_bb_next->succs), jump_bb);
+ move_succs (&t, jump_bb_next);
+
+ df_mark_solutions_dirty ();
+
+ common_sched_info->fix_recovery_cfg
+ (bb->index, jump_bb->index, jump_bb_next->index);
+}
+
+/* Helper function for move_block_after_check.
+   This function attaches the edge vector pointed to by SUCCSP to
+   block TO.  */
+static void
+move_succs (VEC(edge,gc) **succsp, basic_block to)
+{
+ edge e;
+ edge_iterator ei;
+
+ gcc_assert (to->succs == 0);
+
+ to->succs = *succsp;
+
+ FOR_EACH_EDGE (e, ei, to->succs)
+ e->src = to;
+
+ *succsp = 0;
+}
+
+/* Remove INSN from the instruction stream.
+   INSN should not have any dependencies.  */
+static void
+sched_remove_insn (rtx insn)
+{
+ sd_finish_insn (insn);
+
+ change_queue_index (insn, QUEUE_NOWHERE);
+ current_sched_info->add_remove_insn (insn, 1);
+ remove_insn (insn);
+}
+
+/* Clear priorities of all instructions that are forward dependent on INSN.
+   Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
+   should be invoked to initialize all cleared priorities.  */
+static void
+clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+ bool insn_is_root_p = true;
+
+ gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
+
+ FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
+ {
+ rtx pro = DEP_PRO (dep);
+
+ if (INSN_PRIORITY_STATUS (pro) >= 0
+ && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
+ {
+ /* If DEP doesn't contribute to priority then INSN itself should
+ be added to priority roots. */
+ if (contributes_to_priority_p (dep))
+ insn_is_root_p = false;
+
+ INSN_PRIORITY_STATUS (pro) = -1;
+ clear_priorities (pro, roots_ptr);
+ }
+ }
+
+ if (insn_is_root_p)
+ VEC_safe_push (rtx, heap, *roots_ptr, insn);
+}
+
+/* Recompute priorities of instructions whose priorities might have
+   changed.  ROOTS is a vector of instructions whose priority computation
+   will trigger initialization of all cleared priorities.  */
+static void
+calc_priorities (rtx_vec_t roots)
+{
+ int i;
+ rtx insn;
+
+ for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
+ priority (insn);
+}
+
+
+/* Add dependences between JUMP and other instructions in the recovery
+   block.  INSN is the first insn in the recovery block.  */
+static void
+add_jump_dependencies (rtx insn, rtx jump)
+{
+ do
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == jump)
+ break;
+
+ if (dep_list_size (insn) == 0)
+ {
+ dep_def _new_dep, *new_dep = &_new_dep;
+
+ init_dep (new_dep, insn, jump, REG_DEP_ANTI);
+ sd_add_dep (new_dep, false);
+ }
+ }
+ while (1);
+
+ gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
+}
+
+/* Return the NOTE_INSN_BASIC_BLOCK of BB. */
+rtx
+bb_note (basic_block bb)
+{
+ rtx note;
+
+ note = BB_HEAD (bb);
+ if (LABEL_P (note))
+ note = NEXT_INSN (note);
+
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+ return note;
+}
+
+#ifdef ENABLE_CHECKING
+/* Helper function for check_cfg.
+   Return nonzero if the edge vector pointed to by EL has an edge with
+   TYPE in its flags.  */
+static int
+has_edge_p (VEC(edge,gc) *el, int type)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, el)
+ if (e->flags & type)
+ return 1;
+ return 0;
+}
+
+/* Search back, starting at INSN, for an insn that is not a
+ NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
+ no such insn can be found. */
+static inline rtx
+prev_non_location_insn (rtx insn, rtx head)
+{
+ while (insn != head && NOTE_P (insn)
+ && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
+ insn = PREV_INSN (insn);
+
+ return insn;
+}
+
+/* Check a few properties of the CFG between HEAD and TAIL.
+   If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
+   instruction stream.  */
+static void
+check_cfg (rtx head, rtx tail)
+{
+ rtx next_tail;
+ basic_block bb = 0;
+ int not_first = 0, not_last;
+
+ if (head == NULL)
+ head = get_insns ();
+ if (tail == NULL)
+ tail = get_last_insn ();
+ next_tail = NEXT_INSN (tail);
+
+ do
+ {
+ not_last = head != tail;
+
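+      /* Verify that the insn chain is consistently doubly linked.  */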
+ if (not_first)
+ gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
+ if (not_last)
+ gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
+
+ if (LABEL_P (head)
+ || (NOTE_INSN_BASIC_BLOCK_P (head)
+ && (!not_first
+ || (not_first && !LABEL_P (PREV_INSN (head))))))
+ {
+ gcc_assert (bb == 0);
+ bb = BLOCK_FOR_INSN (head);
+ if (bb != 0)
+ gcc_assert (BB_HEAD (bb) == head);
+ else
+	    /* This is the case of a jump table.  See inside_basic_block_p ().  */
+ gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
+ }
+
+ if (bb == 0)
+ {
+ gcc_assert (!inside_basic_block_p (head));
+ head = NEXT_INSN (head);
+ }
+ else
+ {
+ gcc_assert (inside_basic_block_p (head)
+ || NOTE_P (head));
+ gcc_assert (BLOCK_FOR_INSN (head) == bb);
+
+ if (LABEL_P (head))
+ {
+ head = NEXT_INSN (head);
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
+ }
+ else
+ {
+ if (control_flow_insn_p (head))
+ {
+ gcc_assert (prev_non_location_insn (BB_END (bb), head)
+ == head);
+
+ if (any_uncondjump_p (head))
+ gcc_assert (EDGE_COUNT (bb->succs) == 1
+ && BARRIER_P (NEXT_INSN (head)));
+ else if (any_condjump_p (head))
+ gcc_assert (/* Usual case. */
+ (EDGE_COUNT (bb->succs) > 1
+ && !BARRIER_P (NEXT_INSN (head)))
+ /* Or jump to the next instruction. */
+ || (EDGE_COUNT (bb->succs) == 1
+ && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
+ == JUMP_LABEL (head))));
+ }
+ if (BB_END (bb) == head)
+ {
+ if (EDGE_COUNT (bb->succs) > 1)
+ gcc_assert (control_flow_insn_p (prev_non_location_insn
+ (head, BB_HEAD (bb)))
+ || has_edge_p (bb->succs, EDGE_COMPLEX));
+ bb = 0;
+ }
+
+ head = NEXT_INSN (head);
+ }
+ }
+
+ not_first = 1;
+ }
+ while (head != next_tail);
+
+ gcc_assert (bb == 0);
+}
+
+#endif /* ENABLE_CHECKING */
+
+/* Extend per basic block data structures. */
+static void
+extend_bb (void)
+{
+ if (sched_scan_info->extend_bb)
+ sched_scan_info->extend_bb ();
+}
+
+/* Init data for BB. */
+static void
+init_bb (basic_block bb)
+{
+ if (sched_scan_info->init_bb)
+ sched_scan_info->init_bb (bb);
+}
+
+/* Extend per insn data structures. */
+static void
+extend_insn (void)
+{
+ if (sched_scan_info->extend_insn)
+ sched_scan_info->extend_insn ();
+}
+
+/* Init data structures for INSN. */
+static void
+init_insn (rtx insn)
+{
+ if (sched_scan_info->init_insn)
+ sched_scan_info->init_insn (insn);
+}
+
+/* Init all insns in BB. */
+static void
+init_insns_in_bb (basic_block bb)
+{
+ rtx insn;
+
+ FOR_BB_INSNS (bb, insn)
+ init_insn (insn);
+}
+
+/* A driver function to add a set of basic blocks (BBS),
+ a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
+ to the scheduling region. */
+void
+sched_scan (const struct sched_scan_info_def *ssi,
+ bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
+{
+ sched_scan_info = ssi;
+
+ if (bbs != NULL || bb != NULL)
+ {
+ extend_bb ();
+
+ if (bbs != NULL)
+ {
+ unsigned i;
+ basic_block x;
+
+ for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
+ init_bb (x);
+ }
+
+ if (bb != NULL)
+ init_bb (bb);
+ }
+
+ extend_insn ();
+
+ if (bbs != NULL)
+ {
+ unsigned i;
+ basic_block x;
+
+ for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
+ init_insns_in_bb (x);
+ }
+
+ if (bb != NULL)
+ init_insns_in_bb (bb);
+
+ if (insns != NULL)
+ {
+ unsigned i;
+ rtx x;
+
+ for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
+ init_insn (x);
+ }
+
+ if (insn != NULL)
+ init_insn (insn);
+}
+
+
+/* Extend data structures for logical insn UID. */
+static void
+luids_extend_insn (void)
+{
+ int new_luids_max_uid = get_max_uid () + 1;
+
+ VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
+}
+
+/* Initialize LUID for INSN. */
+static void
+luids_init_insn (rtx insn)
+{
+ int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
+ int luid;
+
+ if (i >= 0)
+ {
+ luid = sched_max_luid;
+ sched_max_luid += i;
+ }
+ else
+ luid = -1;
+
+ SET_INSN_LUID (insn, luid);
+}
+
+/* Initialize luids for BBS, BB, INSNS and INSN.
+ The hook common_sched_info->luid_for_non_insn () is used to determine
+ if notes, labels, etc. need luids. */
+void
+sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
+{
+ const struct sched_scan_info_def ssi =
+ {
+ NULL, /* extend_bb */
+ NULL, /* init_bb */
+ luids_extend_insn, /* extend_insn */
+ luids_init_insn /* init_insn */
+ };
+
+ sched_scan (&ssi, bbs, bb, insns, insn);
+}
+
+/* Free LUIDs. */
+void
+sched_finish_luids (void)
+{
+ VEC_free (int, heap, sched_luids);
+ sched_max_luid = 1;
+}
+
+/* Return logical uid of INSN. Helpful while debugging. */
+int
+insn_luid (rtx insn)
+{
+ return INSN_LUID (insn);
+}
+
+/* Extend per insn data in the target. */
+void
+sched_extend_target (void)
+{
+ if (targetm.sched.h_i_d_extended)
+ targetm.sched.h_i_d_extended ();
+}
+
+/* Extend global scheduler structures (those that live across calls to
+   schedule_block) to include information about the just emitted INSN.  */
+static void
+extend_h_i_d (void)
+{
+ int reserve = (get_max_uid () + 1
+ - VEC_length (haifa_insn_data_def, h_i_d));
+ if (reserve > 0
+ && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
+ {
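+      /* Grow beyond the immediate need to amortize the cost of
+	 repeated reallocations.  */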
+ VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
+ 3 * get_max_uid () / 2);
+ sched_extend_target ();
+ }
+}
+
+/* Initialize the h_i_d entry of INSN with default values.
+   Values that are not explicitly initialized here hold zero.  */
+static void
+init_h_i_d (rtx insn)
+{
+ if (INSN_LUID (insn) > 0)
+ {
+ INSN_COST (insn) = -1;
+ QUEUE_INDEX (insn) = QUEUE_NOWHERE;
+ INSN_TICK (insn) = INVALID_TICK;
+ INTER_TICK (insn) = INVALID_TICK;
+ TODO_SPEC (insn) = HARD_DEP;
+ }
+}
+
+/* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
+void
+haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
+{
+ const struct sched_scan_info_def ssi =
+ {
+ NULL, /* extend_bb */
+ NULL, /* init_bb */
+ extend_h_i_d, /* extend_insn */
+ init_h_i_d /* init_insn */
+ };
+
+ sched_scan (&ssi, bbs, bb, insns, insn);
+}
+
+/* Finalize haifa_insn_data. */
+void
+haifa_finish_h_i_d (void)
+{
+ int i;
+ haifa_insn_data_t data;
+ struct reg_use_data *use, *next;
+
+ for (i = 0; VEC_iterate (haifa_insn_data_def, h_i_d, i, data); i++)
+ {
+ if (data->reg_pressure != NULL)
+ free (data->reg_pressure);
+ for (use = data->reg_use_list; use != NULL; use = next)
+ {
+ next = use->next_insn_use;
+ free (use);
+ }
+ }
+ VEC_free (haifa_insn_data_def, heap, h_i_d);
+}
+
+/* Init data for the new insn INSN. */
+static void
+haifa_init_insn (rtx insn)
+{
+ gcc_assert (insn != NULL);
+
+ sched_init_luids (NULL, NULL, NULL, insn);
+ sched_extend_target ();
+ sched_deps_init (false);
+ haifa_init_h_i_d (NULL, NULL, NULL, insn);
+
+ if (adding_bb_to_current_region_p)
+ {
+ sd_init_insn (insn);
+
+ /* Extend dependency caches by one element. */
+ extend_dependency_caches (1, false);
+ }
+}
+
+/* Init data for the new basic block BB which comes after AFTER. */
+static void
+haifa_init_only_bb (basic_block bb, basic_block after)
+{
+ gcc_assert (bb != NULL);
+
+ sched_init_bbs ();
+
+ if (common_sched_info->add_block)
+ /* This changes only data structures of the front-end. */
+ common_sched_info->add_block (bb, after);
+}
+
+/* A generic version of sched_split_block (). */
+basic_block
+sched_split_block_1 (basic_block first_bb, rtx after)
+{
+ edge e;
+
+ e = split_block (first_bb, after);
+ gcc_assert (e->src == first_bb);
+
+  /* sched_split_block emits a note if *check == BB_END.  Probably it
+     is better to rip that note off.  */
+
+ return e->dest;
+}
+
+/* A generic version of sched_create_empty_bb (). */
+basic_block
+sched_create_empty_bb_1 (basic_block after)
+{
+ return create_empty_bb (after);
+}
+
+/* Insert PAT as an INSN into the schedule and update the necessary data
+ structures to account for it. */
+rtx
+sched_emit_insn (rtx pat)
+{
+ rtx insn = emit_insn_after (pat, last_scheduled_insn);
+ last_scheduled_insn = insn;
+ haifa_init_insn (insn);
+ return insn;
+}
+