+/* Threads edge E through E->dest to the edge THREAD_TARGET (E). Returns the
+ copy of E->dest created during threading, or E->dest if it was not necessary
+ to copy it (E is its single predecessor). The threading request stored in
+ E->aux is consumed (freed) by this function. */
+
+static basic_block
+thread_single_edge (edge e)
+{
+ basic_block bb = e->dest;
+ edge eto = THREAD_TARGET (e);
+ struct redirection_data rd;
+
+ /* The request attached to E is acted upon here; release it so no
+ stale threading request remains on the edge. */
+ free (e->aux);
+ e->aux = NULL;
+
+ thread_stats.num_threaded_edges++;
+
+ if (single_pred_p (bb))
+ {
+ /* If BB has just a single predecessor, we should only remove the
+ control statements at its end, and successors except for ETO. */
+ remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
+
+ /* And fixup the flags on the single remaining edge. */
+ eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
+ eto->flags |= EDGE_FALLTHRU;
+
+ return bb;
+ }
+
+ /* Otherwise, we need to create a copy. */
+ if (e->dest == eto->src)
+ update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
+
+ rd.outgoing_edge = eto;
+
+ /* Duplicate BB, strip the control statement from the copy, and wire the
+ copy straight to ETO's destination (updating PHIs there). */
+ create_block_for_threading (bb, &rd);
+ remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
+ create_edge_and_update_destination_phis (&rd, rd.dup_block);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
+ e->src->index, e->dest->index, rd.dup_block->index);
+
+ /* The duplicate inherits E's profile, since E is now its only way in. */
+ rd.dup_block->count = e->count;
+ rd.dup_block->frequency = EDGE_FREQUENCY (e);
+ single_succ_edge (rd.dup_block)->count = e->count;
+ redirect_edge_and_branch (e, rd.dup_block);
+ flush_pending_stmts (e);
+
+ return rd.dup_block;
+}
+
+/* Callback for dfs_enumerate_from. Continue the enumeration as long as
+ BB is neither STOP nor the file-static DBDS_CE_STOP block. */
+
+static basic_block dbds_ce_stop;
+static bool
+dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
+{
+ if (bb == (const_basic_block) stop)
+ return false;
+ return bb != dbds_ce_stop;
+}
+
+/* Evaluates the dominance relationship of latch of the LOOP and BB, and
+ returns the state. */
+
+enum bb_dom_status
+{
+ /* BB does not dominate latch of the LOOP. */
+ DOMST_NONDOMINATING,
+ /* The LOOP is broken (there is no path from the header to its latch). */
+ DOMST_LOOP_BROKEN,
+ /* BB dominates the latch of the LOOP. */
+ DOMST_DOMINATING
+};
+
+/* Determines whether BB dominates the latch of LOOP, by walking the loop
+ backwards from the latch and checking which blocks it can reach without
+ passing through BB or the loop header. */
+static enum bb_dom_status
+determine_bb_domination_status (struct loop *loop, basic_block bb)
+{
+ basic_block *bblocks;
+ unsigned nblocks, i;
+ bool bb_reachable = false;
+ edge_iterator ei;
+ edge e;
+
+ /* This function assumes BB is a successor of LOOP->header.
+ If that is not the case return DOMST_NONDOMINATING which
+ is always safe. */
+ {
+ bool ok = false;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (e->src == loop->header)
+ {
+ ok = true;
+ break;
+ }
+ }
+
+ if (!ok)
+ return DOMST_NONDOMINATING;
+ }
+
+ /* The latch trivially dominates itself. */
+ if (bb == loop->latch)
+ return DOMST_DOMINATING;
+
+ /* Check that BB dominates LOOP->latch, and that it is back-reachable
+ from it. */
+
+ bblocks = XCNEWVEC (basic_block, loop->num_nodes);
+ dbds_ce_stop = loop->header;
+ /* Enumerate blocks backwards from the latch, stopping at BB and at the
+ loop header (via DBDS_CE_STOP). */
+ nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
+ bblocks, loop->num_nodes, bb);
+ for (i = 0; i < nblocks; i++)
+ FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
+ {
+ /* If the backward walk reached a successor of the header without
+ going through BB, then BB does not dominate the latch. */
+ if (e->src == loop->header)
+ {
+ free (bblocks);
+ return DOMST_NONDOMINATING;
+ }
+ if (e->src == bb)
+ bb_reachable = true;
+ }
+
+ free (bblocks);
+ /* If the latch never reached back to BB, there is no header-to-latch
+ path at all and the loop is broken. */
+ return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
+}
+
+/* Thread jumps through the header of LOOP. Returns true if cfg changes.
+ If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
+ to the inside of the loop. */
+
+static bool
+thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
+{
+ basic_block header = loop->header;
+ edge e, tgt_edge, latch = loop_latch_edge (loop);
+ edge_iterator ei;
+ basic_block tgt_bb, atgt_bb;
+ enum bb_dom_status domst;
+
+ /* We have already threaded through headers to exits, so all the threading
+ requests now are to the inside of the loop. We need to avoid creating
+ irreducible regions (i.e., loops with more than one entry block), and
+ also loop with several latch edges, or new subloops of the loop (although
+ there are cases where it might be appropriate, it is difficult to decide,
+ and doing it wrongly may confuse other optimizers).
+
+ We could handle more general cases here. However, the intention is to
+ preserve some information about the loop, which is impossible if its
+ structure changes significantly, in a way that is not well understood.
+ Thus we only handle few important special cases, in which also updating
+ of the loop-carried information should be feasible:
+
+ 1) Propagation of latch edge to a block that dominates the latch block
+ of a loop. This aims to handle the following idiom:
+
+ first = 1;
+ while (1)
+ {
+ if (first)
+ initialize;
+ first = 0;
+ body;
+ }
+
+ After threading the latch edge, this becomes
+
+ first = 1;
+ if (first)
+ initialize;
+ while (1)
+ {
+ first = 0;
+ body;
+ }
+
+ The original header of the loop is moved out of it, and we may thread
+ the remaining edges through it without further constraints.
+
+ 2) All entry edges are propagated to a single basic block that dominates
+ the latch block of the loop. This aims to handle the following idiom
+ (normally created for "for" loops):
+
+ i = 0;
+ while (1)
+ {
+ if (i >= 100)
+ break;
+ body;
+ i++;
+ }
+
+ This becomes
+
+ i = 0;
+ while (1)
+ {
+ body;
+ i++;
+ if (i >= 100)
+ break;
+ }
+ */
+
+ /* Threading through the header won't improve the code if the header has just
+ one successor. */
+ if (single_succ_p (header))
+ goto fail;
+
+ if (latch->aux)
+ {
+ if (THREAD_TARGET2 (latch))
+ goto fail;
+ tgt_edge = THREAD_TARGET (latch);
+ tgt_bb = tgt_edge->dest;
+ }
+ else if (!may_peel_loop_headers
+ && !redirection_block_p (loop->header))
+ goto fail;
+ else
+ {
+ tgt_bb = NULL;
+ tgt_edge = NULL;
+ FOR_EACH_EDGE (e, ei, header->preds)
+ {
+ if (!e->aux)
+ {
+ if (e == latch)
+ continue;
+
+ /* If latch is not threaded, and there is a header
+ edge that is not threaded, we would create loop
+ with multiple entries. */
+ goto fail;
+ }