/* IRA allocation based on graph coloring.
- Copyright (C) 2006, 2007, 2008
+ Copyright (C) 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Contributed by Vladimir Makarov <vmakarov@redhat.com>.
\f
+/* This page contains functions used to find conflicts using allocno
+ live ranges. */
+
+/* Return TRUE if live ranges of allocnos A1 and A2 intersect.  It is
+   used to find a conflict for new allocnos or allocnos with the
+   different cover classes.  */
+static bool
+allocnos_have_intersected_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
+{
+  /* An allocno never conflicts with itself.  */
+  if (a1 == a2)
+    return false;
+  /* Allocnos whose regs originate from the same pseudo are treated as
+     non-conflicting.  */
+  if (ALLOCNO_REG (a1) != NULL && ALLOCNO_REG (a2) != NULL
+      && (ORIGINAL_REGNO (ALLOCNO_REG (a1))
+	  == ORIGINAL_REGNO (ALLOCNO_REG (a2))))
+    return false;
+  return ira_allocno_live_ranges_intersect_p (ALLOCNO_LIVE_RANGES (a1),
+					      ALLOCNO_LIVE_RANGES (a2));
+}
+
+#ifdef ENABLE_IRA_CHECKING
+
+/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
+   intersect.  This should be used when there is only one region.
+   Currently this is used during reload.  */
+static bool
+pseudos_have_intersected_live_ranges_p (int regno1, int regno2)
+{
+  ira_allocno_t a1, a2;
+
+  ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
+	      && regno2 >= FIRST_PSEUDO_REGISTER);
+  /* Reg info calculated by dataflow infrastructure can be different
+     from one calculated by regclass.  A pseudo without an allocno in
+     the root map has no recorded live range, so report no
+     intersection.  */
+  if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
+      || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
+    return false;
+  return allocnos_have_intersected_live_ranges_p (a1, a2);
+}
+
+#endif
+
+\f
+
/* This page contains functions used to choose hard registers for
allocnos. */
else
gcc_unreachable ();
- if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
+ cover_class = ALLOCNO_COVER_CLASS (another_allocno);
+ if (! ira_reg_classes_intersect_p[rclass][cover_class]
|| ALLOCNO_ASSIGNED_P (another_allocno))
continue;
cost = (cp->second == allocno
- ? ira_register_move_cost[mode][rclass][cover_class]
- : ira_register_move_cost[mode][cover_class][rclass]);
+ ? ira_get_register_move_cost (mode, rclass, cover_class)
+ : ira_get_register_move_cost (mode, cover_class, rclass));
if (decr_p)
cost = -cost;
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
cover_class, 0,
ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
+ i = ira_class_hard_reg_index[cover_class][hard_regno];
+ ira_assert (i >= 0);
ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
+= update_cost;
while (get_next_update_cost (&allocno, &divisor));
}
-/* This function updates COSTS (decrease if DECR_P) by conflict costs
- of the unassigned allocnos connected by copies with allocnos in
- update_cost_queue. This update increases chances to remove some
- copies. */
+/* This function updates COSTS (decrease if DECR_P) for hard_registers
+ of COVER_CLASS by conflict costs of the unassigned allocnos
+ connected by copies with allocnos in update_cost_queue. This
+ update increases chances to remove some copies. */
static void
-update_conflict_hard_regno_costs (int *costs, bool decr_p)
+update_conflict_hard_regno_costs (int *costs, enum reg_class cover_class,
+ bool decr_p)
{
int i, cost, class_size, freq, mult, div, divisor;
+ int index, hard_regno;
int *conflict_costs;
bool cont_p;
- enum reg_class cover_class;
+ enum reg_class another_cover_class;
ira_allocno_t allocno, another_allocno;
ira_copy_t cp, next_cp;
}
else
gcc_unreachable ();
- cover_class = ALLOCNO_COVER_CLASS (allocno);
- if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
+ another_cover_class = ALLOCNO_COVER_CLASS (another_allocno);
+ if (! ira_reg_classes_intersect_p[cover_class][another_cover_class]
|| ALLOCNO_ASSIGNED_P (another_allocno)
- || ALLOCNO_MAY_BE_SPILLED_P (another_allocno))
+ || ALLOCNO_MAY_BE_SPILLED_P (ALLOCNO_FIRST_COALESCED_ALLOCNO
+ (another_allocno)))
continue;
- class_size = ira_class_hard_regs_num[cover_class];
+ class_size = ira_class_hard_regs_num[another_cover_class];
ira_allocate_and_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
- cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
+ another_cover_class,
+ ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
conflict_costs
= ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
if (conflict_costs == NULL)
cont_p = false;
for (i = class_size - 1; i >= 0; i--)
{
+ hard_regno = ira_class_hard_regs[another_cover_class][i];
+ ira_assert (hard_regno >= 0);
+ index = ira_class_hard_reg_index[cover_class][hard_regno];
+ if (index < 0)
+ continue;
cost = conflict_costs [i] * mult / div;
if (cost == 0)
continue;
cont_p = true;
if (decr_p)
cost = -cost;
- costs[i] += cost;
+ costs[index] += cost;
}
}
/* Probably 5 hops will be enough. */
assign_hard_reg (ira_allocno_t allocno, bool retry_p)
{
HARD_REG_SET conflicting_regs;
- int i, j, hard_regno, best_hard_regno, class_size;
+ int i, j, k, hard_regno, best_hard_regno, class_size;
int cost, mem_cost, min_cost, full_cost, min_full_cost, add_cost;
int *a_costs;
int *conflict_costs;
- enum reg_class cover_class, rclass;
+ enum reg_class cover_class, rclass, conflict_cover_class;
enum machine_mode mode;
ira_allocno_t a, conflict_allocno;
ira_allocno_conflict_iterator aci;
if (retry_p || bitmap_bit_p (consideration_allocno_bitmap,
ALLOCNO_NUM (conflict_allocno)))
{
- ira_assert (cover_class == ALLOCNO_COVER_CLASS (conflict_allocno));
+ conflict_cover_class = ALLOCNO_COVER_CLASS (conflict_allocno);
+ ira_assert (ira_reg_classes_intersect_p
+ [cover_class][conflict_cover_class]);
if (allocno_coalesced_p)
{
if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
}
if (ALLOCNO_ASSIGNED_P (conflict_allocno))
{
- if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno)) >= 0)
+ if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno)) >= 0
+ && ira_class_hard_reg_index[cover_class][hard_regno] >= 0)
{
IOR_HARD_REG_SET
(conflicting_regs,
conflicting_regs))
goto fail;
}
- continue;
}
- else if (! ALLOCNO_MAY_BE_SPILLED_P (conflict_allocno))
+ else if (! ALLOCNO_MAY_BE_SPILLED_P (ALLOCNO_FIRST_COALESCED_ALLOCNO
+ (conflict_allocno)))
{
ira_allocate_and_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno),
- cover_class,
+ conflict_cover_class,
ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_allocno));
conflict_costs
= ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno);
if (conflict_costs != NULL)
for (j = class_size - 1; j >= 0; j--)
- full_costs[j] -= conflict_costs[j];
+ {
+ hard_regno = ira_class_hard_regs[cover_class][j];
+ ira_assert (hard_regno >= 0);
+ k = (ira_class_hard_reg_index
+ [conflict_cover_class][hard_regno]);
+ if (k < 0)
+ continue;
+ full_costs[j] -= conflict_costs[k];
+ }
queue_update_cost (conflict_allocno, COST_HOP_DIVISOR);
}
}
}
/* Take into account preferences of allocnos connected by copies to
the conflict allocnos. */
- update_conflict_hard_regno_costs (full_costs, true);
+ update_conflict_hard_regno_costs (full_costs, cover_class, true);
/* Take preferences of allocnos connected by copies into
account. */
if (a == allocno)
break;
}
- update_conflict_hard_regno_costs (full_costs, false);
+ update_conflict_hard_regno_costs (full_costs, cover_class, false);
min_cost = min_full_cost = INT_MAX;
/* We don't care about giving callee saved registers to allocnos no
living through calls because call clobbered registers are
best_hard_regno = -1;
}
fail:
- if (best_hard_regno < 0
+ if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
+ && best_hard_regno < 0
&& ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno) != allocno)
{
for (j = 0, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{
+ ira_assert (! ALLOCNO_IN_GRAPH_P (a));
sorted_allocnos[j++] = a;
if (a == allocno)
break;
}
- qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
+ qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
allocno_cost_compare_func);
for (i = 0; i < j; i++)
{
of given *cover* class in the uncolorable_bucket. */
static int uncolorable_allocnos_num[N_REG_CLASSES];
+/* Return the current spill priority of allocno A.  The less the
+   number, the more preferable the allocno for spilling.  */
+static int
+allocno_spill_priority (ira_allocno_t a)
+{
+  /* Spill cost (kept in ALLOCNO_TEMP) divided by the allocno's
+     footprint: its remaining conflict size scaled by the number of
+     hard registers needed for its mode.  The "+ 1" guards against
+     division by zero.  */
+  return (ALLOCNO_TEMP (a)
+	  / (ALLOCNO_LEFT_CONFLICTS_SIZE (a)
+	     * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]
+	     + 1));
+}
+
/* Add ALLOCNO to bucket *BUCKET_PTR. ALLOCNO should be not in a bucket
before the call. */
static void
-add_ira_allocno_to_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
+add_allocno_to_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
{
ira_allocno_t first_allocno;
enum reg_class cover_class;
their priority. ALLOCNO should be not in a bucket before the
call. */
static void
-add_ira_allocno_to_ordered_bucket (ira_allocno_t allocno,
- ira_allocno_t *bucket_ptr)
+add_allocno_to_ordered_bucket (ira_allocno_t allocno,
+ ira_allocno_t *bucket_ptr)
{
ira_allocno_t before, after;
enum reg_class cover_class;
conflicting allocnos from the uncolorable bucket to the colorable
one. */
static void
-push_ira_allocno_to_stack (ira_allocno_t allocno)
+push_allocno_to_stack (ira_allocno_t allocno)
{
- int conflicts_num, conflict_size, size;
+ int left_conflicts_size, conflict_size, size;
ira_allocno_t a, conflict_allocno;
enum reg_class cover_class;
ira_allocno_conflict_iterator aci;
-
+
ALLOCNO_IN_GRAPH_P (allocno) = false;
VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, allocno);
cover_class = ALLOCNO_COVER_CLASS (allocno);
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{
FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
- if (bitmap_bit_p (coloring_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- {
- ira_assert (cover_class == ALLOCNO_COVER_CLASS (conflict_allocno));
- if (allocno_coalesced_p)
- {
- if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- continue;
- bitmap_set_bit (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno));
- }
- if (ALLOCNO_IN_GRAPH_P (conflict_allocno)
- && ! ALLOCNO_ASSIGNED_P (conflict_allocno))
- {
- conflicts_num = ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno);
- conflict_size
- = (ira_reg_class_nregs
- [cover_class][ALLOCNO_MODE (conflict_allocno)]);
- ira_assert
- (ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) >= size);
- if (conflicts_num + conflict_size
- <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
- {
- ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) -= size;
+ {
+ conflict_allocno = ALLOCNO_FIRST_COALESCED_ALLOCNO (conflict_allocno);
+ if (bitmap_bit_p (coloring_allocno_bitmap,
+ ALLOCNO_NUM (conflict_allocno)))
+ {
+ ira_assert (cover_class
+ == ALLOCNO_COVER_CLASS (conflict_allocno));
+ if (allocno_coalesced_p)
+ {
+ if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
+ ALLOCNO_NUM (conflict_allocno)))
continue;
- }
- conflicts_num
- = ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) - size;
- if (uncolorable_allocnos_splay_tree[cover_class] != NULL
- && !ALLOCNO_SPLAY_REMOVED_P (conflict_allocno)
- && USE_SPLAY_P (cover_class))
- {
- ira_assert
+ bitmap_set_bit (processed_coalesced_allocno_bitmap,
+ ALLOCNO_NUM (conflict_allocno));
+ }
+ if (ALLOCNO_IN_GRAPH_P (conflict_allocno)
+ && ! ALLOCNO_ASSIGNED_P (conflict_allocno))
+ {
+ left_conflicts_size
+ = ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_allocno);
+ conflict_size
+ = (ira_reg_class_nregs
+ [cover_class][ALLOCNO_MODE (conflict_allocno)]);
+ ira_assert
+ (ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_allocno) >= size);
+ if (left_conflicts_size + conflict_size
+ <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
+ {
+ ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_allocno) -= size;
+ continue;
+ }
+ left_conflicts_size
+ = ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_allocno) - size;
+ if (uncolorable_allocnos_splay_tree[cover_class] != NULL
+ && !ALLOCNO_SPLAY_REMOVED_P (conflict_allocno)
+ && USE_SPLAY_P (cover_class))
+ {
+ ira_assert
(splay_tree_lookup
(uncolorable_allocnos_splay_tree[cover_class],
(splay_tree_key) conflict_allocno) != NULL);
- splay_tree_remove
- (uncolorable_allocnos_splay_tree[cover_class],
- (splay_tree_key) conflict_allocno);
- ALLOCNO_SPLAY_REMOVED_P (conflict_allocno) = true;
- VEC_safe_push (ira_allocno_t, heap,
- removed_splay_allocno_vec,
- conflict_allocno);
- }
- ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) = conflicts_num;
- if (conflicts_num + conflict_size
- <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
- {
- delete_allocno_from_bucket (conflict_allocno,
- &uncolorable_allocno_bucket);
- add_ira_allocno_to_ordered_bucket (conflict_allocno,
- &colorable_allocno_bucket);
- }
- }
- }
+ splay_tree_remove
+ (uncolorable_allocnos_splay_tree[cover_class],
+ (splay_tree_key) conflict_allocno);
+ ALLOCNO_SPLAY_REMOVED_P (conflict_allocno) = true;
+ VEC_safe_push (ira_allocno_t, heap,
+ removed_splay_allocno_vec,
+ conflict_allocno);
+ }
+ ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_allocno)
+ = left_conflicts_size;
+ if (left_conflicts_size + conflict_size
+ <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
+ {
+ delete_allocno_from_bucket
+ (conflict_allocno, &uncolorable_allocno_bucket);
+ add_allocno_to_ordered_bucket
+ (conflict_allocno, &colorable_allocno_bucket);
+ }
+ }
+ }
+ }
if (a == allocno)
break;
}
{
fprintf (ira_dump_file, " Pushing");
print_coalesced_allocno (allocno);
- fprintf (ira_dump_file, "%s\n", colorable_p ? "" : "(potential spill)");
+ if (colorable_p)
+ fprintf (ira_dump_file, "\n");
+ else
+ fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
+ ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
+ allocno_spill_priority (allocno), ALLOCNO_TEMP (allocno));
}
cover_class = ALLOCNO_COVER_CLASS (allocno);
ira_assert ((colorable_p
- && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
<= ALLOCNO_AVAILABLE_REGS_NUM (allocno)))
|| (! colorable_p
- && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs[cover_class][ALLOCNO_MODE
(allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno))));
if (! colorable_p)
ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
- push_ira_allocno_to_stack (allocno);
+ push_allocno_to_stack (allocno);
}
/* Put all allocnos from colorable bucket onto the coloring stack. */
/* Puts ALLOCNO chosen for potential spilling onto the coloring
stack. */
static void
-push_ira_allocno_to_spill (ira_allocno_t allocno)
+push_allocno_to_spill (ira_allocno_t allocno)
{
delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- fprintf (ira_dump_file, " Pushing p%d(%d) (potential spill)\n",
+ fprintf (ira_dump_file, " Pushing p%d(%d) (spill for NO_REGS)\n",
ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno));
- push_ira_allocno_to_stack (allocno);
+ push_allocno_to_stack (allocno);
}
/* Return the frequency of exit edges (if EXIT_P) or entry from/to the
- loop given by its LOOP_NODE. */
+ loop given by its LOOP_NODE. */
int
ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
* ira_loop_edge_freq (loop_node, regno, true)
+ ira_memory_move_cost[mode][rclass][0]
* ira_loop_edge_freq (loop_node, regno, false))
- - (ira_register_move_cost[mode][rclass][rclass]
+ - (ira_get_register_move_cost (mode, rclass, rclass)
* (ira_loop_edge_freq (loop_node, regno, false)
+ ira_loop_edge_freq (loop_node, regno, true))));
return cost;
{
int pri1, pri2, diff;
ira_allocno_t a1 = (ira_allocno_t) k1, a2 = (ira_allocno_t) k2;
-
+
pri1 = (ALLOCNO_TEMP (a1)
- / (ALLOCNO_LEFT_CONFLICTS_NUM (a1)
+ / (ALLOCNO_LEFT_CONFLICTS_SIZE (a1)
* ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
+ 1));
pri2 = (ALLOCNO_TEMP (a2)
- / (ALLOCNO_LEFT_CONFLICTS_NUM (a2)
+ / (ALLOCNO_LEFT_CONFLICTS_SIZE (a2)
* ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a2)][ALLOCNO_MODE (a2)]
+ 1));
if ((diff = pri1 - pri2) != 0)
cover_class = ALLOCNO_COVER_CLASS (allocno);
if (cover_class == NO_REGS)
{
- push_ira_allocno_to_spill (allocno);
+ push_allocno_to_spill (allocno);
continue;
}
/* Potential spilling. */
allocno = VEC_pop (ira_allocno_t, removed_splay_allocno_vec);
ALLOCNO_SPLAY_REMOVED_P (allocno) = false;
rclass = ALLOCNO_COVER_CLASS (allocno);
- if (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ if (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs [rclass][ALLOCNO_MODE (allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno))
splay_tree_insert
if (ALLOCNO_IN_GRAPH_P (i_allocno))
{
i++;
- if (ALLOCNO_TEMP (i_allocno) == INT_MAX)
- {
- ira_allocno_t a;
- int cost = 0;
-
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (i_allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- cost += calculate_allocno_spill_cost (i_allocno);
- if (a == i_allocno)
- break;
- }
- /* ??? Remove cost of copies between the coalesced
- allocnos. */
- ALLOCNO_TEMP (i_allocno) = cost;
- }
+ ira_assert (ALLOCNO_TEMP (i_allocno) != INT_MAX);
i_allocno_cost = ALLOCNO_TEMP (i_allocno);
- i_allocno_pri
- = (i_allocno_cost
- / (ALLOCNO_LEFT_CONFLICTS_NUM (i_allocno)
- * ira_reg_class_nregs[ALLOCNO_COVER_CLASS
- (i_allocno)]
- [ALLOCNO_MODE (i_allocno)] + 1));
- if (allocno == NULL || allocno_pri > i_allocno_pri
- || (allocno_pri == i_allocno_pri
- && (allocno_cost > i_allocno_cost
- || (allocno_cost == i_allocno_cost
- && (ALLOCNO_NUM (allocno)
- > ALLOCNO_NUM (i_allocno))))))
+ i_allocno_pri = allocno_spill_priority (i_allocno);
+ if (allocno == NULL
+ || (! ALLOCNO_BAD_SPILL_P (i_allocno)
+ && ALLOCNO_BAD_SPILL_P (allocno))
+ || (! (ALLOCNO_BAD_SPILL_P (i_allocno)
+ && ! ALLOCNO_BAD_SPILL_P (allocno))
+ && (allocno_pri > i_allocno_pri
+ || (allocno_pri == i_allocno_pri
+ && (allocno_cost > i_allocno_cost
+ || (allocno_cost == i_allocno_cost
+ && (ALLOCNO_NUM (allocno)
+ > ALLOCNO_NUM (i_allocno))))))))
{
allocno = i_allocno;
allocno_cost = i_allocno_cost;
}
ira_assert (ALLOCNO_IN_GRAPH_P (allocno)
&& ALLOCNO_COVER_CLASS (allocno) == cover_class
- && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs[cover_class][ALLOCNO_MODE
(allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno)));
static void
setup_allocno_available_regs_num (ira_allocno_t allocno)
{
- int i, n, hard_regs_num;
+ int i, n, hard_regs_num, hard_regno;
+ enum machine_mode mode;
enum reg_class cover_class;
ira_allocno_t a;
HARD_REG_SET temp_set;
if (a == allocno)
break;
}
+ mode = ALLOCNO_MODE (allocno);
for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
- if (TEST_HARD_REG_BIT (temp_set, ira_class_hard_regs[cover_class][i]))
- n++;
+ {
+ hard_regno = ira_class_hard_regs[cover_class][i];
+ if (TEST_HARD_REG_BIT (temp_set, hard_regno)
+ || TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
+ hard_regno))
+ n++;
+ }
if (internal_flag_ira_verbose > 2 && n > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Reg %d of %s has %d regs less\n",
ALLOCNO_REGNO (allocno), reg_class_names[cover_class], n);
ALLOCNO_AVAILABLE_REGS_NUM (allocno) -= n;
}
-/* Set up ALLOCNO_LEFT_CONFLICTS_NUM for ALLOCNO. */
+/* Set up ALLOCNO_LEFT_CONFLICTS_SIZE for ALLOCNO. */
static void
-setup_allocno_left_conflicts_num (ira_allocno_t allocno)
+setup_allocno_left_conflicts_size (ira_allocno_t allocno)
{
int i, hard_regs_num, hard_regno, conflict_allocnos_size;
ira_allocno_t a, conflict_allocno;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{
FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
- if (bitmap_bit_p (consideration_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- {
- ira_assert (cover_class
- == ALLOCNO_COVER_CLASS (conflict_allocno));
- if (allocno_coalesced_p)
- {
- if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- continue;
- bitmap_set_bit (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno));
- }
- if (! ALLOCNO_ASSIGNED_P (conflict_allocno))
- conflict_allocnos_size
- += (ira_reg_class_nregs
- [cover_class][ALLOCNO_MODE (conflict_allocno)]);
- else if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno))
- >= 0)
- {
- int last = (hard_regno
- + hard_regno_nregs
+ {
+ conflict_allocno
+ = ALLOCNO_FIRST_COALESCED_ALLOCNO (conflict_allocno);
+ if (bitmap_bit_p (consideration_allocno_bitmap,
+ ALLOCNO_NUM (conflict_allocno)))
+ {
+ ira_assert (cover_class
+ == ALLOCNO_COVER_CLASS (conflict_allocno));
+ if (allocno_coalesced_p)
+ {
+ if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
+ ALLOCNO_NUM (conflict_allocno)))
+ continue;
+ bitmap_set_bit (processed_coalesced_allocno_bitmap,
+ ALLOCNO_NUM (conflict_allocno));
+ }
+ if (! ALLOCNO_ASSIGNED_P (conflict_allocno))
+ conflict_allocnos_size
+ += (ira_reg_class_nregs
+ [cover_class][ALLOCNO_MODE (conflict_allocno)]);
+ else if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno))
+ >= 0)
+ {
+ int last = (hard_regno
+ + hard_regno_nregs
[hard_regno][ALLOCNO_MODE (conflict_allocno)]);
-
- while (hard_regno < last)
- {
- if (! TEST_HARD_REG_BIT (temp_set, hard_regno))
- {
- conflict_allocnos_size++;
- SET_HARD_REG_BIT (temp_set, hard_regno);
- }
- hard_regno++;
- }
- }
- }
+
+ while (hard_regno < last)
+ {
+ if (! TEST_HARD_REG_BIT (temp_set, hard_regno))
+ {
+ conflict_allocnos_size++;
+ SET_HARD_REG_BIT (temp_set, hard_regno);
+ }
+ hard_regno++;
+ }
+ }
+ }
+ }
if (a == allocno)
break;
}
- ALLOCNO_LEFT_CONFLICTS_NUM (allocno) = conflict_allocnos_size;
+ ALLOCNO_LEFT_CONFLICTS_SIZE (allocno) = conflict_allocnos_size;
}
/* Put ALLOCNO in a bucket corresponding to its number and size of its
static void
put_allocno_into_bucket (ira_allocno_t allocno)
{
- int hard_regs_num;
enum reg_class cover_class;
cover_class = ALLOCNO_COVER_CLASS (allocno);
- hard_regs_num = ira_class_hard_regs_num[cover_class];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
return;
ALLOCNO_IN_GRAPH_P (allocno) = true;
- setup_allocno_left_conflicts_num (allocno);
+ setup_allocno_left_conflicts_size (allocno);
setup_allocno_available_regs_num (allocno);
- if (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ if (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
<= ALLOCNO_AVAILABLE_REGS_NUM (allocno))
- add_ira_allocno_to_bucket (allocno, &colorable_allocno_bucket);
+ add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
else
- add_ira_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
+ add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
}
/* The function is used to sort allocnos according to their execution
conflict_allocno
= ALLOCNO_NEXT_COALESCED_ALLOCNO (conflict_allocno))
{
- if (ira_allocno_live_ranges_intersect_p (a, conflict_allocno))
+ if (allocnos_have_intersected_live_ranges_p (a,
+ conflict_allocno))
return true;
if (conflict_allocno == a1)
break;
{
next_cp = cp->next_first_allocno_copy;
regno = ALLOCNO_REGNO (cp->second);
+	  /* For priority coloring we coalesce allocnos only with the
+	     same cover class, not with intersecting cover classes as
+	     would be possible.  It is done for simplicity.  */
if ((reload_p
|| (ALLOCNO_COVER_CLASS (cp->second) == cover_class
&& ALLOCNO_MODE (cp->second) == mode))
- && cp->insn != NULL
+ && (cp->insn != NULL || cp->constraint_p)
&& ((! reload_p && ! ALLOCNO_ASSIGNED_P (cp->second))
|| (reload_p
&& ALLOCNO_ASSIGNED_P (cp->second)
ira_free (sorted_copies);
}
+/* Map: allocno number -> allocno priority. */
+static int *allocno_priorities;
+
+/* Set up priorities for N allocnos in array
+   CONSIDERATION_ALLOCNOS.  */
+static void
+setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
+{
+  int i, length, nrefs, priority, max_priority, mult;
+  ira_allocno_t a;
+
+  max_priority = 0;
+  for (i = 0; i < n; i++)
+    {
+      a = consideration_allocnos[i];
+      nrefs = ALLOCNO_NREFS (a);
+      ira_assert (nrefs >= 0);
+      mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
+      ira_assert (mult >= 0);
+      /* Base priority: log-scaled reference count times the benefit
+	 of getting a register (memory cost minus cover class cost)
+	 times the number of hard registers the allocno needs.  */
+      allocno_priorities[ALLOCNO_NUM (a)]
+	= priority
+	= (mult
+	   * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a))
+	   * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
+      if (priority < 0)
+	priority = -priority;
+      if (max_priority < priority)
+	max_priority = priority;
+    }
+  /* Scale all priorities up toward the full int range, then divide
+     each by the allocno's excess pressure point count so that
+     allocnos spanning more high-pressure points get lower
+     priority.  */
+  mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
+  for (i = 0; i < n; i++)
+    {
+      a = consideration_allocnos[i];
+      length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
+      if (length <= 0)
+	length = 1;
+      allocno_priorities[ALLOCNO_NUM (a)]
+	= allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
+    }
+}
+
+/* Sort allocnos according to their priorities which are calculated
+   analogously to the ones in file `global.c'.  */
+static int
+allocno_priority_compare_func (const void *v1p, const void *v2p)
+{
+  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
+  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
+  int pri1, pri2;
+
+  pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
+  pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
+  /* Higher-priority allocnos sort first.  */
+  if (pri2 - pri1)
+    return pri2 - pri1;
+
+  /* If regs are equally good, sort by allocnos, so that the results of
+     qsort leave nothing to chance.  */
+  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
+}
+
/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
static void
color_allocnos (void)
{
- unsigned int i;
+ unsigned int i, n;
bitmap_iterator bi;
ira_allocno_t a;
processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
if (flag_ira_coalesce)
coalesce_allocnos (false);
- /* Put the allocnos into the corresponding buckets. */
- colorable_allocno_bucket = NULL;
- uncolorable_allocno_bucket = NULL;
- EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
+ if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
{
- a = ira_allocnos[i];
- if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
+ n = 0;
+ EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
{
- ALLOCNO_HARD_REGNO (a) = -1;
- ALLOCNO_ASSIGNED_P (a) = true;
- ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
- ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ a = ira_allocnos[i];
+ if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
{
- fprintf (ira_dump_file, " Spill");
- print_coalesced_allocno (a);
- fprintf (ira_dump_file, "\n");
+ ALLOCNO_HARD_REGNO (a) = -1;
+ ALLOCNO_ASSIGNED_P (a) = true;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ {
+ fprintf (ira_dump_file, " Spill");
+ print_coalesced_allocno (a);
+ fprintf (ira_dump_file, "\n");
+ }
+ continue;
}
- continue;
+ sorted_allocnos[n++] = a;
+ }
+ if (n != 0)
+ {
+ setup_allocno_priorities (sorted_allocnos, n);
+ qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
+ allocno_priority_compare_func);
+ for (i = 0; i < n; i++)
+ {
+ a = sorted_allocnos[i];
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ {
+ fprintf (ira_dump_file, " ");
+ print_coalesced_allocno (a);
+ fprintf (ira_dump_file, " -- ");
+ }
+ if (assign_hard_reg (a, false))
+ {
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ fprintf (ira_dump_file, "assign hard reg %d\n",
+ ALLOCNO_HARD_REGNO (a));
+ }
+ else
+ {
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ fprintf (ira_dump_file, "assign memory\n");
+ }
+ }
+ }
+ }
+ else
+ {
+ /* Put the allocnos into the corresponding buckets. */
+ colorable_allocno_bucket = NULL;
+ uncolorable_allocno_bucket = NULL;
+ EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
+ {
+ a = ira_allocnos[i];
+ if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
+ {
+ ALLOCNO_HARD_REGNO (a) = -1;
+ ALLOCNO_ASSIGNED_P (a) = true;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ {
+ fprintf (ira_dump_file, " Spill");
+ print_coalesced_allocno (a);
+ fprintf (ira_dump_file, "\n");
+ }
+ continue;
+ }
+ put_allocno_into_bucket (a);
}
- put_allocno_into_bucket (a);
+ push_allocnos_to_stack ();
+ pop_allocnos_from_stack ();
}
- push_allocnos_to_stack ();
- pop_allocnos_from_stack ();
if (flag_ira_coalesce)
/* We don't need coalesced allocnos for ira_reassign_pseudos. */
EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
{
unsigned int j;
bitmap_iterator bi;
+ ira_loop_tree_node_t subloop_node, dest_loop_node;
+ edge e;
+ edge_iterator ei;
ira_assert (loop_tree_node->loop != NULL);
fprintf (ira_dump_file,
- "\n Loop %d (parent %d, header bb%d, depth %d)\n all:",
+ "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
loop_tree_node->loop->num,
(loop_tree_node->parent == NULL
? -1 : loop_tree_node->parent->loop->num),
loop_tree_node->loop->header->index,
loop_depth (loop_tree_node->loop));
+ for (subloop_node = loop_tree_node->children;
+ subloop_node != NULL;
+ subloop_node = subloop_node->next)
+ if (subloop_node->bb != NULL)
+ {
+ fprintf (ira_dump_file, " %d", subloop_node->bb->index);
+ FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
+ if (e->dest != EXIT_BLOCK_PTR
+ && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
+ != loop_tree_node))
+ fprintf (ira_dump_file, "(->%d:l%d)",
+ e->dest->index, dest_loop_node->loop->num);
+ }
+ fprintf (ira_dump_file, "\n all:");
EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
fprintf (ira_dump_file, "\n modified regnos:");
for (j = 0; (int) j < ira_reg_class_cover_size; j++)
{
enum reg_class cover_class;
-
+
cover_class = ira_reg_class_cover[j];
if (loop_tree_node->reg_pressure[cover_class] == 0)
continue;
/* Color all mentioned allocnos including transparent ones. */
color_allocnos ();
/* Process caps. They are processed just once. */
- if (flag_ira_algorithm == IRA_ALGORITHM_MIXED
- || flag_ira_algorithm == IRA_ALGORITHM_REGIONAL)
+ if (flag_ira_region == IRA_REGION_MIXED
+ || flag_ira_region == IRA_REGION_ALL)
EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
{
a = ira_allocnos[j];
/* Remove from processing in the next loop. */
bitmap_clear_bit (consideration_allocno_bitmap, j);
rclass = ALLOCNO_COVER_CLASS (a);
- if ((flag_ira_algorithm == IRA_ALGORITHM_MIXED
- && loop_tree_node->reg_pressure[rclass]
- <= ira_available_class_regs[rclass]))
+ if (flag_ira_region == IRA_REGION_MIXED
+ && (loop_tree_node->reg_pressure[rclass]
+ <= ira_available_class_regs[rclass]))
{
mode = ALLOCNO_MODE (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
mode = ALLOCNO_MODE (a);
rclass = ALLOCNO_COVER_CLASS (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
+ /* Use hard register class here. ??? */
if (hard_regno >= 0)
{
index = ira_class_hard_reg_index[rclass][hard_regno];
if (subloop_allocno == NULL
|| ALLOCNO_CAP (subloop_allocno) != NULL)
continue;
+ ira_assert (ALLOCNO_COVER_CLASS (subloop_allocno) == rclass);
ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
ALLOCNO_NUM (subloop_allocno)));
- if (flag_ira_algorithm == IRA_ALGORITHM_MIXED
+ if ((flag_ira_region == IRA_REGION_MIXED)
&& (loop_tree_node->reg_pressure[rclass]
<= ira_available_class_regs[rclass]))
{
else
{
cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
- cost = (ira_register_move_cost[mode][rclass][rclass]
+ cost = (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), cover_class,
100);
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
-
+
ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
subloop_allocno = subloop_node->regno_allocno_map[regno];
if (subloop_allocno == NULL)
continue;
+ ira_assert (rclass == ALLOCNO_COVER_CLASS (subloop_allocno));
/* We have accumulated cost. To get the real cost of
allocno usage in the loop we should subtract costs of
the subloop allocnos. */
+= (ira_memory_move_cost[mode][rclass][0] * exit_freq
+ ira_memory_move_cost[mode][rclass][1] * enter_freq);
if (hard_regno2 != hard_regno)
- cost -= (ira_register_move_cost[mode][rclass][rclass]
+ cost -= (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
}
}
if ((parent = loop_node->parent) != NULL
&& (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
{
+ ira_assert (rclass == ALLOCNO_COVER_CLASS (parent_allocno));
exit_freq = ira_loop_edge_freq (loop_node, regno, true);
enter_freq = ira_loop_edge_freq (loop_node, regno, false);
if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
+= (ira_memory_move_cost[mode][rclass][1] * exit_freq
+ ira_memory_move_cost[mode][rclass][0] * enter_freq);
if (hard_regno2 != hard_regno)
- cost -= (ira_register_move_cost[mode][rclass][rclass]
+ cost -= (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
}
}
}
else
gcc_unreachable ();
- if (cover_class != ALLOCNO_COVER_CLASS (another_a)
+ if (! ira_reg_classes_intersect_p[cover_class][ALLOCNO_COVER_CLASS
+ (another_a)]
|| ! ALLOCNO_ASSIGNED_P (another_a)
|| (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
continue;
rclass = REGNO_REG_CLASS (hard_regno);
i = ira_class_hard_reg_index[cover_class][hard_regno];
- ira_assert (i >= 0);
+ if (i < 0)
+ continue;
cost = (cp->first == a
- ? ira_register_move_cost[mode][rclass][cover_class]
- : ira_register_move_cost[mode][cover_class][rclass]);
+ ? ira_get_register_move_cost (mode, rclass, cover_class)
+ : ira_get_register_move_cost (mode, cover_class, rclass));
ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
cover_class, ALLOCNO_COVER_CLASS_COST (a),
}
}
-/* Map: allocno number -> allocno priority. */
-static int *allocno_priorities;
-
-/* Set up priorities for N allocnos in array
- CONSIDERATION_ALLOCNOS. */
-static void
-setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
-{
- int i, length, nrefs, priority, max_priority, mult;
- ira_allocno_t a;
-
- max_priority = 0;
- for (i = 0; i < n; i++)
- {
- a = consideration_allocnos[i];
- nrefs = ALLOCNO_NREFS (a);
- ira_assert (nrefs >= 0);
- mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
- ira_assert (mult >= 0);
- allocno_priorities[ALLOCNO_NUM (a)]
- = priority
- = (mult
- * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a))
- * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
- if (priority < 0)
- priority = -priority;
- if (max_priority < priority)
- max_priority = priority;
- }
- mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
- for (i = 0; i < n; i++)
- {
- a = consideration_allocnos[i];
- length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
- if (length <= 0)
- length = 1;
- allocno_priorities[ALLOCNO_NUM (a)]
- = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
- }
-}
-
-/* Sort allocnos according to their priorities which are calculated
- analogous to ones in file `global.c'. */
-static int
-allocno_priority_compare_func (const void *v1p, const void *v2p)
-{
- ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
- ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
- int pri1, pri2;
-
- pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
- pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
- if (pri2 - pri1)
- return pri2 - pri1;
-
- /* If regs are equally good, sort by allocnos, so that the results of
- qsort leave nothing to chance. */
- return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
-}
-
/* Try to assign hard registers to the unassigned allocnos and
allocnos conflicting with them or conflicting with allocnos whose
regno >= START_REGNO. The function is called after ira_flattening,
continue;
FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
{
- ira_assert (cover_class == ALLOCNO_COVER_CLASS (conflict_a));
+ ira_assert (ira_reg_classes_intersect_p
+ [cover_class][ALLOCNO_COVER_CLASS (conflict_a)]);
if (bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
continue;
bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a));
return num;
}
-/* Array of bitmaps of size IRA_MAX_POINT. Bitmap for given point
- contains numbers of coalesced allocnos living at this point. */
-static regset_head *coalesced_allocnos_living_at_program_points;
+/* Array of live range lists of size IRA_ALLOCNOS_NUM, indexed by slot
+   number.  The entry for a given slot holds the merged live ranges of
+   all coalesced allocnos assigned to that slot.  */
+static allocno_live_range_t *slot_coalesced_allocnos_live_ranges;
-/* Return TRUE if coalesced allocnos represented by ALLOCNO live at
- program points of coalesced allocnos with number N. */
+/* Return TRUE if the live ranges of the coalesced allocnos represented
+   by ALLOCNO intersect the live ranges of the coalesced allocnos
+   assigned to the slot with number N.  */
static bool
-coalesced_allocnos_live_at_points_p (ira_allocno_t allocno, int n)
+slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
{
- int i;
ira_allocno_t a;
- allocno_live_range_t r;
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{
- for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
- for (i = r->start; i <= r->finish; i++)
- if (bitmap_bit_p (&coalesced_allocnos_living_at_program_points[i], n))
- return true;
+ if (ira_allocno_live_ranges_intersect_p
+ (slot_coalesced_allocnos_live_ranges[n], ALLOCNO_LIVE_RANGES (a)))
+ return true;
if (a == allocno)
break;
}
return false;
}
-/* Mark program points where coalesced allocnos represented by ALLOCNO
- live. */
+/* Merge the live ranges of the coalesced allocnos represented by
+   ALLOCNO into the live ranges of the slot they were assigned to.  */
static void
-set_coalesced_allocnos_live_points (ira_allocno_t allocno)
+setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
{
- int i, n;
+ int n;
ira_allocno_t a;
allocno_live_range_t r;
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{
- for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
- for (i = r->start; i <= r->finish; i++)
- bitmap_set_bit (&coalesced_allocnos_living_at_program_points[i], n);
+ r = ira_copy_allocno_live_range_list (ALLOCNO_LIVE_RANGES (a));
+ slot_coalesced_allocnos_live_ranges[n]
+ = ira_merge_allocno_live_ranges
+ (slot_coalesced_allocnos_live_ranges[n], r);
if (a == allocno)
break;
}
static bool
coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
{
- int i, j, last_coalesced_allocno_num;
+ int i, j, n, last_coalesced_allocno_num;
ira_allocno_t allocno, a;
bool merged_p = false;
+ bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
- coalesced_allocnos_living_at_program_points
- = (regset_head *) ira_allocate (sizeof (regset_head) * ira_max_point);
- for (i = 0; i < ira_max_point; i++)
- INIT_REG_SET (&coalesced_allocnos_living_at_program_points[i]);
+ slot_coalesced_allocnos_live_ranges
+ = (allocno_live_range_t *) ira_allocate (sizeof (allocno_live_range_t)
+ * ira_allocnos_num);
+ memset (slot_coalesced_allocnos_live_ranges, 0,
+ sizeof (allocno_live_range_t) * ira_allocnos_num);
last_coalesced_allocno_num = 0;
/* Coalesce non-conflicting spilled allocnos preferring most
frequently used. */
{
allocno = spilled_coalesced_allocnos[i];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
+ || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
|| (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
- && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
- || ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
+ && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
+ || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
continue;
for (j = 0; j < i; j++)
{
a = spilled_coalesced_allocnos[j];
+ n = ALLOCNO_TEMP (a);
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
+ && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
&& (ALLOCNO_REGNO (a) >= ira_reg_equiv_len
|| (! ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
&& ira_reg_equiv_const[ALLOCNO_REGNO (a)] == NULL_RTX))
- && ! coalesced_allocnos_live_at_points_p (allocno,
- ALLOCNO_TEMP (a)))
+ && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
break;
}
if (j >= i)
/* No coalescing: set up number for coalesced allocnos
represented by ALLOCNO. */
ALLOCNO_TEMP (allocno) = last_coalesced_allocno_num++;
- set_coalesced_allocnos_live_points (allocno);
+ setup_slot_coalesced_allocno_live_ranges (allocno);
}
else
{
ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
ALLOCNO_TEMP (allocno) = ALLOCNO_TEMP (a);
- set_coalesced_allocnos_live_points (allocno);
+ setup_slot_coalesced_allocno_live_ranges (allocno);
merge_allocnos (a, allocno);
ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
}
}
- for (i = 0; i < ira_max_point; i++)
- CLEAR_REG_SET (&coalesced_allocnos_living_at_program_points[i]);
- ira_free (coalesced_allocnos_living_at_program_points);
+ for (i = 0; i < ira_allocnos_num; i++)
+ ira_finish_allocno_live_range_list
+ (slot_coalesced_allocnos_live_ranges[i]);
+ ira_free (slot_coalesced_allocnos_live_ranges);
return merged_p;
}
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
|| ALLOCNO_HARD_REGNO (allocno) >= 0
|| (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
- && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
- || ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
+ && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
+ || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
continue;
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
reg_max_ref_width[ALLOCNO_REGNO (a)]));
-
+
if (a == allocno)
break;
}
}
/* Try to assign a hard register (except for FORBIDDEN_REGS) to
- allocno A and return TRUE in the case of success. That is an
- analog of retry_global_alloc for IRA. */
+ allocno A and return TRUE in the case of success. */
static bool
allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
{
ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
HARD_REG_SET bad_spill_regs,
HARD_REG_SET *pseudo_forbidden_regs,
- HARD_REG_SET *pseudo_previous_regs, bitmap spilled)
+ HARD_REG_SET *pseudo_previous_regs,
+ bitmap spilled)
{
int i, m, n, regno;
bool changed_p;
ira_allocno_t a, conflict_a;
HARD_REG_SET forbidden_regs;
ira_allocno_conflict_iterator aci;
+ bitmap temp = BITMAP_ALLOC (NULL);
+
+ /* Add pseudos which conflict with pseudos already in
+ SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
+ to allocating in two steps as some of the conflicts might have
+ a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
+ for (i = 0; i < num; i++)
+ bitmap_set_bit (temp, spilled_pseudo_regs[i]);
+
+ for (i = 0, n = num; i < n; i++)
+ {
+ int regno = spilled_pseudo_regs[i];
+ bitmap_set_bit (temp, regno);
+
+ a = ira_regno_allocno_map[regno];
+ FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
+ if (ALLOCNO_HARD_REGNO (conflict_a) < 0
+ && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
+ && ! bitmap_bit_p (temp, ALLOCNO_REGNO (conflict_a)))
+ {
+ spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
+ bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a));
+ /* ?!? This seems wrong. */
+ bitmap_set_bit (consideration_allocno_bitmap,
+ ALLOCNO_NUM (conflict_a));
+ }
+ }
if (num > 1)
qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
ira_assert (reg_renumber[regno] < 0);
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file,
- " Spill %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
+ " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
ALLOCNO_MEMORY_COST (a)
- ALLOCNO_COVER_CLASS_COST (a));
allocno_reload_assign (a, forbidden_regs);
CLEAR_REGNO_REG_SET (spilled, regno);
changed_p = true;
}
- else
- spilled_pseudo_regs[m++] = regno;
- }
- if (m == 0)
- return changed_p;
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- {
- fprintf (ira_dump_file, " Spilled regs");
- for (i = 0; i < m; i++)
- fprintf (ira_dump_file, " %d", spilled_pseudo_regs[i]);
- fprintf (ira_dump_file, "\n");
- }
- /* Try to assign hard registers to pseudos conflicting with ones
- from SPILLED_PSEUDO_REGS. */
- for (i = n = 0; i < m; i++)
- {
- regno = spilled_pseudo_regs[i];
- a = ira_regno_allocno_map[regno];
- FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
- if (ALLOCNO_HARD_REGNO (conflict_a) < 0
- && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
- && ! bitmap_bit_p (consideration_allocno_bitmap,
- ALLOCNO_NUM (conflict_a)))
- {
- sorted_allocnos[n++] = conflict_a;
- bitmap_set_bit (consideration_allocno_bitmap,
- ALLOCNO_NUM (conflict_a));
- }
- }
- if (n != 0)
- {
- setup_allocno_priorities (sorted_allocnos, n);
- qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
- allocno_priority_compare_func);
- for (i = 0; i < n; i++)
- {
- a = sorted_allocnos[i];
- regno = ALLOCNO_REGNO (a);
- COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
- IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
- IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- fprintf (ira_dump_file,
- " Try assign %d(a%d), cost=%d",
- regno, ALLOCNO_NUM (a),
- ALLOCNO_MEMORY_COST (a)
- - ALLOCNO_COVER_CLASS_COST (a));
- if (allocno_reload_assign (a, forbidden_regs))
- {
- changed_p = true;
- bitmap_clear_bit (spilled, regno);
- }
- }
}
+ BITMAP_FREE (temp);
return changed_p;
}
bitmap_iterator bi;
struct ira_spilled_reg_stack_slot *slot = NULL;
- ira_assert (flag_ira && inherent_size == PSEUDO_REGNO_BYTES (regno)
+ ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
&& inherent_size <= total_size
&& ALLOCNO_HARD_REGNO (allocno) < 0);
if (! flag_ira_share_spill_slots)
if (slot->width < total_size
|| GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
continue;
-
+
EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
FIRST_PSEUDO_REGISTER, i, bi)
{
another_allocno = ira_regno_allocno_map[i];
- if (ira_allocno_live_ranges_intersect_p (allocno,
- another_allocno))
+ if (allocnos_have_intersected_live_ranges_p (allocno,
+ another_allocno))
goto cont;
}
for (cost = 0, cp = ALLOCNO_COPIES (allocno);
if (x != NULL_RTX)
{
ira_assert (slot->width >= total_size);
+#ifdef ENABLE_IRA_CHECKING
EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
FIRST_PSEUDO_REGISTER, i, bi)
{
- ira_assert (! ira_pseudo_live_ranges_intersect_p (regno, i));
+ ira_assert (! pseudos_have_intersected_live_ranges_p (regno, i));
}
+#endif
SET_REGNO_REG_SET (&slot->spilled_regs, regno);
if (internal_flag_ira_verbose > 3 && ira_dump_file)
{
int slot_num;
ira_allocno_t allocno;
- ira_assert (flag_ira && PSEUDO_REGNO_BYTES (regno) <= total_size);
+ ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
allocno = ira_regno_allocno_map[regno];
slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
if (slot_num == -1)
int call_used_count, other_call_used_count;
int hard_regno, other_hard_regno;
- cost = calculate_spill_cost (regnos, in, out, insn,
+ cost = calculate_spill_cost (regnos, in, out, insn,
&length, &nrefs, &call_used_count, &hard_regno);
other_cost = calculate_spill_cost (other_regnos, in, out, insn,
&other_length, &other_nrefs,
* ira_max_point);
for (i = 0; i < ira_max_point; i++)
CLEAR_HARD_REG_SET (used_hard_regs[i]);
- qsort (sorted_allocnos, ira_allocnos_num, sizeof (ira_allocno_t),
+ qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
allocno_priority_compare_func);
for (i = 0; i < num; i++)
{
ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);
}
- if (optimize)
+ if (ira_conflicts_p)
color ();
else
fast_allocation ();