/* IRA allocation based on graph coloring.
- Copyright (C) 2006, 2007, 2008
+ Copyright (C) 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Contributed by Vladimir Makarov <vmakarov@redhat.com>.
#include "hard-reg-set.h"
#include "basic-block.h"
#include "expr.h"
+#include "diagnostic-core.h"
#include "toplev.h"
#include "reload.h"
#include "params.h"
allocnos. */
static bitmap consideration_allocno_bitmap;
-/* TRUE if we coalesced some allocnos. In other words, if we got
- loops formed by members first_coalesced_allocno and
- next_coalesced_allocno containing more one allocno. */
-static bool allocno_coalesced_p;
-
-/* Bitmap used to prevent a repeated allocno processing because of
- coalescing. */
-static bitmap processed_coalesced_allocno_bitmap;
-
/* All allocnos sorted according their priorities. */
static ira_allocno_t *sorted_allocnos;
/* Vec representing the stack of allocnos used during coloring. */
static VEC(ira_allocno_t,heap) *allocno_stack_vec;
-/* Vec representing conflict allocnos used during assigning. */
-static VEC(ira_allocno_t,heap) *conflict_allocno_vec;
-
/* Array used to choose an allocno for spilling. */
static ira_allocno_t *allocnos_for_spilling;
more costly although simpler. */
static VEC(ira_allocno_t,heap) *removed_splay_allocno_vec;
+/* Helper for qsort comparison callbacks - return a positive integer if
+ X > Y, or a negative value otherwise. Use a conditional expression
+ instead of a difference computation to insulate from possible overflow
+ issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
+#define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
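+/* For example, with 32-bit int, SORTGT (1, INT_MIN) is 1, whereas the
+ plain difference 1 - INT_MIN overflows. */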
+
+\f
+
+/* This page contains functions used to find conflicts using allocno
+ live ranges. */
+
+/* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
+ used to find a conflict for new allocnos or allocnos with
+ different cover classes. */
+static bool
+allocnos_have_intersected_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
+{
+ int i, j;
+ int n1 = ALLOCNO_NUM_OBJECTS (a1);
+ int n2 = ALLOCNO_NUM_OBJECTS (a2);
+
+ if (a1 == a2)
+ return false;
+ if (ALLOCNO_REG (a1) != NULL && ALLOCNO_REG (a2) != NULL
+ && (ORIGINAL_REGNO (ALLOCNO_REG (a1))
+ == ORIGINAL_REGNO (ALLOCNO_REG (a2))))
+ return false;
+
+ for (i = 0; i < n1; i++)
+ {
+ ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
+ for (j = 0; j < n2; j++)
+ {
+ ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
+ if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
+ OBJECT_LIVE_RANGES (c2)))
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef ENABLE_IRA_CHECKING
+
+/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
+ intersect. This should be used when there is only one region.
+ Currently this is used during reload. */
+static bool
+pseudos_have_intersected_live_ranges_p (int regno1, int regno2)
+{
+ ira_allocno_t a1, a2;
+
+ ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
+ && regno2 >= FIRST_PSEUDO_REGISTER);
+ /* Reg info calculated by the dataflow infrastructure can differ
+ from that calculated by regclass. */
+ if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
+ || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
+ return false;
+ return allocnos_have_intersected_live_ranges_p (a1, a2);
+}
+
+#endif
+
\f
/* This page contains functions used to choose hard registers for
register was already allocated for an allocno. */
static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
-/* Array used to check already processed allocnos during the current
- update_copy_costs call. */
-static int *allocno_update_cost_check;
+/* Describes one element in a queue of allocnos whose costs need to be
+ updated. Each allocno in the queue is known to have a cover class. */
+struct update_cost_queue_elem
+{
+ /* This element is in the queue iff CHECK == update_cost_check. */
+ int check;
+
+ /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
+ connecting this allocno to the one being allocated. */
+ int divisor;
+
+ /* The next allocno in the queue, or null if this is the last element. */
+ ira_allocno_t next;
+};
+
+/* The first element in a queue of allocnos whose copy costs need to be
+ updated. Null if the queue is empty. */
+static ira_allocno_t update_cost_queue;
+
+/* The last element in the queue described by update_cost_queue.
+ Not valid if update_cost_queue is null. */
+static struct update_cost_queue_elem *update_cost_queue_tail;
+
+/* A pool of elements in the queue described by update_cost_queue.
+ Elements are indexed by ALLOCNO_NUM. */
+static struct update_cost_queue_elem *update_cost_queue_elems;
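+/* Elements are reused across passes: start_update_cost only increments
+ update_cost_check, so entries left over from a previous pass simply
+ fail the CHECK test and the pool never needs to be cleared. */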
/* The current value of update_copy_cost call count. */
static int update_cost_check;
static void
initiate_cost_update (void)
{
- allocno_update_cost_check
- = (int *) ira_allocate (ira_allocnos_num * sizeof (int));
- memset (allocno_update_cost_check, 0, ira_allocnos_num * sizeof (int));
+ size_t size;
+
+ size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
+ update_cost_queue_elems
+ = (struct update_cost_queue_elem *) ira_allocate (size);
+ memset (update_cost_queue_elems, 0, size);
update_cost_check = 0;
}
static void
finish_cost_update (void)
{
- ira_free (allocno_update_cost_check);
+ ira_free (update_cost_queue_elems);
}
/* When we traverse allocnos to update hard register costs, the cost
hop from given allocno to directly connected allocnos. */
#define COST_HOP_DIVISOR 4
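+/* Each additional hop multiplies the divisor by another factor of 4,
+ so after five hops an update is already divided by more than a
+ thousand and distant copies barely matter. */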
-/* This recursive function updates costs (decrease if DECR_P) of the
- unassigned allocnos connected by copies with ALLOCNO. This update
- increases chances to remove some copies. Copy cost is proportional
- the copy frequency divided by DIVISOR. */
+/* Start a new cost-updating pass. */
static void
-update_copy_costs_1 (ira_allocno_t allocno, int hard_regno,
- bool decr_p, int divisor)
+start_update_cost (void)
{
- int i, cost, update_cost;
- enum machine_mode mode;
- enum reg_class rclass, cover_class;
- ira_allocno_t another_allocno;
- ira_copy_t cp, next_cp;
+ update_cost_check++;
+ update_cost_queue = NULL;
+}
- cover_class = ALLOCNO_COVER_CLASS (allocno);
- if (cover_class == NO_REGS)
- return;
- if (allocno_update_cost_check[ALLOCNO_NUM (allocno)] == update_cost_check)
- return;
- allocno_update_cost_check[ALLOCNO_NUM (allocno)] = update_cost_check;
- ira_assert (hard_regno >= 0);
- i = ira_class_hard_reg_index[cover_class][hard_regno];
- ira_assert (i >= 0);
- rclass = REGNO_REG_CLASS (hard_regno);
- mode = ALLOCNO_MODE (allocno);
- for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
+/* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue,
+ unless ALLOCNO is already in the queue, or has no cover class. */
+static inline void
+queue_update_cost (ira_allocno_t allocno, int divisor)
+{
+ struct update_cost_queue_elem *elem;
+
+ elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
+ if (elem->check != update_cost_check
+ && ALLOCNO_COVER_CLASS (allocno) != NO_REGS)
{
- if (cp->first == allocno)
- {
- next_cp = cp->next_first_allocno_copy;
- another_allocno = cp->second;
- }
- else if (cp->second == allocno)
- {
- next_cp = cp->next_second_allocno_copy;
- another_allocno = cp->first;
- }
+ elem->check = update_cost_check;
+ elem->divisor = divisor;
+ elem->next = NULL;
+ if (update_cost_queue == NULL)
+ update_cost_queue = allocno;
else
- gcc_unreachable ();
- if (cover_class
- != ALLOCNO_COVER_CLASS (another_allocno)
- || ALLOCNO_ASSIGNED_P (another_allocno))
- continue;
- cost = (cp->second == allocno
- ? ira_register_move_cost[mode][rclass]
- [ALLOCNO_COVER_CLASS (another_allocno)]
- : ira_register_move_cost[mode]
- [ALLOCNO_COVER_CLASS (another_allocno)][rclass]);
- if (decr_p)
- cost = -cost;
- ira_allocate_and_set_or_copy_costs
- (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
- ALLOCNO_COVER_CLASS_COST (another_allocno),
- ALLOCNO_HARD_REG_COSTS (another_allocno));
- ira_allocate_and_set_or_copy_costs
- (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
- cover_class, 0,
- ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
- update_cost = cp->freq * cost / divisor;
- ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
- ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
- += update_cost;
- if (update_cost != 0)
- update_copy_costs_1 (another_allocno, hard_regno,
- decr_p, divisor * COST_HOP_DIVISOR);
+ update_cost_queue_tail->next = allocno;
+ update_cost_queue_tail = elem;
}
}
-/* Update the cost of allocnos to increase chances to remove some
- copies as the result of subsequent assignment. */
-static void
-update_copy_costs (ira_allocno_t allocno, bool decr_p)
+/* Try to remove the first element from update_cost_queue. Return false
+ if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
+ the removed element. */
+static inline bool
+get_next_update_cost (ira_allocno_t *allocno, int *divisor)
{
- update_cost_check++;
- update_copy_costs_1 (allocno, ALLOCNO_HARD_REGNO (allocno), decr_p, 1);
+ struct update_cost_queue_elem *elem;
+
+ if (update_cost_queue == NULL)
+ return false;
+
+ *allocno = update_cost_queue;
+ elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
+ *divisor = elem->divisor;
+ update_cost_queue = elem->next;
+ return true;
}
-/* This recursive function updates COSTS (decrease if DECR_P) by
- conflict costs of the unassigned allocnos connected by copies with
- ALLOCNO. This update increases chances to remove some copies.
- Copy cost is proportional to the copy frequency divided by
- DIVISOR. */
+/* Update the cost of allocnos to increase chances to remove some
+ copies as the result of subsequent assignment. */
static void
-update_conflict_hard_regno_costs (int *costs, ira_allocno_t allocno,
- int divisor, bool decr_p)
+update_copy_costs (ira_allocno_t allocno, bool decr_p)
{
- int i, cost, class_size, freq, mult, div;
- int *conflict_costs;
- bool cont_p;
+ int i, cost, update_cost, hard_regno, divisor;
enum machine_mode mode;
- enum reg_class cover_class;
+ enum reg_class rclass, cover_class;
ira_allocno_t another_allocno;
ira_copy_t cp, next_cp;
+ hard_regno = ALLOCNO_HARD_REGNO (allocno);
+ ira_assert (hard_regno >= 0);
+
cover_class = ALLOCNO_COVER_CLASS (allocno);
- /* Probably 5 hops will be enough. */
- if (divisor > (COST_HOP_DIVISOR * COST_HOP_DIVISOR
- * COST_HOP_DIVISOR * COST_HOP_DIVISOR * COST_HOP_DIVISOR))
- return;
if (cover_class == NO_REGS)
return;
- /* Check that it was already visited. */
- if (allocno_update_cost_check[ALLOCNO_NUM (allocno)] == update_cost_check)
- return;
- allocno_update_cost_check[ALLOCNO_NUM (allocno)] = update_cost_check;
- mode = ALLOCNO_MODE (allocno);
- class_size = ira_class_hard_regs_num[cover_class];
- for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
+ i = ira_class_hard_reg_index[cover_class][hard_regno];
+ ira_assert (i >= 0);
+ rclass = REGNO_REG_CLASS (hard_regno);
+
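+ /* ALLOCNO itself is processed with divisor 1; allocnos reached
+ through its copies are pulled from the queue with divisors that
+ grow by COST_HOP_DIVISOR per hop. */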
+ start_update_cost ();
+ divisor = 1;
+ do
{
- if (cp->first == allocno)
- {
- next_cp = cp->next_first_allocno_copy;
- another_allocno = cp->second;
- }
- else if (cp->second == allocno)
+ mode = ALLOCNO_MODE (allocno);
+ for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
{
- next_cp = cp->next_second_allocno_copy;
- another_allocno = cp->first;
- }
- else
- gcc_unreachable ();
- if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
- || ALLOCNO_ASSIGNED_P (another_allocno)
- || ALLOCNO_MAY_BE_SPILLED_P (another_allocno))
- continue;
- ira_allocate_and_copy_costs
- (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
- cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
- conflict_costs
- = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
- if (conflict_costs == NULL)
- cont_p = true;
- else
- {
- mult = cp->freq;
- freq = ALLOCNO_FREQ (another_allocno);
- if (freq == 0)
- freq = 1;
- div = freq * divisor;
- cont_p = false;
- for (i = class_size - 1; i >= 0; i--)
+ if (cp->first == allocno)
{
- cost = conflict_costs [i] * mult / div;
- if (cost == 0)
- continue;
- cont_p = true;
- if (decr_p)
- cost = -cost;
- costs[i] += cost;
+ next_cp = cp->next_first_allocno_copy;
+ another_allocno = cp->second;
}
- }
- if (cont_p)
- update_conflict_hard_regno_costs (costs, another_allocno,
- divisor * COST_HOP_DIVISOR, decr_p);
- }
-}
+ else if (cp->second == allocno)
+ {
+ next_cp = cp->next_second_allocno_copy;
+ another_allocno = cp->first;
+ }
+ else
+ gcc_unreachable ();
-/* Sort allocnos according to the profit of usage of a hard register
- instead of memory for them. */
-static int
-allocno_cost_compare_func (const void *v1p, const void *v2p)
-{
- ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
- ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
- int c1, c2;
+ cover_class = ALLOCNO_COVER_CLASS (another_allocno);
+ if (! ira_reg_classes_intersect_p[rclass][cover_class]
+ || ALLOCNO_ASSIGNED_P (another_allocno))
+ continue;
- c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_COVER_CLASS_COST (p1);
- c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_COVER_CLASS_COST (p2);
- if (c1 - c2)
- return c1 - c2;
+ cost = (cp->second == allocno
+ ? ira_get_register_move_cost (mode, rclass, cover_class)
+ : ira_get_register_move_cost (mode, cover_class, rclass));
+ if (decr_p)
+ cost = -cost;
- /* If regs are equally good, sort by allocno numbers, so that the
- results of qsort leave nothing to chance. */
- return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
+ update_cost = cp->freq * cost / divisor;
+ if (update_cost == 0)
+ continue;
+
+ ira_allocate_and_set_or_copy_costs
+ (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
+ ALLOCNO_UPDATED_COVER_CLASS_COST (another_allocno),
+ ALLOCNO_HARD_REG_COSTS (another_allocno));
+ ira_allocate_and_set_or_copy_costs
+ (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
+ cover_class, 0,
+ ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
+ i = ira_class_hard_reg_index[cover_class][hard_regno];
+ ira_assert (i >= 0);
+ ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
+ ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
+ += update_cost;
+
+ queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
+ }
+ }
+ while (get_next_update_cost (&allocno, &divisor));
}
-/* Print all allocnos coalesced with ALLOCNO. */
+/* This function updates COSTS (decrease if DECR_P) for hard registers
+ of COVER_CLASS by the conflict costs of the unassigned allocnos
+ connected by copies with the allocnos in update_cost_queue. This
+ update increases the chances of removing some copies. */
static void
-print_coalesced_allocno (ira_allocno_t allocno)
+update_conflict_hard_regno_costs (int *costs, enum reg_class cover_class,
+ bool decr_p)
{
- ira_allocno_t a;
+ int i, cost, class_size, freq, mult, div, divisor;
+ int index, hard_regno;
+ int *conflict_costs;
+ bool cont_p;
+ enum reg_class another_cover_class;
+ ira_allocno_t allocno, another_allocno;
+ ira_copy_t cp, next_cp;
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- ira_print_expanded_allocno (a);
- if (a == allocno)
- break;
- fprintf (ira_dump_file, "+");
- }
+ while (get_next_update_cost (&allocno, &divisor))
+ for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
+ {
+ if (cp->first == allocno)
+ {
+ next_cp = cp->next_first_allocno_copy;
+ another_allocno = cp->second;
+ }
+ else if (cp->second == allocno)
+ {
+ next_cp = cp->next_second_allocno_copy;
+ another_allocno = cp->first;
+ }
+ else
+ gcc_unreachable ();
+ another_cover_class = ALLOCNO_COVER_CLASS (another_allocno);
+ if (! ira_reg_classes_intersect_p[cover_class][another_cover_class]
+ || ALLOCNO_ASSIGNED_P (another_allocno)
+ || ALLOCNO_MAY_BE_SPILLED_P (another_allocno))
+ continue;
+ class_size = ira_class_hard_regs_num[another_cover_class];
+ ira_allocate_and_copy_costs
+ (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
+ another_cover_class,
+ ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
+ conflict_costs
+ = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
+ if (conflict_costs == NULL)
+ cont_p = true;
+ else
+ {
+ mult = cp->freq;
+ freq = ALLOCNO_FREQ (another_allocno);
+ if (freq == 0)
+ freq = 1;
+ div = freq * divisor;
+ cont_p = false;
+ for (i = class_size - 1; i >= 0; i--)
+ {
+ hard_regno = ira_class_hard_regs[another_cover_class][i];
+ ira_assert (hard_regno >= 0);
+ index = ira_class_hard_reg_index[cover_class][hard_regno];
+ if (index < 0)
+ continue;
+ cost = conflict_costs [i] * mult / div;
+ if (cost == 0)
+ continue;
+ cont_p = true;
+ if (decr_p)
+ cost = -cost;
+ costs[index] += cost;
+ }
+ }
+ /* Probably 5 hops will be enough. */
+ if (cont_p
+ && divisor <= (COST_HOP_DIVISOR
+ * COST_HOP_DIVISOR
+ * COST_HOP_DIVISOR
+ * COST_HOP_DIVISOR))
+ queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
+ }
}
-/* Choose a hard register for ALLOCNO (or for all coalesced allocnos
- represented by ALLOCNO). If RETRY_P is TRUE, it means that the
- function called from function `ira_reassign_conflict_allocnos' and
- `allocno_reload_assign'. This function implements the optimistic
- coalescing too: if we failed to assign a hard register to set of
- the coalesced allocnos, we put them onto the coloring stack for
- subsequent separate assigning. */
+/* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
+ that the function is called from `ira_reassign_conflict_allocnos'
+ or `allocno_reload_assign'. */
static bool
-assign_hard_reg (ira_allocno_t allocno, bool retry_p)
+assign_hard_reg (ira_allocno_t a, bool retry_p)
{
- HARD_REG_SET conflicting_regs;
- int i, j, hard_regno, best_hard_regno, class_size;
- int cost, mem_cost, min_cost, full_cost, min_full_cost, add_cost;
+ HARD_REG_SET conflicting_regs[2];
+ int i, j, hard_regno, nregs, best_hard_regno, class_size;
+ int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
int *a_costs;
- int *conflict_costs;
- enum reg_class cover_class, rclass;
+ enum reg_class cover_class;
enum machine_mode mode;
- ira_allocno_t a, conflict_allocno;
- ira_allocno_conflict_iterator aci;
static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
+#ifndef HONOR_REG_ALLOC_ORDER
+ enum reg_class rclass;
+ int add_cost;
+#endif
#ifdef STACK_REGS
bool no_stack_reg_p;
#endif
- ira_assert (! ALLOCNO_ASSIGNED_P (allocno));
- cover_class = ALLOCNO_COVER_CLASS (allocno);
+ nwords = ALLOCNO_NUM_OBJECTS (a);
+ ira_assert (! ALLOCNO_ASSIGNED_P (a));
+ cover_class = ALLOCNO_COVER_CLASS (a);
class_size = ira_class_hard_regs_num[cover_class];
- mode = ALLOCNO_MODE (allocno);
- CLEAR_HARD_REG_SET (conflicting_regs);
+ mode = ALLOCNO_MODE (a);
+ for (i = 0; i < nwords; i++)
+ CLEAR_HARD_REG_SET (conflicting_regs[i]);
best_hard_regno = -1;
memset (full_costs, 0, sizeof (int) * class_size);
mem_cost = 0;
- if (allocno_coalesced_p)
- bitmap_clear (processed_coalesced_allocno_bitmap);
memset (costs, 0, sizeof (int) * class_size);
memset (full_costs, 0, sizeof (int) * class_size);
#ifdef STACK_REGS
no_stack_reg_p = false;
#endif
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
- IOR_HARD_REG_SET (conflicting_regs,
- ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
- ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
- cover_class, ALLOCNO_HARD_REG_COSTS (a));
- a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
+ start_update_cost ();
+ mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
+
+ ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
+ cover_class, ALLOCNO_HARD_REG_COSTS (a));
+ a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
#ifdef STACK_REGS
- no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
+ no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
#endif
- for (cost = ALLOCNO_COVER_CLASS_COST (a), i = 0; i < class_size; i++)
- if (a_costs != NULL)
- {
- costs[i] += a_costs[i];
- full_costs[i] += a_costs[i];
- }
- else
- {
- costs[i] += cost;
- full_costs[i] += cost;
- }
+ cost = ALLOCNO_UPDATED_COVER_CLASS_COST (a);
+ for (i = 0; i < class_size; i++)
+ if (a_costs != NULL)
+ {
+ costs[i] += a_costs[i];
+ full_costs[i] += a_costs[i];
+ }
+ else
+ {
+ costs[i] += cost;
+ full_costs[i] += cost;
+ }
+ for (word = 0; word < nwords; word++)
+ {
+ ira_object_t conflict_obj;
+ ira_object_t obj = ALLOCNO_OBJECT (a, word);
+ ira_object_conflict_iterator oci;
+
+ IOR_HARD_REG_SET (conflicting_regs[word],
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
/* Take preferences of conflicting allocnos into account. */
- FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
- /* Reload can give another class so we need to check all
- allocnos. */
- if (retry_p || bitmap_bit_p (consideration_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- {
- ira_assert (cover_class == ALLOCNO_COVER_CLASS (conflict_allocno));
- if (allocno_coalesced_p)
- {
- if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- continue;
- bitmap_set_bit (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno));
- }
- if (ALLOCNO_ASSIGNED_P (conflict_allocno))
- {
- if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno)) >= 0)
- {
+ FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
+ {
+ ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
+ enum reg_class conflict_cover_class;
+
+ /* Reload can give another class so we need to check all
+ allocnos. */
+ if (!retry_p && !bitmap_bit_p (consideration_allocno_bitmap,
+ ALLOCNO_NUM (conflict_a)))
+ continue;
+ conflict_cover_class = ALLOCNO_COVER_CLASS (conflict_a);
+ ira_assert (ira_reg_classes_intersect_p
+ [cover_class][conflict_cover_class]);
+ if (ALLOCNO_ASSIGNED_P (conflict_a))
+ {
+ hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
+ if (hard_regno >= 0
+ && ira_class_hard_reg_index[cover_class][hard_regno] >= 0)
+ {
+ enum machine_mode mode = ALLOCNO_MODE (conflict_a);
+ int conflict_nregs = hard_regno_nregs[hard_regno][mode];
+ int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
+
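+ /* If the conflicting allocno is also tracked per subword and
+ needs exactly one hard register per subword, only the
+ register corresponding to CONFLICT_OBJ's subword conflicts
+ here; the subword-to-register mapping is reversed on
+ WORDS_BIG_ENDIAN targets. Otherwise all registers of its
+ mode conflict. */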
+ if (conflict_nregs == n_objects && conflict_nregs > 1)
+ {
+ int num = OBJECT_SUBWORD (conflict_obj);
+
+ if (WORDS_BIG_ENDIAN)
+ SET_HARD_REG_BIT (conflicting_regs[word],
+ hard_regno + n_objects - num - 1);
+ else
+ SET_HARD_REG_BIT (conflicting_regs[word],
+ hard_regno + num);
+ }
+ else
IOR_HARD_REG_SET
- (conflicting_regs,
- ira_reg_mode_hard_regset
- [hard_regno][ALLOCNO_MODE (conflict_allocno)]);
- if (hard_reg_set_subset_p (reg_class_contents[cover_class],
- conflicting_regs))
- goto fail;
+ (conflicting_regs[word],
+ ira_reg_mode_hard_regset[hard_regno][mode]);
+ if (hard_reg_set_subset_p (reg_class_contents[cover_class],
+ conflicting_regs[word]))
+ goto fail;
+ }
+ }
+ else if (! ALLOCNO_MAY_BE_SPILLED_P (conflict_a))
+ {
+ int k, *conflict_costs;
+
+ ira_allocate_and_copy_costs
+ (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
+ conflict_cover_class,
+ ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
+ conflict_costs
+ = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
+ if (conflict_costs != NULL)
+ for (j = class_size - 1; j >= 0; j--)
+ {
+ hard_regno = ira_class_hard_regs[cover_class][j];
+ ira_assert (hard_regno >= 0);
+ k = (ira_class_hard_reg_index
+ [conflict_cover_class][hard_regno]);
+ if (k < 0)
+ continue;
+ full_costs[j] -= conflict_costs[k];
}
- continue;
- }
- else if (! ALLOCNO_MAY_BE_SPILLED_P (conflict_allocno))
- {
- ira_allocate_and_copy_costs
- (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno),
- cover_class,
- ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_allocno));
- conflict_costs
- = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno);
- if (conflict_costs != NULL)
- for (j = class_size - 1; j >= 0; j--)
- full_costs[j] -= conflict_costs[j];
- VEC_safe_push (ira_allocno_t, heap, conflict_allocno_vec,
- conflict_allocno);
- }
- }
- if (a == allocno)
- break;
+ queue_update_cost (conflict_a, COST_HOP_DIVISOR);
+ }
+ }
}
/* Take into account preferences of allocnos connected by copies to
the conflict allocnos. */
- update_cost_check++;
- while (VEC_length (ira_allocno_t, conflict_allocno_vec) != 0)
- {
- conflict_allocno = VEC_pop (ira_allocno_t, conflict_allocno_vec);
- update_conflict_hard_regno_costs (full_costs, conflict_allocno,
- COST_HOP_DIVISOR, true);
- }
- update_cost_check++;
+ update_conflict_hard_regno_costs (full_costs, cover_class, true);
+
/* Take preferences of allocnos connected by copies into
account. */
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- update_conflict_hard_regno_costs (full_costs, a,
- COST_HOP_DIVISOR, false);
- if (a == allocno)
- break;
- }
+ start_update_cost ();
+ queue_update_cost (a, COST_HOP_DIVISOR);
+ update_conflict_hard_regno_costs (full_costs, cover_class, false);
min_cost = min_full_cost = INT_MAX;
+
/* We don't care about giving callee saved registers to allocnos no
living through calls because call clobbered registers are
allocated first (it is usual practice to put them first in
for (i = 0; i < class_size; i++)
{
hard_regno = ira_class_hard_regs[cover_class][i];
+ nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
#ifdef STACK_REGS
if (no_stack_reg_p
&& FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
continue;
#endif
- if (! ira_hard_reg_not_in_set_p (hard_regno, mode, conflicting_regs)
- || TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
- hard_regno))
+ if (TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
+ hard_regno))
+ continue;
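+ /* Check that each of the NREGS registers needed for this
+ allocno starting at HARD_REGNO is free of conflicts. When
+ the allocno is tracked per subword (nregs == nwords),
+ register j only has to be checked against the conflict set
+ of the matching subword; otherwise it is checked against
+ every subword's set. */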
+ for (j = 0; j < nregs; j++)
+ {
+ int k;
+ int set_to_test_start = 0, set_to_test_end = nwords;
+ if (nregs == nwords)
+ {
+ if (WORDS_BIG_ENDIAN)
+ set_to_test_start = nwords - j - 1;
+ else
+ set_to_test_start = j;
+ set_to_test_end = set_to_test_start + 1;
+ }
+ for (k = set_to_test_start; k < set_to_test_end; k++)
+ if (TEST_HARD_REG_BIT (conflicting_regs[k], hard_regno + j))
+ break;
+ if (k != set_to_test_end)
+ break;
+ }
+ if (j != nregs)
continue;
cost = costs[i];
full_cost = full_costs[i];
+#ifndef HONOR_REG_ALLOC_ORDER
if (! allocated_hardreg_p[hard_regno]
&& ira_hard_reg_not_in_set_p (hard_regno, mode, call_used_reg_set))
/* We need to save/restore the hard register in
cost += add_cost;
full_cost += add_cost;
}
+#endif
if (min_cost > cost)
min_cost = cost;
if (min_full_cost > full_cost)
best_hard_regno = -1;
}
fail:
- if (best_hard_regno < 0
- && ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno) != allocno)
- {
- for (j = 0, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- sorted_allocnos[j++] = a;
- if (a == allocno)
- break;
- }
- qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
- allocno_cost_compare_func);
- for (i = 0; i < j; i++)
- {
- a = sorted_allocnos[i];
- ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
- ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
- VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, a);
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- {
- fprintf (ira_dump_file, " Pushing");
- print_coalesced_allocno (a);
- fprintf (ira_dump_file, "\n");
- }
- }
- return false;
- }
if (best_hard_regno >= 0)
allocated_hardreg_p[best_hard_regno] = true;
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- ALLOCNO_HARD_REGNO (a) = best_hard_regno;
- ALLOCNO_ASSIGNED_P (a) = true;
- if (best_hard_regno >= 0)
- update_copy_costs (a, true);
- ira_assert (ALLOCNO_COVER_CLASS (a) == cover_class);
- /* We don't need updated costs anymore: */
- ira_free_allocno_updated_costs (a);
- if (a == allocno)
- break;
- }
+ ALLOCNO_HARD_REGNO (a) = best_hard_regno;
+ ALLOCNO_ASSIGNED_P (a) = true;
+ if (best_hard_regno >= 0)
+ update_copy_costs (a, true);
+ /* We don't need updated costs anymore: */
+ ira_free_allocno_updated_costs (a);
return best_hard_regno >= 0;
}
of given *cover* class in the uncolorable_bucket. */
static int uncolorable_allocnos_num[N_REG_CLASSES];
+/* Return the current spill priority of allocno A. The smaller the
+ number, the more preferable the allocno is for spilling. */
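+/* For example, with ALLOCNO_TEMP (the spill cost) 1200,
+ ALLOCNO_LEFT_CONFLICTS_SIZE 5 and a 2-register mode, the result is
+ 1200 / (5 * 2 + 1) = 109: allocnos that are cheap to spill and whose
+ removal frees many registers come out with small values. */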
+static int
+allocno_spill_priority (ira_allocno_t a)
+{
+ return (ALLOCNO_TEMP (a)
+ / (ALLOCNO_LEFT_CONFLICTS_SIZE (a)
+ * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]
+ + 1));
+}
+
/* Add ALLOCNO to bucket *BUCKET_PTR. ALLOCNO should be not in a bucket
before the call. */
static void
-add_ira_allocno_to_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
+add_allocno_to_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
{
ira_allocno_t first_allocno;
enum reg_class cover_class;
*bucket_ptr = allocno;
}
-/* The function returns frequency and number of available hard
- registers for allocnos coalesced with ALLOCNO. */
-static void
-get_coalesced_allocnos_attributes (ira_allocno_t allocno, int *freq, int *num)
-{
- ira_allocno_t a;
-
- *freq = 0;
- *num = 0;
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- *freq += ALLOCNO_FREQ (a);
- *num += ALLOCNO_AVAILABLE_REGS_NUM (a);
- if (a == allocno)
- break;
- }
-}
-
/* Compare two allocnos to define which allocno should be pushed first
into the coloring stack. If the return is a negative number, the
allocno given by the first parameter will be pushed first. In this
if ((diff = (int) ALLOCNO_COVER_CLASS (a2) - ALLOCNO_COVER_CLASS (a1)) != 0)
return diff;
- get_coalesced_allocnos_attributes (a1, &a1_freq, &a1_num);
- get_coalesced_allocnos_attributes (a2, &a2_freq, &a2_num);
+ a1_freq = ALLOCNO_FREQ (a1);
+ a1_num = ALLOCNO_AVAILABLE_REGS_NUM (a1);
+ a2_freq = ALLOCNO_FREQ (a2);
+ a2_num = ALLOCNO_AVAILABLE_REGS_NUM (a2);
if ((diff = a2_num - a1_num) != 0)
return diff;
else if ((diff = a1_freq - a2_freq) != 0)
their priority. ALLOCNO should be not in a bucket before the
call. */
static void
-add_ira_allocno_to_ordered_bucket (ira_allocno_t allocno,
- ira_allocno_t *bucket_ptr)
+add_allocno_to_ordered_bucket (ira_allocno_t allocno,
+ ira_allocno_t *bucket_ptr)
{
ira_allocno_t before, after;
enum reg_class cover_class;
into account. */
#define USE_SPLAY_P(CLASS) (uncolorable_allocnos_num[CLASS] > 4000)
-/* Put ALLOCNO onto the coloring stack without removing it from its
+/* Put allocno A onto the coloring stack without removing it from its
bucket. Pushing allocno to the coloring stack can result in moving
conflicting allocnos from the uncolorable bucket to the colorable
one. */
static void
-push_ira_allocno_to_stack (ira_allocno_t allocno)
+push_allocno_to_stack (ira_allocno_t a)
{
- int conflicts_num, conflict_size, size;
- ira_allocno_t a, conflict_allocno;
+ int size;
enum reg_class cover_class;
- ira_allocno_conflict_iterator aci;
-
- ALLOCNO_IN_GRAPH_P (allocno) = false;
- VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, allocno);
- cover_class = ALLOCNO_COVER_CLASS (allocno);
+ int i, n = ALLOCNO_NUM_OBJECTS (a);
+
+ ALLOCNO_IN_GRAPH_P (a) = false;
+ VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, a);
+ cover_class = ALLOCNO_COVER_CLASS (a);
if (cover_class == NO_REGS)
return;
- size = ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)];
- if (allocno_coalesced_p)
- bitmap_clear (processed_coalesced_allocno_bitmap);
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
+ size = ira_reg_class_nregs[cover_class][ALLOCNO_MODE (a)];
+ if (ALLOCNO_NUM_OBJECTS (a) > 1)
{
- FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
- if (bitmap_bit_p (coloring_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- {
- ira_assert (cover_class == ALLOCNO_COVER_CLASS (conflict_allocno));
- if (allocno_coalesced_p)
- {
- if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- continue;
- bitmap_set_bit (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno));
- }
- if (ALLOCNO_IN_GRAPH_P (conflict_allocno)
- && ! ALLOCNO_ASSIGNED_P (conflict_allocno))
- {
- conflicts_num = ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno);
- conflict_size
- = (ira_reg_class_nregs
- [cover_class][ALLOCNO_MODE (conflict_allocno)]);
- ira_assert
- (ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) >= size);
- if (conflicts_num + conflict_size
- <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
- {
- ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) -= size;
- continue;
- }
- conflicts_num
- = ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) - size;
- if (uncolorable_allocnos_splay_tree[cover_class] != NULL
- && !ALLOCNO_SPLAY_REMOVED_P (conflict_allocno)
- && USE_SPLAY_P (cover_class))
- {
- ira_assert
- (splay_tree_lookup
- (uncolorable_allocnos_splay_tree[cover_class],
- (splay_tree_key) conflict_allocno) != NULL);
- splay_tree_remove
- (uncolorable_allocnos_splay_tree[cover_class],
- (splay_tree_key) conflict_allocno);
- ALLOCNO_SPLAY_REMOVED_P (conflict_allocno) = true;
- VEC_safe_push (ira_allocno_t, heap,
- removed_splay_allocno_vec,
- conflict_allocno);
- }
- ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) = conflicts_num;
- if (conflicts_num + conflict_size
- <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
- {
- delete_allocno_from_bucket (conflict_allocno,
- &uncolorable_allocno_bucket);
- add_ira_allocno_to_ordered_bucket (conflict_allocno,
- &colorable_allocno_bucket);
- }
- }
- }
- if (a == allocno)
- break;
+ /* We will deal with the subwords individually. */
+ gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
+ size = 1;
+ }
+ for (i = 0; i < n; i++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, i);
+ int conflict_size;
+ ira_object_t conflict_obj;
+ ira_object_conflict_iterator oci;
+
+ FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
+ {
+ ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
+ int left_conflicts_size;
+
+ if (!bitmap_bit_p (coloring_allocno_bitmap,
+ ALLOCNO_NUM (conflict_a)))
+ continue;
+
+ ira_assert (cover_class
+ == ALLOCNO_COVER_CLASS (conflict_a));
+ if (!ALLOCNO_IN_GRAPH_P (conflict_a)
+ || ALLOCNO_ASSIGNED_P (conflict_a))
+ continue;
+
+ left_conflicts_size = ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_a);
+ conflict_size
+ = (ira_reg_class_nregs
+ [cover_class][ALLOCNO_MODE (conflict_a)]);
+ ira_assert (left_conflicts_size >= size);
+ if (left_conflicts_size + conflict_size
+ <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_a))
+ {
+ ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_a) -= size;
+ continue;
+ }
+ left_conflicts_size -= size;
+ if (uncolorable_allocnos_splay_tree[cover_class] != NULL
+ && !ALLOCNO_SPLAY_REMOVED_P (conflict_a)
+ && USE_SPLAY_P (cover_class))
+ {
+ ira_assert
+ (splay_tree_lookup
+ (uncolorable_allocnos_splay_tree[cover_class],
+ (splay_tree_key) conflict_a) != NULL);
+ splay_tree_remove
+ (uncolorable_allocnos_splay_tree[cover_class],
+ (splay_tree_key) conflict_a);
+ ALLOCNO_SPLAY_REMOVED_P (conflict_a) = true;
+ VEC_safe_push (ira_allocno_t, heap,
+ removed_splay_allocno_vec,
+ conflict_a);
+ }
+ ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_a)
+ = left_conflicts_size;
+ if (left_conflicts_size + conflict_size
+ <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_a))
+ {
+ delete_allocno_from_bucket
+ (conflict_a, &uncolorable_allocno_bucket);
+ add_allocno_to_ordered_bucket
+ (conflict_a, &colorable_allocno_bucket);
+ }
+ }
}
}
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
{
fprintf (ira_dump_file, " Pushing");
- print_coalesced_allocno (allocno);
- fprintf (ira_dump_file, "%s\n", colorable_p ? "" : "(potential spill)");
+ ira_print_expanded_allocno (allocno);
+ if (colorable_p)
+ fprintf (ira_dump_file, "\n");
+ else
+ fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
+ ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
+ allocno_spill_priority (allocno), ALLOCNO_TEMP (allocno));
}
cover_class = ALLOCNO_COVER_CLASS (allocno);
ira_assert ((colorable_p
- && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
<= ALLOCNO_AVAILABLE_REGS_NUM (allocno)))
|| (! colorable_p
- && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs[cover_class][ALLOCNO_MODE
(allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno))));
if (! colorable_p)
ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
- push_ira_allocno_to_stack (allocno);
+ push_allocno_to_stack (allocno);
}
/* Put all allocnos from colorable bucket onto the coloring stack. */
/* Puts ALLOCNO chosen for potential spilling onto the coloring
stack. */
static void
-push_ira_allocno_to_spill (ira_allocno_t allocno)
+push_allocno_to_spill (ira_allocno_t allocno)
{
delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- fprintf (ira_dump_file, " Pushing p%d(%d) (potential spill)\n",
+ fprintf (ira_dump_file, " Pushing p%d(%d) (spill for NO_REGS)\n",
ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno));
- push_ira_allocno_to_stack (allocno);
+ push_allocno_to_stack (allocno);
}
/* Return the frequency of exit edges (if EXIT_P) or entry from/to the
- loop given by its LOOP_NODE. */
+ loop given by its LOOP_NODE. */
int
ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
else
{
edges = get_loop_exit_edges (loop_node->loop);
- for (i = 0; VEC_iterate (edge, edges, i, e); i++)
+ FOR_EACH_VEC_ELT (edge, edges, i, e)
if (regno < 0
|| (bitmap_bit_p (DF_LR_OUT (e->src), regno)
&& bitmap_bit_p (DF_LR_IN (e->dest), regno)))
ira_loop_tree_node_t parent_node, loop_node;
regno = ALLOCNO_REGNO (a);
- cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a);
+ cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_COVER_CLASS_COST (a);
if (ALLOCNO_CAP (a) != NULL)
return cost;
loop_node = ALLOCNO_LOOP_TREE_NODE (a);
* ira_loop_edge_freq (loop_node, regno, true)
+ ira_memory_move_cost[mode][rclass][0]
* ira_loop_edge_freq (loop_node, regno, false))
- - (ira_register_move_cost[mode][rclass][rclass]
+ - (ira_get_register_move_cost (mode, rclass, rclass)
* (ira_loop_edge_freq (loop_node, regno, false)
+ ira_loop_edge_freq (loop_node, regno, true))));
return cost;
{
int pri1, pri2, diff;
ira_allocno_t a1 = (ira_allocno_t) k1, a2 = (ira_allocno_t) k2;
-
- pri1 = (IRA_ALLOCNO_TEMP (a1)
- / (ALLOCNO_LEFT_CONFLICTS_NUM (a1)
+
+ pri1 = (ALLOCNO_TEMP (a1)
+ / (ALLOCNO_LEFT_CONFLICTS_SIZE (a1)
* ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
+ 1));
- pri2 = (IRA_ALLOCNO_TEMP (a2)
- / (ALLOCNO_LEFT_CONFLICTS_NUM (a2)
+ pri2 = (ALLOCNO_TEMP (a2)
+ / (ALLOCNO_LEFT_CONFLICTS_SIZE (a2)
* ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a2)][ALLOCNO_MODE (a2)]
+ 1));
if ((diff = pri1 - pri2) != 0)
return diff;
- if ((diff = IRA_ALLOCNO_TEMP (a1) - IRA_ALLOCNO_TEMP (a2)) != 0)
+ if ((diff = ALLOCNO_TEMP (a1) - ALLOCNO_TEMP (a2)) != 0)
return diff;
return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
static void
push_allocnos_to_stack (void)
{
- ira_allocno_t allocno, a, i_allocno, *allocno_vec;
+ ira_allocno_t allocno, i_allocno, *allocno_vec;
enum reg_class cover_class, rclass;
int allocno_pri, i_allocno_pri, allocno_cost, i_allocno_cost;
int i, j, num, cover_class_allocnos_num[N_REG_CLASSES];
if ((cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
{
cover_class_allocnos_num[cover_class]++;
- cost = 0;
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- cost += calculate_allocno_spill_cost (a);
- if (a == allocno)
- break;
- }
- /* ??? Remove cost of copies between the coalesced
- allocnos. */
- IRA_ALLOCNO_TEMP (allocno) = cost;
+ cost = calculate_allocno_spill_cost (allocno);
+ ALLOCNO_TEMP (allocno) = cost;
}
/* Define place where to put uncolorable allocnos of the same cover
class. */
cover_class = ALLOCNO_COVER_CLASS (allocno);
if (cover_class == NO_REGS)
{
- push_ira_allocno_to_spill (allocno);
+ push_allocno_to_spill (allocno);
continue;
}
/* Potential spilling. */
allocno = VEC_pop (ira_allocno_t, removed_splay_allocno_vec);
ALLOCNO_SPLAY_REMOVED_P (allocno) = false;
rclass = ALLOCNO_COVER_CLASS (allocno);
- if (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ if (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs [rclass][ALLOCNO_MODE (allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno))
splay_tree_insert
if (ALLOCNO_IN_GRAPH_P (i_allocno))
{
i++;
- if (IRA_ALLOCNO_TEMP (i_allocno) == INT_MAX)
- {
- ira_allocno_t a;
- int cost = 0;
-
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (i_allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- cost += calculate_allocno_spill_cost (i_allocno);
- if (a == i_allocno)
- break;
- }
- /* ??? Remove cost of copies between the coalesced
- allocnos. */
- IRA_ALLOCNO_TEMP (i_allocno) = cost;
- }
- i_allocno_cost = IRA_ALLOCNO_TEMP (i_allocno);
- i_allocno_pri
- = (i_allocno_cost
- / (ALLOCNO_LEFT_CONFLICTS_NUM (i_allocno)
- * ira_reg_class_nregs[ALLOCNO_COVER_CLASS
- (i_allocno)]
- [ALLOCNO_MODE (i_allocno)] + 1));
- if (allocno == NULL || allocno_pri > i_allocno_pri
- || (allocno_pri == i_allocno_pri
- && (allocno_cost > i_allocno_cost
- || (allocno_cost == i_allocno_cost
- && (ALLOCNO_NUM (allocno)
- > ALLOCNO_NUM (i_allocno))))))
+ ira_assert (ALLOCNO_TEMP (i_allocno) != INT_MAX);
+ i_allocno_cost = ALLOCNO_TEMP (i_allocno);
+ i_allocno_pri = allocno_spill_priority (i_allocno);
+ if (allocno == NULL
+ || (! ALLOCNO_BAD_SPILL_P (i_allocno)
+ && ALLOCNO_BAD_SPILL_P (allocno))
+ || (! (ALLOCNO_BAD_SPILL_P (i_allocno)
+ && ! ALLOCNO_BAD_SPILL_P (allocno))
+ && (allocno_pri > i_allocno_pri
+ || (allocno_pri == i_allocno_pri
+ && (allocno_cost > i_allocno_cost
+ || (allocno_cost == i_allocno_cost
+ && (ALLOCNO_NUM (allocno)
+ > ALLOCNO_NUM (i_allocno))))))))
{
allocno = i_allocno;
allocno_cost = i_allocno_cost;
}
ira_assert (ALLOCNO_IN_GRAPH_P (allocno)
&& ALLOCNO_COVER_CLASS (allocno) == cover_class
- && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs[cover_class][ALLOCNO_MODE
(allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno)));
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
{
fprintf (ira_dump_file, " Popping");
- print_coalesced_allocno (allocno);
+ ira_print_expanded_allocno (allocno);
fprintf (ira_dump_file, " -- ");
}
if (cover_class == NO_REGS)
}
}
-/* Set up number of available hard registers for ALLOCNO. */
+/* Loop over all subobjects of allocno A, collecting total hard
+ register conflicts in PSET (which the caller must initialize). */
static void
-setup_allocno_available_regs_num (ira_allocno_t allocno)
+all_conflicting_hard_regs (ira_allocno_t a, HARD_REG_SET *pset)
{
- int i, n, hard_regs_num;
+ int i;
+ int n = ALLOCNO_NUM_OBJECTS (a);
+
+ for (i = 0; i < n; i++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, i);
+
+ IOR_HARD_REG_SET (*pset, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
+ }
+}
+
+/* Set up number of available hard registers for allocno A. */
+static void
+setup_allocno_available_regs_num (ira_allocno_t a)
+{
+ int i, n, hard_regs_num, hard_regno;
+ enum machine_mode mode;
enum reg_class cover_class;
- ira_allocno_t a;
HARD_REG_SET temp_set;
- cover_class = ALLOCNO_COVER_CLASS (allocno);
- ALLOCNO_AVAILABLE_REGS_NUM (allocno) = ira_available_class_regs[cover_class];
+ cover_class = ALLOCNO_COVER_CLASS (a);
+ ALLOCNO_AVAILABLE_REGS_NUM (a) = ira_available_class_regs[cover_class];
if (cover_class == NO_REGS)
return;
CLEAR_HARD_REG_SET (temp_set);
- ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) == allocno);
+ ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
hard_regs_num = ira_class_hard_regs_num[cover_class];
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
+ all_conflicting_hard_regs (a, &temp_set);
+
+ mode = ALLOCNO_MODE (a);
+ for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
{
- IOR_HARD_REG_SET (temp_set, ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
- if (a == allocno)
- break;
+ hard_regno = ira_class_hard_regs[cover_class][i];
+ if (TEST_HARD_REG_BIT (temp_set, hard_regno)
+ || TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
+ hard_regno))
+ n++;
}
- for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
- if (TEST_HARD_REG_BIT (temp_set, ira_class_hard_regs[cover_class][i]))
- n++;
if (internal_flag_ira_verbose > 2 && n > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Reg %d of %s has %d regs less\n",
- ALLOCNO_REGNO (allocno), reg_class_names[cover_class], n);
- ALLOCNO_AVAILABLE_REGS_NUM (allocno) -= n;
+ ALLOCNO_REGNO (a), reg_class_names[cover_class], n);
+ ALLOCNO_AVAILABLE_REGS_NUM (a) -= n;
}
-/* Set up ALLOCNO_LEFT_CONFLICTS_NUM for ALLOCNO. */
+/* Set up ALLOCNO_LEFT_CONFLICTS_SIZE for allocno A. */
static void
-setup_allocno_left_conflicts_num (ira_allocno_t allocno)
+setup_allocno_left_conflicts_size (ira_allocno_t a)
{
int i, hard_regs_num, hard_regno, conflict_allocnos_size;
- ira_allocno_t a, conflict_allocno;
enum reg_class cover_class;
HARD_REG_SET temp_set;
- ira_allocno_conflict_iterator aci;
- cover_class = ALLOCNO_COVER_CLASS (allocno);
+ cover_class = ALLOCNO_COVER_CLASS (a);
hard_regs_num = ira_class_hard_regs_num[cover_class];
CLEAR_HARD_REG_SET (temp_set);
- ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) == allocno);
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- IOR_HARD_REG_SET (temp_set, ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
- if (a == allocno)
- break;
- }
+ ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
+ all_conflicting_hard_regs (a, &temp_set);
+
AND_HARD_REG_SET (temp_set, reg_class_contents[cover_class]);
AND_COMPL_HARD_REG_SET (temp_set, ira_no_alloc_regs);
+
conflict_allocnos_size = 0;
if (! hard_reg_set_empty_p (temp_set))
for (i = 0; i < (int) hard_regs_num; i++)
}
}
CLEAR_HARD_REG_SET (temp_set);
- if (allocno_coalesced_p)
- bitmap_clear (processed_coalesced_allocno_bitmap);
if (cover_class != NO_REGS)
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
- if (bitmap_bit_p (consideration_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
+ {
+ int n = ALLOCNO_NUM_OBJECTS (a);
+
+ for (i = 0; i < n; i++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, i);
+ ira_object_t conflict_obj;
+ ira_object_conflict_iterator oci;
+
+ FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
{
+ ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
+
ira_assert (cover_class
- == ALLOCNO_COVER_CLASS (conflict_allocno));
- if (allocno_coalesced_p)
- {
- if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno)))
- continue;
- bitmap_set_bit (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno));
- }
- if (! ALLOCNO_ASSIGNED_P (conflict_allocno))
+ == ALLOCNO_COVER_CLASS (conflict_a));
+ if (! ALLOCNO_ASSIGNED_P (conflict_a))
conflict_allocnos_size
+= (ira_reg_class_nregs
- [cover_class][ALLOCNO_MODE (conflict_allocno)]);
- else if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno))
+ [cover_class][ALLOCNO_MODE (conflict_a)]);
+ else if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_a))
>= 0)
{
int last = (hard_regno
+ hard_regno_nregs
- [hard_regno][ALLOCNO_MODE (conflict_allocno)]);
+ [hard_regno][ALLOCNO_MODE (conflict_a)]);
while (hard_regno < last)
{
}
}
}
- if (a == allocno)
- break;
- }
- ALLOCNO_LEFT_CONFLICTS_NUM (allocno) = conflict_allocnos_size;
+ }
+ }
+ ALLOCNO_LEFT_CONFLICTS_SIZE (a) = conflict_allocnos_size;
}
/* Put ALLOCNO in a bucket corresponding to its number and size of its
static void
put_allocno_into_bucket (ira_allocno_t allocno)
{
- int hard_regs_num;
enum reg_class cover_class;
cover_class = ALLOCNO_COVER_CLASS (allocno);
- hard_regs_num = ira_class_hard_regs_num[cover_class];
- if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
- return;
ALLOCNO_IN_GRAPH_P (allocno) = true;
- setup_allocno_left_conflicts_num (allocno);
+ setup_allocno_left_conflicts_size (allocno);
setup_allocno_available_regs_num (allocno);
- if (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
+ if (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
+ ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
<= ALLOCNO_AVAILABLE_REGS_NUM (allocno))
- add_ira_allocno_to_bucket (allocno, &colorable_allocno_bucket);
+ add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
else
- add_ira_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
+ add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
}
-/* The function is used to sort allocnos according to their execution
- frequencies. */
-static int
-copy_freq_compare_func (const void *v1p, const void *v2p)
-{
- ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
- int pri1, pri2;
-
- pri1 = cp1->freq;
- pri2 = cp2->freq;
- if (pri2 - pri1)
- return pri2 - pri1;
-
- /* If freqencies are equal, sort by copies, so that the results of
- qsort leave nothing to chance. */
- return cp1->num - cp2->num;
-}
+/* Map: allocno number -> allocno priority. */
+static int *allocno_priorities;
-/* Merge two sets of coalesced allocnos given correspondingly by
- allocnos A1 and A2 (more accurately merging A2 set into A1
- set). */
+/* Set up priorities for N allocnos in array
+ CONSIDERATION_ALLOCNOS. */
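+/* As a rough numeric sketch of the computation below: an allocno with
+ 10 references (mult = floor_log2 (10) + 1 = 4), a memory cost
+ exceeding its cover class cost by 2000, and a 2-register mode starts
+ with priority 4 * 2000 * 2 = 16000; the second loop then rescales by
+ INT_MAX / max_priority and divides by the allocno's
+ ALLOCNO_EXCESS_PRESSURE_POINTS_NUM. */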
static void
-merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
+setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
{
- ira_allocno_t a, first, last, next;
+ int i, length, nrefs, priority, max_priority, mult;
+ ira_allocno_t a;
- first = ALLOCNO_FIRST_COALESCED_ALLOCNO (a1);
- if (first == ALLOCNO_FIRST_COALESCED_ALLOCNO (a2))
- return;
- for (last = a2, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
+ max_priority = 0;
+ for (i = 0; i < n; i++)
{
- ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = first;
- if (a == a2)
- break;
- last = a;
+ a = consideration_allocnos[i];
+ nrefs = ALLOCNO_NREFS (a);
+ ira_assert (nrefs >= 0);
+ mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
+ ira_assert (mult >= 0);
+ allocno_priorities[ALLOCNO_NUM (a)]
+ = priority
+ = (mult
+ * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a))
+ * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
+ if (priority < 0)
+ priority = -priority;
+ if (max_priority < priority)
+ max_priority = priority;
+ }
+ mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
+ for (i = 0; i < n; i++)
+ {
+ a = consideration_allocnos[i];
+ length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
+ if (ALLOCNO_NUM_OBJECTS (a) > 1)
+ length /= ALLOCNO_NUM_OBJECTS (a);
+ if (length <= 0)
+ length = 1;
+ allocno_priorities[ALLOCNO_NUM (a)]
+ = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
}
- next = ALLOCNO_NEXT_COALESCED_ALLOCNO (first);
- ALLOCNO_NEXT_COALESCED_ALLOCNO (first) = a2;
- ALLOCNO_NEXT_COALESCED_ALLOCNO (last) = next;
}
-/* Return TRUE if there are conflicting allocnos from two sets of
- coalesced allocnos given correspondingly by allocnos A1 and A2. If
- RELOAD_P is TRUE, we use live ranges to find conflicts because
- conflicts are represented only for allocnos of the same cover class
- and during the reload pass we coalesce allocnos for sharing stack
- memory slots. */
-static bool
-coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2,
- bool reload_p)
+/* Sort allocnos according to their priorities, which are calculated
+ analogously to those in file `global.c'. */
+static int
+allocno_priority_compare_func (const void *v1p, const void *v2p)
{
- ira_allocno_t a, conflict_allocno;
- ira_allocno_conflict_iterator aci;
+ ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
+ ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
+ int pri1, pri2;
- if (allocno_coalesced_p)
- {
- bitmap_clear (processed_coalesced_allocno_bitmap);
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
- if (a == a1)
- break;
- }
- }
- for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
- a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
- {
- if (reload_p)
- {
- for (conflict_allocno = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
- conflict_allocno
- = ALLOCNO_NEXT_COALESCED_ALLOCNO (conflict_allocno))
- {
- if (ira_allocno_live_ranges_intersect_p (a, conflict_allocno))
- return true;
- if (conflict_allocno == a1)
- break;
- }
- }
- else
- {
- FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
- if (conflict_allocno == a1
- || (allocno_coalesced_p
- && bitmap_bit_p (processed_coalesced_allocno_bitmap,
- ALLOCNO_NUM (conflict_allocno))))
- return true;
- }
- if (a == a2)
- break;
- }
- return false;
+ pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
+ pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
+ if (pri2 != pri1)
+ return SORTGT (pri2, pri1);
+
+ /* If regs are equally good, sort by allocno numbers, so that the
+ results of qsort leave nothing to chance. */
+ return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
-/* The major function for aggressive allocno coalescing. For the
- reload pass (RELOAD_P) we coalesce only spilled allocnos. If some
- allocnos have been coalesced, we set up flag
- allocno_coalesced_p. */
+/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
+ taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
static void
-coalesce_allocnos (bool reload_p)
+color_allocnos (void)
{
- ira_allocno_t a;
- ira_copy_t cp, next_cp, *sorted_copies;
- enum reg_class cover_class;
- enum machine_mode mode;
- unsigned int j;
- int i, n, cp_num, regno;
+ unsigned int i, n;
bitmap_iterator bi;
+ ira_allocno_t a;
- sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
- * sizeof (ira_copy_t));
- cp_num = 0;
- /* Collect copies. */
- EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
+ if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
{
- a = ira_allocnos[j];
- regno = ALLOCNO_REGNO (a);
- if ((! reload_p && ALLOCNO_ASSIGNED_P (a))
- || (reload_p
- && (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
- || (regno < ira_reg_equiv_len
- && (ira_reg_equiv_const[regno] != NULL_RTX
- || ira_reg_equiv_invariant_p[regno])))))
- continue;
- cover_class = ALLOCNO_COVER_CLASS (a);
- mode = ALLOCNO_MODE (a);
- for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
+ n = 0;
+ EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
{
- if (cp->first == a)
+ a = ira_allocnos[i];
+ if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
{
- next_cp = cp->next_first_allocno_copy;
- regno = ALLOCNO_REGNO (cp->second);
- if ((reload_p
- || (ALLOCNO_COVER_CLASS (cp->second) == cover_class
- && ALLOCNO_MODE (cp->second) == mode))
- && cp->insn != NULL
- && ((! reload_p && ! ALLOCNO_ASSIGNED_P (cp->second))
- || (reload_p
- && ALLOCNO_ASSIGNED_P (cp->second)
- && ALLOCNO_HARD_REGNO (cp->second) < 0
- && (regno >= ira_reg_equiv_len
- || (! ira_reg_equiv_invariant_p[regno]
- && ira_reg_equiv_const[regno] == NULL_RTX)))))
- sorted_copies[cp_num++] = cp;
+ ALLOCNO_HARD_REGNO (a) = -1;
+ ALLOCNO_ASSIGNED_P (a) = true;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ {
+ fprintf (ira_dump_file, " Spill");
+ ira_print_expanded_allocno (a);
+ fprintf (ira_dump_file, "\n");
+ }
+ continue;
}
- else if (cp->second == a)
- next_cp = cp->next_second_allocno_copy;
- else
- gcc_unreachable ();
+ sorted_allocnos[n++] = a;
}
- }
- qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
- /* Coalesced copies, most frequently executed first. */
- for (; cp_num != 0;)
- {
- for (i = 0; i < cp_num; i++)
+ if (n != 0)
{
- cp = sorted_copies[i];
- if (! coalesced_allocno_conflict_p (cp->first, cp->second, reload_p))
+ setup_allocno_priorities (sorted_allocnos, n);
+ qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
+ allocno_priority_compare_func);
+ for (i = 0; i < n; i++)
{
- allocno_coalesced_p = true;
+ a = sorted_allocnos[i];
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- fprintf
- (ira_dump_file,
- " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
- cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
- ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
- cp->freq);
- merge_allocnos (cp->first, cp->second);
- i++;
- break;
+ {
+ fprintf (ira_dump_file, " ");
+ ira_print_expanded_allocno (a);
+ fprintf (ira_dump_file, " -- ");
+ }
+ if (assign_hard_reg (a, false))
+ {
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ fprintf (ira_dump_file, "assign hard reg %d\n",
+ ALLOCNO_HARD_REGNO (a));
+ }
+ else
+ {
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ fprintf (ira_dump_file, "assign memory\n");
+ }
}
}
- /* Collect the rest of copies. */
- for (n = 0; i < cp_num; i++)
- {
- cp = sorted_copies[i];
- if (ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->first)
- != ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->second))
- sorted_copies[n++] = cp;
- }
- cp_num = n;
}
- ira_free (sorted_copies);
-}
-
-/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
- taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
-static void
-color_allocnos (void)
-{
- unsigned int i;
- bitmap_iterator bi;
- ira_allocno_t a;
-
- allocno_coalesced_p = false;
- processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
- if (flag_ira_coalesce)
- coalesce_allocnos (false);
- /* Put the allocnos into the corresponding buckets. */
- colorable_allocno_bucket = NULL;
- uncolorable_allocno_bucket = NULL;
- EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
- {
- a = ira_allocnos[i];
- if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
+ else
+ {
+ /* Put the allocnos into the corresponding buckets. */
+ colorable_allocno_bucket = NULL;
+ uncolorable_allocno_bucket = NULL;
+ EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
{
- ALLOCNO_HARD_REGNO (a) = -1;
- ALLOCNO_ASSIGNED_P (a) = true;
- ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
- ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ a = ira_allocnos[i];
+ if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
{
- fprintf (ira_dump_file, " Spill");
- print_coalesced_allocno (a);
- fprintf (ira_dump_file, "\n");
+ ALLOCNO_HARD_REGNO (a) = -1;
+ ALLOCNO_ASSIGNED_P (a) = true;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ {
+ fprintf (ira_dump_file, " Spill");
+ ira_print_expanded_allocno (a);
+ fprintf (ira_dump_file, "\n");
+ }
+ continue;
}
- continue;
+ put_allocno_into_bucket (a);
}
- put_allocno_into_bucket (a);
+ push_allocnos_to_stack ();
+ pop_allocnos_from_stack ();
}
- push_allocnos_to_stack ();
- pop_allocnos_from_stack ();
- if (flag_ira_coalesce)
- /* We don't need coalesced allocnos for ira_reassign_pseudos. */
- EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
- {
- a = ira_allocnos[i];
- ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
- ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
- }
- ira_free_bitmap (processed_coalesced_allocno_bitmap);
- allocno_coalesced_p = false;
}
\f
{
unsigned int j;
bitmap_iterator bi;
+ ira_loop_tree_node_t subloop_node, dest_loop_node;
+ edge e;
+ edge_iterator ei;
ira_assert (loop_tree_node->loop != NULL);
fprintf (ira_dump_file,
- "\n Loop %d (parent %d, header bb%d, depth %d)\n all:",
+ "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
loop_tree_node->loop->num,
(loop_tree_node->parent == NULL
? -1 : loop_tree_node->parent->loop->num),
loop_tree_node->loop->header->index,
loop_depth (loop_tree_node->loop));
+ for (subloop_node = loop_tree_node->children;
+ subloop_node != NULL;
+ subloop_node = subloop_node->next)
+ if (subloop_node->bb != NULL)
+ {
+ fprintf (ira_dump_file, " %d", subloop_node->bb->index);
+ FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
+ if (e->dest != EXIT_BLOCK_PTR
+ && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
+ != loop_tree_node))
+ fprintf (ira_dump_file, "(->%d:l%d)",
+ e->dest->index, dest_loop_node->loop->num);
+ }
+ fprintf (ira_dump_file, "\n all:");
EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
fprintf (ira_dump_file, "\n modified regnos:");
for (j = 0; (int) j < ira_reg_class_cover_size; j++)
{
enum reg_class cover_class;
-
+
cover_class = ira_reg_class_cover[j];
if (loop_tree_node->reg_pressure[cover_class] == 0)
continue;
EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
{
a = ira_allocnos[j];
- if (! ALLOCNO_ASSIGNED_P (a))
- continue;
- bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
+ if (ALLOCNO_ASSIGNED_P (a))
+ bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
}
/* Color all mentioned allocnos including transparent ones. */
color_allocnos ();
/* Process caps. They are processed just once. */
- if (flag_ira_algorithm == IRA_ALGORITHM_MIXED
- || flag_ira_algorithm == IRA_ALGORITHM_REGIONAL)
+ if (flag_ira_region == IRA_REGION_MIXED
+ || flag_ira_region == IRA_REGION_ALL)
EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
{
a = ira_allocnos[j];
/* Remove from processing in the next loop. */
bitmap_clear_bit (consideration_allocno_bitmap, j);
rclass = ALLOCNO_COVER_CLASS (a);
- if ((flag_ira_algorithm == IRA_ALGORITHM_MIXED
- && loop_tree_node->reg_pressure[rclass]
- <= ira_available_class_regs[rclass]))
+ if (flag_ira_region == IRA_REGION_MIXED
+ && (loop_tree_node->reg_pressure[rclass]
+ <= ira_available_class_regs[rclass]))
{
mode = ALLOCNO_MODE (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
mode = ALLOCNO_MODE (a);
rclass = ALLOCNO_COVER_CLASS (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
+ /* Use hard register class here. ??? */
if (hard_regno >= 0)
{
index = ira_class_hard_reg_index[rclass][hard_regno];
if (subloop_allocno == NULL
|| ALLOCNO_CAP (subloop_allocno) != NULL)
continue;
+ ira_assert (ALLOCNO_COVER_CLASS (subloop_allocno) == rclass);
ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
ALLOCNO_NUM (subloop_allocno)));
- if (flag_ira_algorithm == IRA_ALGORITHM_MIXED
+ if ((flag_ira_region == IRA_REGION_MIXED)
&& (loop_tree_node->reg_pressure[rclass]
<= ira_available_class_regs[rclass]))
{
else
{
cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
- ira_allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (subloop_allocno), cover_class,
- ALLOCNO_COVER_CLASS_COST (subloop_allocno));
- ira_allocate_and_set_costs
- (&ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno),
- cover_class, 0);
- cost = (ira_register_move_cost[mode][rclass][rclass]
+ cost = (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
- ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
- ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
+ ira_allocate_and_set_or_copy_costs
+ (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), cover_class,
+ ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno),
+ ALLOCNO_HARD_REG_COSTS (subloop_allocno));
+ ira_allocate_and_set_or_copy_costs
+ (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
+ cover_class, 0,
+ ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
+ ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
+ ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
-= cost;
+ if (ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno)
+ > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
+ ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno)
+ = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
+= (ira_memory_move_cost[mode][rclass][0] * enter_freq
+ ira_memory_move_cost[mode][rclass][1] * exit_freq);
- if (ALLOCNO_COVER_CLASS_COST (subloop_allocno)
- > ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index])
- ALLOCNO_COVER_CLASS_COST (subloop_allocno)
- = ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index];
}
}
}
100);
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
-
+
ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
subloop_allocno = subloop_node->regno_allocno_map[regno];
if (subloop_allocno == NULL)
continue;
+ ira_assert (rclass == ALLOCNO_COVER_CLASS (subloop_allocno));
/* We have accumulated cost. To get the real cost of
allocno usage in the loop we should subtract costs of
the subloop allocnos. */
+= (ira_memory_move_cost[mode][rclass][0] * exit_freq
+ ira_memory_move_cost[mode][rclass][1] * enter_freq);
if (hard_regno2 != hard_regno)
- cost -= (ira_register_move_cost[mode][rclass][rclass]
+ cost -= (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
}
}
if ((parent = loop_node->parent) != NULL
&& (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
{
+ ira_assert (rclass == ALLOCNO_COVER_CLASS (parent_allocno));
exit_freq = ira_loop_edge_freq (loop_node, regno, true);
enter_freq = ira_loop_edge_freq (loop_node, regno, false);
if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
+= (ira_memory_move_cost[mode][rclass][1] * exit_freq
+ ira_memory_move_cost[mode][rclass][0] * enter_freq);
if (hard_regno2 != hard_regno)
- cost -= (ira_register_move_cost[mode][rclass][rclass]
+ cost -= (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
}
}
ira_allocno_t another_a;
ira_copy_t cp, next_cp;
+ ira_free_allocno_updated_costs (a);
ira_assert (! ALLOCNO_ASSIGNED_P (a));
cover_class = ALLOCNO_COVER_CLASS (a);
if (cover_class == NO_REGS)
}
else
gcc_unreachable ();
- if (cover_class != ALLOCNO_COVER_CLASS (another_a)
+ if (! ira_reg_classes_intersect_p[cover_class][ALLOCNO_COVER_CLASS
+ (another_a)]
|| ! ALLOCNO_ASSIGNED_P (another_a)
|| (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
continue;
rclass = REGNO_REG_CLASS (hard_regno);
i = ira_class_hard_reg_index[cover_class][hard_regno];
- ira_assert (i >= 0);
+ if (i < 0)
+ continue;
cost = (cp->first == a
- ? ira_register_move_cost[mode][rclass][cover_class]
- : ira_register_move_cost[mode][cover_class][rclass]);
+ ? ira_get_register_move_cost (mode, rclass, cover_class)
+ : ira_get_register_move_cost (mode, cover_class, rclass));
ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
cover_class, ALLOCNO_COVER_CLASS_COST (a),
}
}
-/* Map: allocno number -> allocno priority. */
-static int *allocno_priorities;
-
-/* Allocate array ALLOCNO_PRIORITIES and set up priorities for N allocnos in
- array CONSIDERATION_ALLOCNOS. */
-static void
-start_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
+/* Try to assign hard registers to the unassigned allocnos and
+ allocnos conflicting with them or conflicting with allocnos whose
+ regno >= START_REGNO. The function is called after ira_flattening,
+ so more allocnos (including ones created in ira-emit.c) will have a
+ chance to get a hard register. We use a simple assignment algorithm
+ based on priorities. */
+void
+ira_reassign_conflict_allocnos (int start_regno)
{
- int i, length;
+ int i, allocnos_to_color_num;
ira_allocno_t a;
- allocno_live_range_t r;
+ enum reg_class cover_class;
+ bitmap allocnos_to_color;
+ ira_allocno_iterator ai;
- for (i = 0; i < n; i++)
+ allocnos_to_color = ira_allocate_bitmap ();
+ allocnos_to_color_num = 0;
+ FOR_EACH_ALLOCNO (a, ai)
{
- a = consideration_allocnos[i];
- for (length = 0, r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
- length += r->finish - r->start + 1;
- if (length == 0)
+ int n = ALLOCNO_NUM_OBJECTS (a);
+
+ if (! ALLOCNO_ASSIGNED_P (a)
+ && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
{
- allocno_priorities[ALLOCNO_NUM (a)] = 0;
- continue;
+ if (ALLOCNO_COVER_CLASS (a) != NO_REGS)
+ sorted_allocnos[allocnos_to_color_num++] = a;
+ else
+ {
+ ALLOCNO_ASSIGNED_P (a) = true;
+ ALLOCNO_HARD_REGNO (a) = -1;
+ ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
+ ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
+ }
+ bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
+ }
+ if (ALLOCNO_REGNO (a) < start_regno
+ || (cover_class = ALLOCNO_COVER_CLASS (a)) == NO_REGS)
+ continue;
+ for (i = 0; i < n; i++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, i);
+ ira_object_t conflict_obj;
+ ira_object_conflict_iterator oci;
+ FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
+ {
+ ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
+ ira_assert (ira_reg_classes_intersect_p
+ [cover_class][ALLOCNO_COVER_CLASS (conflict_a)]);
+ if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
+ continue;
+ sorted_allocnos[allocnos_to_color_num++] = conflict_a;
+ }
+ }
+ }
+ ira_free_bitmap (allocnos_to_color);
+ if (allocnos_to_color_num > 1)
+ {
+ setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
+ qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
+ allocno_priority_compare_func);
+ }
+ for (i = 0; i < allocnos_to_color_num; i++)
+ {
+ a = sorted_allocnos[i];
+ ALLOCNO_ASSIGNED_P (a) = false;
+ update_curr_costs (a);
+ }
+ for (i = 0; i < allocnos_to_color_num; i++)
+ {
+ a = sorted_allocnos[i];
+ if (assign_hard_reg (a, true))
+ {
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ fprintf
+ (ira_dump_file,
+ " Secondary allocation: assign hard reg %d to reg %d\n",
+ ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
}
- ira_assert (length > 0 && ALLOCNO_NREFS (a) >= 0);
- allocno_priorities[ALLOCNO_NUM (a)]
- = (((double) (floor_log2 (ALLOCNO_NREFS (a)) * ALLOCNO_FREQ (a))
- / length)
- * (10000 / REG_FREQ_MAX) * PSEUDO_REGNO_SIZE (ALLOCNO_REGNO (a)));
}
}
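
The collection loop above leans on bitmap_set_bit returning whether the bit was newly set, so each allocno enters the worklist at most once before the priority sort. For readers following the algorithm, here is a minimal standalone sketch of that collect-deduplicate-sort pattern; it is not IRA code, and the conflict matrix, priorities, and node count are invented illustration data.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define N 5

/* Hypothetical conflict graph: conflict[i][j] != 0 means nodes I and
   J conflict.  Priorities are higher-is-better.  */
static int conflict[N][N] = {
  {0,1,0,0,0},
  {1,0,1,0,0},
  {0,1,0,0,0},
  {0,0,0,0,1},
  {0,0,0,1,0}
};
static int priority[N] = {3, 9, 1, 7, 5};
static int assigned[N] = {0, 0, 1, 0, 1}; /* 1 = already has a register.  */

/* Order worklist entries by decreasing priority, ties by index.  */
static int
priority_compare (const void *v1p, const void *v2p)
{
  int i1 = *(const int *) v1p, i2 = *(const int *) v2p;

  if (priority[i2] != priority[i1])
    return priority[i2] - priority[i1];
  return i1 - i2;
}

int
main (void)
{
  int worklist[N], n = 0, seen[N], i, j;

  memset (seen, 0, sizeof seen);
  /* Collect unassigned nodes and everything conflicting with them,
     deduplicating with SEEN (the analogue of the bitmap above).  */
  for (i = 0; i < N; i++)
    if (! assigned[i])
      {
        if (! seen[i])
          {
            seen[i] = 1;
            worklist[n++] = i;
          }
        for (j = 0; j < N; j++)
          if (conflict[i][j] && ! seen[j])
            {
              seen[j] = 1;
              worklist[n++] = j;
            }
      }
  qsort (worklist, n, sizeof (int), priority_compare);
  for (i = 0; i < n; i++)
    printf ("process node %d (priority %d)\n",
            worklist[i], priority[worklist[i]]);
  return 0;
}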
-/* Sort allocnos according to their priorities which are calculated
- analogous to ones in file `global.c'. */
+\f
+
+/* This page contains code to coalesce memory stack slots used by
+ spilled allocnos. This results in a smaller stack frame, better
+ data locality, and smaller code for some architectures like
+ x86/x86_64 where insn size depends on the address displacement
+ value. On the other hand, it can worsen insn scheduling after the
+ RA, but in practice this is less important than smaller stack
+ frames. */
+
+/* TRUE if we coalesced some allocnos. In other words, if we got
+ loops formed by members first_coalesced_allocno and
+ next_coalesced_allocno containing more than one allocno. */
+static bool allocno_coalesced_p;
+
+/* Bitmap used to prevent a repeated allocno processing because of
+ coalescing. */
+static bitmap processed_coalesced_allocno_bitmap;
+
+/* The function is used to sort copies according to their execution
+ frequencies. */
static int
-allocno_priority_compare_func (const void *v1p, const void *v2p)
+copy_freq_compare_func (const void *v1p, const void *v2p)
{
- ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
- ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
+ ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
int pri1, pri2;
- pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
- pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
+ pri1 = cp1->freq;
+ pri2 = cp2->freq;
if (pri2 - pri1)
return pri2 - pri1;
- /* If regs are equally good, sort by allocnos, so that the results of
- qsort leave nothing to chance. */
- return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
+ /* If frequencies are equal, sort by copies, so that the results of
+ qsort leave nothing to chance. */
+ return cp1->num - cp2->num;
+}
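
As an aside, the comparator above is the usual qsort idiom: order by decreasing frequency and fall back to a stable key so the result is deterministic. A minimal standalone sketch (with a hypothetical struct copy, not the patch's ira_copy_t) is:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for ira_copy_t; only the fields the
   comparator needs.  */
struct copy { int num; int freq; };

/* Sort copies by decreasing execution frequency, breaking ties by
   copy number so that the qsort result is deterministic.  */
static int
freq_compare (const void *v1p, const void *v2p)
{
  const struct copy *cp1 = *(const struct copy *const *) v1p;
  const struct copy *cp2 = *(const struct copy *const *) v2p;

  if (cp2->freq != cp1->freq)
    return cp2->freq - cp1->freq;
  return cp1->num - cp2->num;
}

int
main (void)
{
  struct copy c[3] = { {0, 10}, {1, 30}, {2, 30} };
  struct copy *v[3] = { &c[0], &c[1], &c[2] };
  int i;

  qsort (v, 3, sizeof (struct copy *), freq_compare);
  for (i = 0; i < 3; i++)
    printf ("copy %d (freq %d)\n", v[i]->num, v[i]->freq);
  return 0;
}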
+
+/* Merge two sets of coalesced allocnos represented respectively by
+ allocnos A1 and A2 (more precisely, merge the A2 set into the A1
+ set). */
+static void
+merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
+{
+ ira_allocno_t a, first, last, next;
+
+ first = ALLOCNO_FIRST_COALESCED_ALLOCNO (a1);
+ if (first == ALLOCNO_FIRST_COALESCED_ALLOCNO (a2))
+ return;
+ for (last = a2, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
+ a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
+ {
+ ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = first;
+ if (a == a2)
+ break;
+ last = a;
+ }
+ next = ALLOCNO_NEXT_COALESCED_ALLOCNO (first);
+ ALLOCNO_NEXT_COALESCED_ALLOCNO (first) = a2;
+ ALLOCNO_NEXT_COALESCED_ALLOCNO (last) = next;
+}
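
The coalesced-allocno sets are circular singly linked rings with a designated first member. The following self-contained sketch (a hypothetical node type, not the IRA data structures) performs the same splice: retarget every member of the second ring at the first ring's representative, then exchange next pointers to join the two cycles.

#include <stdio.h>

/* Hypothetical ring node: FIRST points at the ring's representative,
   NEXT at the following member (circular).  */
struct node { int id; struct node *first; struct node *next; };

/* Merge the ring containing N2 into the ring containing N1,
   mirroring merge_allocnos above.  */
static void
merge_rings (struct node *n1, struct node *n2)
{
  struct node *a, *first, *last, *next;

  first = n1->first;
  if (first == n2->first)
    return;                       /* Already in the same ring.  */
  for (last = n2, a = n2->next;; a = a->next)
    {
      a->first = first;
      if (a == n2)
        break;
      last = a;
    }
  next = first->next;
  first->next = n2;
  last->next = next;
}

int
main (void)
{
  struct node a = {0}, b = {1}, c = {2}, *p;

  /* Three singleton rings.  */
  a.first = &a; a.next = &a;
  b.first = &b; b.next = &b;
  c.first = &c; c.next = &c;
  merge_rings (&a, &b);
  merge_rings (&a, &c);
  p = &a;
  do { printf ("%d ", p->id); p = p->next; } while (p != &a);
  printf ("\n");                  /* Prints all three members.  */
  return 0;
}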
+
+/* Given two sets of coalesced allocnos, represented by A1 and A2,
+ this function determines if any conflicts exist between the two
+ sets. We use live ranges to find conflicts because conflicts are
+ represented only for allocnos of the same cover class, and during
+ the reload pass we coalesce allocnos for sharing stack memory
+ slots. */
+static bool
+coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
+{
+ ira_allocno_t a, conflict_allocno;
+
+ bitmap_clear (processed_coalesced_allocno_bitmap);
+ if (allocno_coalesced_p)
+ {
+ for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
+ a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
+ {
+ bitmap_set_bit (processed_coalesced_allocno_bitmap,
+ OBJECT_CONFLICT_ID (ALLOCNO_OBJECT (a, 0)));
+ if (a == a1)
+ break;
+ }
+ }
+ for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
+ a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
+ {
+ for (conflict_allocno = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
+ conflict_allocno
+ = ALLOCNO_NEXT_COALESCED_ALLOCNO (conflict_allocno))
+ {
+ if (allocnos_have_intersected_live_ranges_p (a, conflict_allocno))
+ return true;
+ if (conflict_allocno == a1)
+ break;
+ }
+
+ if (a == a2)
+ break;
+ }
+ return false;
}
-/* Try to assign hard registers to the unassigned allocnos and
- allocnos conflicting with them or conflicting with allocnos whose
- regno >= START_REGNO. The function is called after ira_flattening,
- so more allocnos (including ones created in ira-emit.c) will have a
- chance to get a hard register. We use simple assignment algorithm
- based on priorities. */
-void
-ira_reassign_conflict_allocnos (int start_regno)
+/* The major function for aggressive allocno coalescing. We coalesce
+ only spilled allocnos. If some allocnos have been coalesced, we
+ set the flag allocno_coalesced_p. */
+static void
+coalesce_allocnos (void)
{
- int i, allocnos_to_color_num;
- ira_allocno_t a, conflict_a;
- ira_allocno_conflict_iterator aci;
- enum reg_class cover_class;
- bitmap allocnos_to_color;
- ira_allocno_iterator ai;
+ ira_allocno_t a;
+ ira_copy_t cp, next_cp, *sorted_copies;
+ unsigned int j;
+ int i, n, cp_num, regno;
+ bitmap_iterator bi;
- allocnos_to_color = ira_allocate_bitmap ();
- allocnos_to_color_num = 0;
- FOR_EACH_ALLOCNO (a, ai)
+ sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
+ * sizeof (ira_copy_t));
+ cp_num = 0;
+ /* Collect copies. */
+ EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
{
- if (! ALLOCNO_ASSIGNED_P (a)
- && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
+ a = ira_allocnos[j];
+ regno = ALLOCNO_REGNO (a);
+ if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
+ || (regno < ira_reg_equiv_len
+ && (ira_reg_equiv_const[regno] != NULL_RTX
+ || ira_reg_equiv_invariant_p[regno])))
+ continue;
+ for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
{
- if (ALLOCNO_COVER_CLASS (a) != NO_REGS)
- sorted_allocnos[allocnos_to_color_num++] = a;
- else
+ if (cp->first == a)
{
- ALLOCNO_ASSIGNED_P (a) = true;
- ALLOCNO_HARD_REGNO (a) = -1;
- ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
- ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
+ next_cp = cp->next_first_allocno_copy;
+ regno = ALLOCNO_REGNO (cp->second);
+ /* For priority coloring we coalesce allocnos only if they have
+ the same cover class, not merely intersecting cover classes,
+ although that would be possible. This is done for
+ simplicity. */
+ if ((cp->insn != NULL || cp->constraint_p)
+ && ALLOCNO_ASSIGNED_P (cp->second)
+ && ALLOCNO_HARD_REGNO (cp->second) < 0
+ && (regno >= ira_reg_equiv_len
+ || (! ira_reg_equiv_invariant_p[regno]
+ && ira_reg_equiv_const[regno] == NULL_RTX)))
+ sorted_copies[cp_num++] = cp;
}
- bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
- }
- if (ALLOCNO_REGNO (a) < start_regno
- || (cover_class = ALLOCNO_COVER_CLASS (a)) == NO_REGS)
- continue;
- FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
- {
- ira_assert (cover_class == ALLOCNO_COVER_CLASS (conflict_a));
- if (bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
- continue;
- bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a));
- sorted_allocnos[allocnos_to_color_num++] = conflict_a;
+ else if (cp->second == a)
+ next_cp = cp->next_second_allocno_copy;
+ else
+ gcc_unreachable ();
}
}
- ira_free_bitmap (allocnos_to_color);
- if (allocnos_to_color_num > 1)
- {
- start_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
- qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
- allocno_priority_compare_func);
- }
- for (i = 0; i < allocnos_to_color_num; i++)
- {
- a = sorted_allocnos[i];
- ALLOCNO_ASSIGNED_P (a) = false;
- ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
- ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
- update_curr_costs (a);
- }
- for (i = 0; i < allocnos_to_color_num; i++)
+ qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
+ /* Coalesce copies, most frequently executed first. */
+ for (; cp_num != 0;)
{
- a = sorted_allocnos[i];
- if (assign_hard_reg (a, true))
+ for (i = 0; i < cp_num; i++)
{
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- fprintf
- (ira_dump_file,
- " Secondary allocation: assign hard reg %d to reg %d\n",
- ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
+ cp = sorted_copies[i];
+ if (! coalesced_allocno_conflict_p (cp->first, cp->second))
+ {
+ allocno_coalesced_p = true;
+ if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
+ fprintf
+ (ira_dump_file,
+ " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
+ cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
+ ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
+ cp->freq);
+ merge_allocnos (cp->first, cp->second);
+ i++;
+ break;
+ }
+ }
+ /* Collect the rest of copies. */
+ for (n = 0; i < cp_num; i++)
+ {
+ cp = sorted_copies[i];
+ if (ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->first)
+ != ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->second))
+ sorted_copies[n++] = cp;
}
+ cp_num = n;
}
+ ira_free (sorted_copies);
}
-\f
-
-/* This page contains code to coalesce memory stack slots used by
- spilled allocnos. This results in smaller stack frame, better data
- locality, and in smaller code for some architectures like
- x86/x86_64 where insn size depends on address displacement value.
- On the other hand, it can worsen insn scheduling after the RA but
- in practice it is less important than smaller stack frames. */
-
/* Usage cost and order number of the coalesced allocno set to which
   a given pseudo register belongs. */
static int *regno_coalesced_allocno_cost;
return num;
}
+/* Array of live range lists, indexed by slot number, of size
+ IRA_ALLOCNOS_NUM. The element for a given slot contains the live
+ ranges of the coalesced allocnos assigned to that slot. */
+static live_range_t *slot_coalesced_allocnos_live_ranges;
+
+/* Return TRUE if the coalesced allocnos represented by ALLOCNO have
+ live ranges that intersect the live ranges of the coalesced
+ allocnos assigned to the slot with number N. */
+static bool
+slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
+{
+ ira_allocno_t a;
+
+ for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
+ a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
+ {
+ int i;
+ int nr = ALLOCNO_NUM_OBJECTS (a);
+ for (i = 0; i < nr; i++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, i);
+ if (ira_live_ranges_intersect_p (slot_coalesced_allocnos_live_ranges[n],
+ OBJECT_LIVE_RANGES (obj)))
+ return true;
+ }
+ if (a == allocno)
+ break;
+ }
+ return false;
+}
+
+/* Update live ranges of slot to which coalesced allocnos represented
+ by ALLOCNO were assigned. */
+static void
+setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
+{
+ int i, n;
+ ira_allocno_t a;
+ live_range_t r;
+
+ n = ALLOCNO_TEMP (allocno);
+ for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
+ a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
+ {
+ int nr = ALLOCNO_NUM_OBJECTS (a);
+ for (i = 0; i < nr; i++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, i);
+ r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
+ slot_coalesced_allocnos_live_ranges[n]
+ = ira_merge_live_ranges
+ (slot_coalesced_allocnos_live_ranges[n], r);
+ }
+ if (a == allocno)
+ break;
+ }
+}
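
Both helpers above reduce to operations on lists of [start, finish] intervals. A standalone sketch of the intersection test follows; the range type is hypothetical, and the quadratic walk is kept for clarity (with lists kept sorted a single linear pass suffices).

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical live range node: the program points [START, FINISH]
   where an object is live.  */
struct range { int start, finish; struct range *next; };

static struct range *
make_range (int start, int finish, struct range *next)
{
  struct range *r = (struct range *) malloc (sizeof *r);
  r->start = start;
  r->finish = finish;
  r->next = next;
  return r;
}

/* Return 1 if some interval of R1 overlaps some interval of R2.  */
static int
ranges_intersect_p (struct range *r1, struct range *r2)
{
  struct range *p, *q;

  for (p = r1; p != NULL; p = p->next)
    for (q = r2; q != NULL; q = q->next)
      if (p->start <= q->finish && q->start <= p->finish)
        return 1;
  return 0;
}

int
main (void)
{
  struct range *a = make_range (10, 20, make_range (30, 40, NULL));
  struct range *b = make_range (21, 29, NULL);
  struct range *c = make_range (15, 35, NULL);

  printf ("a/b: %d\n", ranges_intersect_p (a, b)); /* 0: fits in the gap.  */
  printf ("a/c: %d\n", ranges_intersect_p (a, c)); /* 1: overlaps both.  */
  return 0;
}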
+
/* We have coalesced allocnos involved in copies. Coalesce allocnos
further in order to share the same memory stack slot. Allocnos
representing sets of allocnos coalesced before the call are given
static bool
coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
{
- int i, j;
+ int i, j, n, last_coalesced_allocno_num;
ira_allocno_t allocno, a;
bool merged_p = false;
+ bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
+ slot_coalesced_allocnos_live_ranges
+ = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
+ memset (slot_coalesced_allocnos_live_ranges, 0,
+ sizeof (live_range_t) * ira_allocnos_num);
+ last_coalesced_allocno_num = 0;
/* Coalesce non-conflicting spilled allocnos, preferring the most
   frequently used ones. */
for (i = 0; i < num; i++)
{
allocno = spilled_coalesced_allocnos[i];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
+ || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
|| (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
- && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
- || ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
+ && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
+ || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
continue;
for (j = 0; j < i; j++)
{
a = spilled_coalesced_allocnos[j];
- if (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) != a
- || (ALLOCNO_REGNO (a) < ira_reg_equiv_len
- && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
- || ira_reg_equiv_const[ALLOCNO_REGNO (a)] != NULL_RTX))
- || coalesced_allocno_conflict_p (allocno, a, true))
- continue;
+ n = ALLOCNO_TEMP (a);
+ if (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
+ && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
+ && (ALLOCNO_REGNO (a) >= ira_reg_equiv_len
+ || (! ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
+ && ira_reg_equiv_const[ALLOCNO_REGNO (a)] == NULL_RTX))
+ && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
+ break;
+ }
+ if (j >= i)
+ {
+ /* No coalescing: set up number for coalesced allocnos
+ represented by ALLOCNO. */
+ ALLOCNO_TEMP (allocno) = last_coalesced_allocno_num++;
+ setup_slot_coalesced_allocno_live_ranges (allocno);
+ }
+ else
+ {
allocno_coalesced_p = true;
merged_p = true;
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
" Coalescing spilled allocnos a%dr%d->a%dr%d\n",
ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
+ ALLOCNO_TEMP (allocno) = ALLOCNO_TEMP (a);
+ setup_slot_coalesced_allocno_live_ranges (allocno);
merge_allocnos (a, allocno);
ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
}
}
+ for (i = 0; i < ira_allocnos_num; i++)
+ ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
+ ira_free (slot_coalesced_allocnos_live_ranges);
return merged_p;
}
ira_allocno_iterator ai;
ira_allocno_t *spilled_coalesced_allocnos;
- processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
/* Set up allocnos that can be coalesced. */
coloring_allocno_bitmap = ira_allocate_bitmap ();
for (i = 0; i < n; i++)
ALLOCNO_NUM (allocno));
}
allocno_coalesced_p = false;
- coalesce_allocnos (true);
+ processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
+ coalesce_allocnos ();
ira_free_bitmap (coloring_allocno_bitmap);
regno_coalesced_allocno_cost
= (int *) ira_allocate (max_regno * sizeof (int));
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
|| ALLOCNO_HARD_REGNO (allocno) >= 0
|| (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
- && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
- || ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
+ && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
+ || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
continue;
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
reg_max_ref_width[ALLOCNO_REGNO (a)]));
-
+
if (a == allocno)
break;
}
}
/* Try to assign a hard register (except for FORBIDDEN_REGS) to
- allocno A and return TRUE in the case of success. That is an
- analog of retry_global_alloc for IRA. */
+ allocno A and return TRUE in the case of success. */
static bool
allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
{
int hard_regno;
enum reg_class cover_class;
int regno = ALLOCNO_REGNO (a);
+ HARD_REG_SET saved[2];
+ int i, n;
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), forbidden_regs);
- if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), call_used_reg_set);
+ n = ALLOCNO_NUM_OBJECTS (a);
+ for (i = 0; i < n; i++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, i);
+ COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
+ IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
+ if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
+ IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
+ call_used_reg_set);
+ }
ALLOCNO_ASSIGNED_P (a) = false;
- ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
- ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
cover_class = ALLOCNO_COVER_CLASS (a);
update_curr_costs (a);
assign_hard_reg (a, true);
}
else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, "\n");
-
+ for (i = 0; i < n; i++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, i);
+ COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
+ }
return reg_renumber[regno] >= 0;
}
ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
HARD_REG_SET bad_spill_regs,
HARD_REG_SET *pseudo_forbidden_regs,
- HARD_REG_SET *pseudo_previous_regs, bitmap spilled)
+ HARD_REG_SET *pseudo_previous_regs,
+ bitmap spilled)
{
- int i, m, n, regno;
+ int i, n, regno;
bool changed_p;
- ira_allocno_t a, conflict_a;
+ ira_allocno_t a;
HARD_REG_SET forbidden_regs;
- ira_allocno_conflict_iterator aci;
+ bitmap temp = BITMAP_ALLOC (NULL);
+
+ /* Add pseudos which conflict with pseudos already in
+ SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
+ to allocating in two steps as some of the conflicts might have
+ a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
+ for (i = 0; i < num; i++)
+ bitmap_set_bit (temp, spilled_pseudo_regs[i]);
+
+ for (i = 0, n = num; i < n; i++)
+ {
+ int nr, j;
+ int regno = spilled_pseudo_regs[i];
+ bitmap_set_bit (temp, regno);
+
+ a = ira_regno_allocno_map[regno];
+ nr = ALLOCNO_NUM_OBJECTS (a);
+ for (j = 0; j < nr; j++)
+ {
+ ira_object_t conflict_obj;
+ ira_object_t obj = ALLOCNO_OBJECT (a, j);
+ ira_object_conflict_iterator oci;
+
+ FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
+ {
+ ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
+ if (ALLOCNO_HARD_REGNO (conflict_a) < 0
+ && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
+ && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
+ {
+ spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
+ /* ?!? This seems wrong. */
+ bitmap_set_bit (consideration_allocno_bitmap,
+ ALLOCNO_NUM (conflict_a));
+ }
+ }
+ }
+ }
if (num > 1)
qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
changed_p = false;
/* Try to assign hard registers to pseudos from
SPILLED_PSEUDO_REGS. */
- for (m = i = 0; i < num; i++)
+ for (i = 0; i < num; i++)
{
regno = spilled_pseudo_regs[i];
COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
ira_assert (reg_renumber[regno] < 0);
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file,
- " Spill %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
+ " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
ALLOCNO_MEMORY_COST (a)
- ALLOCNO_COVER_CLASS_COST (a));
allocno_reload_assign (a, forbidden_regs);
CLEAR_REGNO_REG_SET (spilled, regno);
changed_p = true;
}
- else
- spilled_pseudo_regs[m++] = regno;
- }
- if (m == 0)
- return changed_p;
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- {
- fprintf (ira_dump_file, " Spilled regs");
- for (i = 0; i < m; i++)
- fprintf (ira_dump_file, " %d", spilled_pseudo_regs[i]);
- fprintf (ira_dump_file, "\n");
- }
- /* Try to assign hard registers to pseudos conflicting with ones
- from SPILLED_PSEUDO_REGS. */
- for (i = n = 0; i < m; i++)
- {
- regno = spilled_pseudo_regs[i];
- a = ira_regno_allocno_map[regno];
- FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
- if (ALLOCNO_HARD_REGNO (conflict_a) < 0
- && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
- && ! bitmap_bit_p (consideration_allocno_bitmap,
- ALLOCNO_NUM (conflict_a)))
- {
- sorted_allocnos[n++] = conflict_a;
- bitmap_set_bit (consideration_allocno_bitmap,
- ALLOCNO_NUM (conflict_a));
- }
- }
- if (n != 0)
- {
- start_allocno_priorities (sorted_allocnos, n);
- qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
- allocno_priority_compare_func);
- for (i = 0; i < n; i++)
- {
- a = sorted_allocnos[i];
- regno = ALLOCNO_REGNO (a);
- COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
- IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
- IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
- if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
- fprintf (ira_dump_file,
- " Try assign %d(a%d), cost=%d",
- regno, ALLOCNO_NUM (a),
- ALLOCNO_MEMORY_COST (a)
- - ALLOCNO_COVER_CLASS_COST (a));
- if (allocno_reload_assign (a, forbidden_regs))
- {
- changed_p = true;
- bitmap_clear_bit (spilled, regno);
- }
- }
}
+ BITMAP_FREE (temp);
return changed_p;
}
bitmap_iterator bi;
struct ira_spilled_reg_stack_slot *slot = NULL;
- ira_assert (flag_ira && inherent_size == PSEUDO_REGNO_BYTES (regno)
+ ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
&& inherent_size <= total_size
&& ALLOCNO_HARD_REGNO (allocno) < 0);
if (! flag_ira_share_spill_slots)
if (slot->width < total_size
|| GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
continue;
-
+
EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
FIRST_PSEUDO_REGISTER, i, bi)
{
another_allocno = ira_regno_allocno_map[i];
- if (ira_allocno_live_ranges_intersect_p (allocno,
- another_allocno))
+ if (allocnos_have_intersected_live_ranges_p (allocno,
+ another_allocno))
goto cont;
}
for (cost = 0, cp = ALLOCNO_COPIES (allocno);
if (x != NULL_RTX)
{
ira_assert (slot->width >= total_size);
+#ifdef ENABLE_IRA_CHECKING
EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
FIRST_PSEUDO_REGISTER, i, bi)
{
- ira_assert (! ira_pseudo_live_ranges_intersect_p (regno, i));
+ ira_assert (! pseudos_have_intersected_live_ranges_p (regno, i));
}
+#endif
SET_REGNO_REG_SET (&slot->spilled_regs, regno);
if (internal_flag_ira_verbose > 3 && ira_dump_file)
{
int slot_num;
ira_allocno_t allocno;
- ira_assert (flag_ira && PSEUDO_REGNO_BYTES (regno) <= total_size);
+ ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
allocno = ira_regno_allocno_map[regno];
slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
if (slot_num == -1)
hard_regno = reg_renumber[regno];
ira_assert (hard_regno >= 0);
a = ira_regno_allocno_map[regno];
- length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
+ length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a);
nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
for (j = 0; j < nregs; j++)
int call_used_count, other_call_used_count;
int hard_regno, other_hard_regno;
- cost = calculate_spill_cost (regnos, in, out, insn,
+ cost = calculate_spill_cost (regnos, in, out, insn,
&length, &nrefs, &call_used_count, &hard_regno);
other_cost = calculate_spill_cost (other_regnos, in, out, insn,
&other_length, &other_nrefs,
\f
/* Entry function doing color-based register allocation. */
-void
-ira_color (void)
+static void
+color (void)
{
allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
- conflict_allocno_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
removed_splay_allocno_vec
= VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
do_coloring ();
ira_finish_assign ();
VEC_free (ira_allocno_t, heap, removed_splay_allocno_vec);
- VEC_free (ira_allocno_t, heap, conflict_allocno_vec);
VEC_free (ira_allocno_t, heap, allocno_stack_vec);
move_spill_restore ();
}
/* Do register allocation by not using allocno conflicts. It uses
only allocno live ranges. The algorithm is close to Chow's
priority coloring. */
-void
-ira_fast_allocation (void)
+static void
+fast_allocation (void)
{
- int i, j, k, l, num, class_size, hard_regno;
+ int i, j, k, num, class_size, hard_regno;
#ifdef STACK_REGS
bool no_stack_reg_p;
#endif
enum machine_mode mode;
ira_allocno_t a;
ira_allocno_iterator ai;
- allocno_live_range_t r;
+ live_range_t r;
HARD_REG_SET conflict_hard_regs, *used_hard_regs;
- allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
- FOR_EACH_ALLOCNO (a, ai)
- {
- l = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
- if (l <= 0)
- l = 1;
- allocno_priorities[ALLOCNO_NUM (a)]
- = (((double) (floor_log2 (ALLOCNO_NREFS (a))
- * (ALLOCNO_MEMORY_COST (a)
- - ALLOCNO_COVER_CLASS_COST (a))) / l)
- * (10000 / REG_FREQ_MAX)
- * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
- }
- used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
- * ira_max_point);
- for (i = 0; i < ira_max_point; i++)
- CLEAR_HARD_REG_SET (used_hard_regs[i]);
sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
* ira_allocnos_num);
num = 0;
FOR_EACH_ALLOCNO (a, ai)
sorted_allocnos[num++] = a;
- qsort (sorted_allocnos, ira_allocnos_num, sizeof (ira_allocno_t),
+ allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
+ setup_allocno_priorities (sorted_allocnos, num);
+ used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
+ * ira_max_point);
+ for (i = 0; i < ira_max_point; i++)
+ CLEAR_HARD_REG_SET (used_hard_regs[i]);
+ qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
allocno_priority_compare_func);
for (i = 0; i < num; i++)
{
+ int nr, l;
+
a = sorted_allocnos[i];
- COPY_HARD_REG_SET (conflict_hard_regs, ALLOCNO_CONFLICT_HARD_REGS (a));
- for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
- for (j = r->start; j <= r->finish; j++)
- IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
+ nr = ALLOCNO_NUM_OBJECTS (a);
+ CLEAR_HARD_REG_SET (conflict_hard_regs);
+ for (l = 0; l < nr; l++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, l);
+ IOR_HARD_REG_SET (conflict_hard_regs,
+ OBJECT_CONFLICT_HARD_REGS (obj));
+ for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
+ for (j = r->start; j <= r->finish; j++)
+ IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
+ }
cover_class = ALLOCNO_COVER_CLASS (a);
ALLOCNO_ASSIGNED_P (a) = true;
ALLOCNO_HARD_REGNO (a) = -1;
(prohibited_class_mode_regs[cover_class][mode], hard_regno)))
continue;
ALLOCNO_HARD_REGNO (a) = hard_regno;
- for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
- for (k = r->start; k <= r->finish; k++)
- IOR_HARD_REG_SET (used_hard_regs[k],
- ira_reg_mode_hard_regset[hard_regno][mode]);
+ for (l = 0; l < nr; l++)
+ {
+ ira_object_t obj = ALLOCNO_OBJECT (a, l);
+ for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
+ for (k = r->start; k <= r->finish; k++)
+ IOR_HARD_REG_SET (used_hard_regs[k],
+ ira_reg_mode_hard_regset[hard_regno][mode]);
+ }
break;
}
}
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
ira_print_disposition (ira_dump_file);
}
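
As the comment above notes, this is close to Chow's priority coloring: visit candidates in priority order, collect the hard registers already occupied over the candidate's live points, and take the first free one, falling back to memory otherwise. A miniature standalone version (one register class, one interval per candidate, invented data) might look like:

#include <stdio.h>

#define NREGS 2      /* Hard registers available.  */
#define NPOINTS 10   /* Program points.  */
#define NCANDS 3

struct cand { int start, finish, priority, hard_regno; };

/* Candidates with one live interval each, already in decreasing
   priority order for brevity; hard_regno == -1 means memory.  */
static struct cand cands[NCANDS] = {
  {0, 6, 30, -1}, {2, 4, 20, -1}, {5, 9, 10, -1}
};

int
main (void)
{
  /* used[p] is a bit mask of hard registers live at point P.  */
  unsigned used[NPOINTS] = {0};
  int i, j, p;

  for (i = 0; i < NCANDS; i++)
    {
      unsigned conflict = 0;

      /* Registers occupied anywhere the candidate is live.  */
      for (p = cands[i].start; p <= cands[i].finish; p++)
        conflict |= used[p];
      /* Take the first free register, if any, and mark it used over
         the candidate's live points.  */
      for (j = 0; j < NREGS; j++)
        if (! (conflict & (1u << j)))
          {
            cands[i].hard_regno = j;
            for (p = cands[i].start; p <= cands[i].finish; p++)
              used[p] |= 1u << j;
            break;
          }
      printf ("candidate %d -> %d\n", i, cands[i].hard_regno);
    }
  return 0;
}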
+
+\f
+
+/* Entry function doing coloring. */
+void
+ira_color (void)
+{
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
+
+ /* Setup updated costs. */
+ FOR_EACH_ALLOCNO (a, ai)
+ {
+ ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
+ ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);
+ }
+ if (ira_conflicts_p)
+ color ();
+ else
+ fast_allocation ();
+}