/* IRA allocation based on graph coloring.
- Copyright (C) 2006, 2007, 2008, 2009
+ Copyright (C) 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Contributed by Vladimir Makarov <vmakarov@redhat.com>.
continue;
cost = (cp->second == allocno
- ? ira_register_move_cost[mode][rclass][cover_class]
- : ira_register_move_cost[mode][cover_class][rclass]);
+ ? ira_get_register_move_cost (mode, rclass, cover_class)
+ : ira_get_register_move_cost (mode, cover_class, rclass));
if (decr_p)
cost = -cost;
if (a == allocno)
break;
}
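/* For reference: the ira_get_register_move_cost calls introduced above
   replace direct indexing of ira_register_move_cost.  A sketch of the
   wrapper, assuming the lazy per-mode table initialization the rename
   implies (ira_init_register_move_cost is the assumed fill helper):

       static inline int
       ira_get_register_move_cost (enum machine_mode mode,
                                   enum reg_class class1,
                                   enum reg_class class2)
       {
         if (ira_register_move_cost[mode] == NULL)
           ira_init_register_move_cost (mode);
         return ira_register_move_cost[mode][class1][class2];
       }

   The table row for MODE is built on first use, so callers must go
   through the function rather than index a possibly-NULL row.  */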
- qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
+ qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
allocno_cost_compare_func);
for (i = 0; i < j; i++)
{
ira_allocno_t a, conflict_allocno;
enum reg_class cover_class;
ira_allocno_conflict_iterator aci;
-
+
ALLOCNO_IN_GRAPH_P (allocno) = false;
VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, allocno);
cover_class = ALLOCNO_COVER_CLASS (allocno);
}
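/* At this point the allocno has left the conflict graph
   (ALLOCNO_IN_GRAPH_P cleared) and been pushed onto allocno_stack_vec;
   the later pop phase assigns hard registers in reverse order, the
   usual push/pop discipline of Chaitin-Briggs style coloring.  */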
/* Return the frequency of exit edges (if EXIT_P) or entry from/to the
- loop given by its LOOP_NODE. */
+ loop given by its LOOP_NODE. */
int
ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
* ira_loop_edge_freq (loop_node, regno, true)
+ ira_memory_move_cost[mode][rclass][0]
* ira_loop_edge_freq (loop_node, regno, false))
- - (ira_register_move_cost[mode][rclass][rclass]
+ - (ira_get_register_move_cost (mode, rclass, rclass)
* (ira_loop_edge_freq (loop_node, regno, false)
+ ira_loop_edge_freq (loop_node, regno, true))));
return cost;
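/* The cost returned above combines the border frequencies: one
   memory-move direction is charged per exit edge and the other per
   entry edge (the [0]/[1] indices presumably select the load and
   store directions), minus the register-to-register move cost that
   an in-register assignment would pay on those same edges.  For
   example, with load cost 6, store cost 4, move cost 2, and 100
   entries and 100 exits: 6*100 + 4*100 - 2*(100+100) = 600.  */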
{
int pri1, pri2, diff;
ira_allocno_t a1 = (ira_allocno_t) k1, a2 = (ira_allocno_t) k2;
-
+
pri1 = (ALLOCNO_TEMP (a1)
/ (ALLOCNO_LEFT_CONFLICTS_SIZE (a1)
* ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
&& (allocno_pri > i_allocno_pri
|| (allocno_pri == i_allocno_pri
&& (allocno_cost > i_allocno_cost
- || (allocno_cost == i_allocno_cost
+ || (allocno_cost == i_allocno_cost
&& (ALLOCNO_NUM (allocno)
> ALLOCNO_NUM (i_allocno))))))))
{
static void
setup_allocno_available_regs_num (ira_allocno_t allocno)
{
- int i, n, hard_regs_num;
+ int i, n, hard_regs_num, hard_regno;
+ enum machine_mode mode;
enum reg_class cover_class;
ira_allocno_t a;
HARD_REG_SET temp_set;
if (a == allocno)
break;
}
+ mode = ALLOCNO_MODE (allocno);
for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
- if (TEST_HARD_REG_BIT (temp_set, ira_class_hard_regs[cover_class][i]))
- n++;
+ {
+ hard_regno = ira_class_hard_regs[cover_class][i];
+ if (TEST_HARD_REG_BIT (temp_set, hard_regno)
+ || TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
+ hard_regno))
+ n++;
+ }
if (internal_flag_ira_verbose > 2 && n > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Reg %d of %s has %d regs less\n",
ALLOCNO_REGNO (allocno), reg_class_names[cover_class], n);
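/* N counts the cover-class registers this allocno cannot use: those
   already taken in temp_set plus, new with this patch, those in
   prohibited_class_mode_regs that cannot hold the allocno's mode at
   all.  The dump line then reports how many fewer candidate
   registers actually remain.  */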
int last = (hard_regno
+ hard_regno_nregs
[hard_regno][ALLOCNO_MODE (conflict_allocno)]);
-
+
while (hard_regno < last)
{
if (! TEST_HARD_REG_BIT (temp_set, hard_regno))
static void
put_allocno_into_bucket (ira_allocno_t allocno)
{
- int hard_regs_num;
enum reg_class cover_class;
cover_class = ALLOCNO_COVER_CLASS (allocno);
- hard_regs_num = ira_class_hard_regs_num[cover_class];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
return;
ALLOCNO_IN_GRAPH_P (allocno) = true;
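/* The hard_regs_num local removed above was assigned but never read
   afterwards, so the patch deletes the dead variable together with
   its initialization.  */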
for (j = 0; (int) j < ira_reg_class_cover_size; j++)
{
enum reg_class cover_class;
-
+
cover_class = ira_reg_class_cover[j];
if (loop_tree_node->reg_pressure[cover_class] == 0)
continue;
else
{
cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
- cost = (ira_register_move_cost[mode][rclass][rclass]
+ cost = (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), cover_class,
100);
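/* The bias computed above reflects that giving the subloop allocno
   the same hard register as its parent saves one register-to-register
   copy on every edge entering or leaving the subloop, hence the move
   cost scaled by (exit_freq + enter_freq).  */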
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
-
+
ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
+= (ira_memory_move_cost[mode][rclass][0] * exit_freq
+ ira_memory_move_cost[mode][rclass][1] * enter_freq);
if (hard_regno2 != hard_regno)
- cost -= (ira_register_move_cost[mode][rclass][rclass]
+ cost -= (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
}
}
+= (ira_memory_move_cost[mode][rclass][1] * exit_freq
+ ira_memory_move_cost[mode][rclass][0] * enter_freq);
if (hard_regno2 != hard_regno)
- cost -= (ira_register_move_cost[mode][rclass][rclass]
+ cost -= (ira_get_register_move_cost (mode, rclass, rclass)
* (exit_freq + enter_freq));
}
}
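/* The two symmetric blocks above charge the memory traffic a spill
   adds at the region border: a store in one direction and a load in
   the other, hence the swapped [0]/[1] indices.  When the two hard
   registers differ, a register-to-register move would have been
   needed anyway, so its cost is credited back (subtracted) rather
   than charged on top.  */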
if (i < 0)
continue;
cost = (cp->first == a
- ? ira_register_move_cost[mode][rclass][cover_class]
- : ira_register_move_cost[mode][cover_class][rclass]);
+ ? ira_get_register_move_cost (mode, rclass, cover_class)
+ : ira_get_register_move_cost (mode, cover_class, rclass));
ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
cover_class, ALLOCNO_COVER_CLASS_COST (a),
ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
reg_max_ref_width[ALLOCNO_REGNO (a)]));
-
+
if (a == allocno)
break;
}
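/* The MAX a few lines above is the stack-slot width the spilled
   pseudo actually needs: its natural size in bytes, or the width of
   its widest reference (reg_max_ref_width) when, e.g., a paradoxical
   subreg accesses it in a wider mode.  */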
if (slot->width < total_size
|| GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
continue;
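/* A recorded slot is reusable only if it is at least total_size
   bytes wide and its remembered mode covers the pseudo's inherent
   size; narrower slots are skipped and the scan continues.  */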
-
+
EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
FIRST_PSEUDO_REGISTER, i, bi)
{
int call_used_count, other_call_used_count;
int hard_regno, other_hard_regno;
- cost = calculate_spill_cost (regnos, in, out, insn,
+ cost = calculate_spill_cost (regnos, in, out, insn,
&length, &nrefs, &call_used_count, &hard_regno);
other_cost = calculate_spill_cost (other_regnos, in, out, insn,
&other_length, &other_nrefs,