/* Allocated buffers for allocno_pref. */
static enum reg_class *allocno_pref_buffer;
+/* Record register class of each allocno with the same regno. */
+static enum reg_class *common_classes;
+
/* Execution frequency of the current insn. */
static int frequency;
ira_init_register_move_cost (mode);
if (secondary_class != NO_REGS)
- return (move_cost[mode][secondary_class][rclass] + sri.extra_cost
- + copy_cost (x, mode, secondary_class, to_p, &sri));
+ {
+ if (!move_cost[mode])
+ init_move_cost (mode);
+ return (move_cost[mode][secondary_class][rclass] + sri.extra_cost
+ + copy_cost (x, mode, secondary_class, to_p, &sri));
+ }
/* For memory, use the memory move cost, for (hard) registers, use
the cost to move between the register classes, and use 2 for
if (MEM_P (x) || rclass == NO_REGS)
return sri.extra_cost + ira_memory_move_cost[mode][rclass][to_p != 0];
else if (REG_P (x))
- return
- (sri.extra_cost + move_cost[mode][REGNO_REG_CLASS (REGNO (x))][rclass]);
+ {
+ if (!move_cost[mode])
+ init_move_cost (mode);
+ return (sri.extra_cost + move_cost[mode][REGNO_REG_CLASS (REGNO (x))][rclass]);
+ }
else
/* If this is a constant, we may eventually want to call rtx_cost
here. */
int alt;
int i, j, k;
rtx set;
+ int insn_allows_mem[MAX_RECOG_OPERANDS];
+
+ for (i = 0; i < n_ops; i++)
+ insn_allows_mem[i] = 0;
/* Process each alternative, each time minimizing an operand's cost
with the cost for each operand in that alternative. */
j = p[0] - '0';
classes[i] = classes[j];
allows_mem[i] = allows_mem[j];
+ if (allows_mem[i])
+ insn_allows_mem[i] = 1;
if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
{
+ (recog_data.operand_type[i] != OP_OUT
? ira_memory_move_cost[mode][classes[i]][1] : 0)
- allows_mem[i]) * frequency;
+
/* If we have assigned a class to this allocno in our
first pass, add a cost to this alternative
corresponding to what we would add if this allocno
/* It doesn't seem worth distinguishing between
offsettable and non-offsettable addresses
here. */
- allows_mem[i] = 1;
+ insn_allows_mem[i] = allows_mem[i] = 1;
if (MEM_P (op))
win = 1;
break;
|| (CONSTANT_P (op)
&& (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
win = 1;
- allows_mem[i] = 1;
+ insn_allows_mem[i] = allows_mem[i] = 1;
case 'r':
classes[i] = ira_reg_class_union[classes[i]][GENERAL_REGS];
break;
if (EXTRA_MEMORY_CONSTRAINT (c, p))
{
/* Every MEM can be reloaded to fit. */
- allows_mem[i] = 1;
+ insn_allows_mem[i] = allows_mem[i] = 1;
if (MEM_P (op))
win = 1;
}
}
}
+ for (i = 0; i < n_ops; i++)
+ {
+ ira_allocno_t a;
+ rtx op = ops[i];
+
+ if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
+ continue;
+ a = ira_curr_regno_allocno_map [REGNO (op)];
+ if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
+ ALLOCNO_BAD_SPILL_P (a) = true;
+ }
+
/* If this insn is a single set copying operand 1 to operand 0 and
one operand is an allocno with the other a hard reg or an allocno
that prefers a hard register that is in its own register class
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
break;
+ ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[REGNO (x)]) = true;
pp = COSTS_OF_ALLOCNO (allocno_costs,
ALLOCNO_NUM (ira_curr_regno_allocno_map
[REGNO (x)]));
&& (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
&& MEM_P (XEXP (note, 0)))
{
- COSTS_OF_ALLOCNO (allocno_costs,
- ALLOCNO_NUM (ira_curr_regno_allocno_map
- [REGNO (SET_DEST (set))]))->mem_cost
- -= (ira_memory_move_cost[GET_MODE (SET_DEST (set))][GENERAL_REGS][1]
- * frequency);
+ enum reg_class cl = GENERAL_REGS;
+ rtx reg = SET_DEST (set);
+ int num = ALLOCNO_NUM (ira_curr_regno_allocno_map[REGNO (reg)]);
+
+ if (allocno_pref)
+ cl = allocno_pref[num];
+ COSTS_OF_ALLOCNO (allocno_costs, num)->mem_cost
+ -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
record_address_regs (GET_MODE (SET_SRC (set)), XEXP (SET_SRC (set), 0),
0, MEM, SCRATCH, frequency * 2);
}
{
fprintf (f, " %s:%d", reg_class_names[rclass],
COSTS_OF_ALLOCNO (allocno_costs, i)->cost[k]);
- if (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
- || flag_ira_algorithm == IRA_ALGORITHM_MIXED)
+ if (flag_ira_region == IRA_REGION_ALL
+ || flag_ira_region == IRA_REGION_MIXED)
fprintf (f, ",%d", COSTS_OF_ALLOCNO (total_costs, i)->cost[k]);
}
}
ira_allocno_t a, parent_a;
int rclass, a_num, parent_a_num;
ira_loop_tree_node_t parent;
- int best_cost;
- enum reg_class best, alt_class, common_class;
+ int best_cost, allocno_cost;
+ enum reg_class best, alt_class;
#ifdef FORBIDDEN_INC_DEC_CLASSES
int inc_dec_p = false;
#endif
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
a_num = ALLOCNO_NUM (a);
- if ((flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
- || flag_ira_algorithm == IRA_ALGORITHM_MIXED)
+ if ((flag_ira_region == IRA_REGION_ALL
+ || flag_ira_region == IRA_REGION_MIXED)
&& (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
&& (parent_a = parent->regno_allocno_map[i]) != NULL
/* There are no caps yet. */
> reg_class_size[alt_class]))
alt_class = reg_class_subunion[alt_class][rclass];
}
+ alt_class = ira_class_translate[alt_class];
if (pass == flag_expensive_optimizations)
{
if (best_cost > temp_costs->mem_cost)
i, reg_class_names[best], reg_class_names[alt_class]);
}
if (best_cost > temp_costs->mem_cost)
- common_class = NO_REGS;
+ common_classes[i] = NO_REGS;
+ else if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
+ /* Make the common class the biggest class of best and
+ alt_class. */
+ common_classes[i] = alt_class == NO_REGS ? best : alt_class;
else
/* Make the common class a cover class. Remember all
allocnos with the same regno should have the same cover
class. */
- common_class = ira_class_translate[best];
+ common_classes[i] = ira_class_translate[best];
for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
a_num = ALLOCNO_NUM (a);
- if (common_class == NO_REGS)
+ if (common_classes[i] == NO_REGS)
best = NO_REGS;
else
{
/* Finding best class which is subset of the common
class. */
best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
+ allocno_cost = best_cost;
best = ALL_REGS;
for (k = 0; k < cost_classes_num; k++)
{
rclass = cost_classes[k];
- if (! ira_class_subset_p[rclass][common_class])
+ if (! ira_class_subset_p[rclass][common_classes[i]])
continue;
/* Ignore classes that are too small for this
operand or invalid for an operand that was
{
best_cost
= COSTS_OF_ALLOCNO (total_costs, a_num)->cost[k];
+ allocno_cost
+ = COSTS_OF_ALLOCNO (allocno_costs, a_num)->cost[k];
best = (enum reg_class) rclass;
}
else if (COSTS_OF_ALLOCNO (total_costs, a_num)->cost[k]
== best_cost)
- best = ira_reg_class_union[best][rclass];
+ {
+ best = ira_reg_class_union[best][rclass];
+ allocno_cost
+ = MAX (allocno_cost,
+ COSTS_OF_ALLOCNO (allocno_costs,
+ a_num)->cost[k]);
+ }
}
+ ALLOCNO_COVER_CLASS_COST (a) = allocno_cost;
}
+ ira_assert (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY
+ || ira_class_translate[best] == common_classes[i]);
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL
&& (pass == 0 || allocno_pref[a_num] != best))
{
ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
fprintf (ira_dump_file, ") best %s, cover %s\n",
reg_class_names[best],
- reg_class_names[ira_class_translate[best]]);
+ reg_class_names[common_classes[i]]);
}
allocno_pref[a_num] = best;
}
int *reg_costs;
enum reg_class cover_class, rclass;
enum machine_mode mode;
+ HARD_REG_SET *pref;
ira_allocno_t a;
ira_allocno_iterator ai;
{
i = ALLOCNO_NUM (a);
mode = ALLOCNO_MODE (a);
- cover_class = ira_class_translate[allocno_pref[i]];
+ cover_class = common_classes[ALLOCNO_REGNO (a)];
ira_assert (allocno_pref[i] == NO_REGS || cover_class != NO_REGS);
ALLOCNO_MEMORY_COST (a) = COSTS_OF_ALLOCNO (allocno_costs, i)->mem_cost;
ira_set_allocno_cover_class (a, cover_class);
if (cover_class == NO_REGS)
continue;
ALLOCNO_AVAILABLE_REGS_NUM (a) = ira_available_class_regs[cover_class];
- num = cost_class_nums[allocno_pref[i]];
- ira_assert (num >= 0);
- ALLOCNO_COVER_CLASS_COST (a)
- = COSTS_OF_ALLOCNO (allocno_costs, i)->cost[num];
+      pref = &reg_class_contents[allocno_pref[i]];
if (optimize && ALLOCNO_COVER_CLASS (a) != allocno_pref[i])
{
n = ira_class_hard_regs_num[cover_class];
for (j = n - 1; j >= 0; j--)
{
regno = ira_class_hard_regs[cover_class][j];
- rclass = REGNO_REG_CLASS (regno);
- num = cost_class_nums[rclass];
- if (num < 0)
+ if (TEST_HARD_REG_BIT (*pref, regno))
+ reg_costs[j] = ALLOCNO_COVER_CLASS_COST (a);
+ else
{
- /* The hard register class is not a cover class or a
- class not fully inside in a cover class -- use
- the allocno cover class. */
- ira_assert (ira_hard_regno_cover_class[regno] == cover_class);
- num = cost_class_nums[cover_class];
+ rclass = REGNO_REG_CLASS (regno);
+ num = cost_class_nums[rclass];
+ if (num < 0)
+ {
+		  /* The hard register class is not a cover class or a
+		     class not fully inside a cover class -- use
+		     the allocno cover class.  */
+ ira_assert (ira_hard_regno_cover_class[regno]
+ == cover_class);
+ num = cost_class_nums[cover_class];
+ }
+ reg_costs[j] = COSTS_OF_ALLOCNO (allocno_costs, i)->cost[num];
}
- reg_costs[j] = COSTS_OF_ALLOCNO (allocno_costs, i)->cost[num];
}
}
}
allocno_pref_buffer
= (enum reg_class *) ira_allocate (sizeof (enum reg_class)
* ira_allocnos_num);
+ common_classes
+ = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
+ * max_reg_num ());
find_allocno_class_costs ();
setup_allocno_cover_class_and_costs ();
/* Because we could process operands only as subregs, check mode of
if (ira_register_move_cost[ALLOCNO_MODE (a)] == NULL
&& have_regs_of_mode[ALLOCNO_MODE (a)])
ira_init_register_move_cost (ALLOCNO_MODE (a));
+ ira_free (common_classes);
ira_free (allocno_pref_buffer);
ira_free (total_costs);
ira_free (allocno_costs);