/* IRA hard register and memory cost calculation for allocnos or pseudos.
- Copyright (C) 2006, 2007, 2008, 2009
+ Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Vladimir Makarov <vmakarov@redhat.com>.
#include "addresses.h"
#include "insn-config.h"
#include "recog.h"
-#include "toplev.h"
+#include "reload.h"
+#include "diagnostic-core.h"
#include "target.h"
#include "params.h"
#include "ira-int.h"
{
int mem_cost;
/* Costs for register classes start here. We process only some
- register classes (cover classes on the 1st cost calculation
- iteration and important classes on the 2nd iteration). */
+ allocno classes. */
int cost[1];
};
-/* Initialized once. It is a maximal possible size of the allocated
- struct costs. */
-static int max_struct_costs_size;
-
-/* Allocated and initialized once, and used to initialize cost values
- for each insn. */
-static struct costs *init_cost;
-
-/* Allocated once, and used for temporary purposes. */
-static struct costs *temp_costs;
-
-/* Allocated once, and used for the cost calculation. */
-static struct costs *op_costs[MAX_RECOG_OPERANDS];
-static struct costs *this_op_costs[MAX_RECOG_OPERANDS];
+#define max_struct_costs_size \
+ (this_target_ira_int->x_max_struct_costs_size)
+#define init_cost \
+ (this_target_ira_int->x_init_cost)
+#define temp_costs \
+ (this_target_ira_int->x_temp_costs)
+#define op_costs \
+ (this_target_ira_int->x_op_costs)
+#define this_op_costs \
+ (this_target_ira_int->x_this_op_costs)
/* Costs of each class for each allocno or pseudo. */
static struct costs *costs;
/* Accumulated costs of each class for each allocno. */
static struct costs *total_allocno_costs;
-/* Classes used for cost calculation. They may be different on
- different iterations of the cost calculations or in different
- optimization modes. */
-static enum reg_class *cost_classes;
-
-/* The size of the previous array. */
-static int cost_classes_num;
-
-/* Map: cost class -> order number (they start with 0) of the cost
- class. The order number is negative for non-cost classes. */
-static int cost_class_nums[N_REG_CLASSES];
-
/* It is the current size of struct costs. */
static int struct_costs_size;
((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
/* Return index in COSTS when processing reg with REGNO. */
-#define COST_INDEX(regno) (allocno_p \
- ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
+#define COST_INDEX(regno) (allocno_p \
+ ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
: (int) regno)
/* Record register class preferences of each allocno or pseudo. Null
/* Allocated buffers for pref. */
static enum reg_class *pref_buffer;
-/* Record cover register class of each allocno with the same regno. */
-static enum reg_class *regno_cover_class;
+/* Record allocno class of each allocno with the same regno. */
+static enum reg_class *regno_aclass;
+
+/* Record cost gains for not allocating a register with an invariant
+ equivalence. */
+static int *regno_equiv_gains;
/* Execution frequency of the current insn. */
static int frequency;
\f
+/* Info about reg classes whose costs are calculated for a pseudo.
+   Instances are interned in cost_classes_htab so that pseudos with
+   the same set of cost classes share one structure.  */
+struct cost_classes
+{
+  /* Number of the cost classes in the subsequent array.  */
+  int num;
+  /* Container of the cost classes.  Only the first NUM elements are
+     meaningful.  */
+  enum reg_class classes[N_REG_CLASSES];
+  /* Map reg class -> index of the reg class in the previous array.
+     -1 if it is not a cost class.  */
+  int index[N_REG_CLASSES];
+  /* Map hard regno -> index of the first class in array CLASSES
+     containing the hard regno, -1 otherwise.  */
+  int hard_regno_index[FIRST_PSEUDO_REGISTER];
+};
+
+/* Types of pointers to the structure above. */
+typedef struct cost_classes *cost_classes_t;
+typedef const struct cost_classes *const_cost_classes_t;
+
+/* Info about cost classes for each pseudo. */
+static cost_classes_t *regno_cost_classes;
+
+/* Returns hash value for cost classes info V.  Only the first NUM
+   elements of the CLASSES array are hashed, which matches the extent
+   compared by cost_classes_eq.  */
+static hashval_t
+cost_classes_hash (const void *v)
+{
+  const_cost_classes_t hv = (const_cost_classes_t) v;
+
+  return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
+}
+
+/* Compares cost classes info V1 and V2.  Returns nonzero iff they
+   describe the same set of cost classes, as the htab_eq callback
+   contract requires.  */
+static int
+cost_classes_eq (const void *v1, const void *v2)
+{
+  const_cost_classes_t hv1 = (const_cost_classes_t) v1;
+  const_cost_classes_t hv2 = (const_cost_classes_t) v2;
+
+  /* memcmp returns 0 on equality, so its result must be compared
+     against 0; using it directly as a truth value would invert the
+     predicate and make every hash lookup miss.  */
+  return (hv1->num == hv2->num
+	  && memcmp (hv1->classes, hv2->classes,
+		     sizeof (enum reg_class) * hv1->num) == 0);
+}
+
+/* Delete cost classes info V from the hash table.  Called by the
+   hash table machinery on entry removal; V was allocated by
+   setup_cost_classes, so release it with ira_free.  */
+static void
+cost_classes_del (void *v)
+{
+  ira_free (v);
+}
+
+/* Hash table of unique cost classes. */
+static htab_t cost_classes_htab;
+
+/* Map allocno class -> cost classes for pseudo of given allocno
+ class. */
+static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
+
+/* Map mode -> cost classes for a pseudo of the given mode.  */
+static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
+
+/* Initialize info about the cost classes for each pseudo.  Allocates
+   and zeroes the regno -> cost classes map, clears both lookup
+   caches, and creates the hash table of unique cost class sets.  */
+static void
+initiate_regno_cost_classes (void)
+{
+  int size = sizeof (cost_classes_t) * max_reg_num ();
+
+  regno_cost_classes = (cost_classes_t *) ira_allocate (size);
+  memset (regno_cost_classes, 0, size);
+  memset (cost_classes_aclass_cache, 0,
+	  sizeof (cost_classes_t) * N_REG_CLASSES);
+  memset (cost_classes_mode_cache, 0,
+	  sizeof (cost_classes_t) * MAX_MACHINE_MODE);
+  cost_classes_htab
+    = htab_create (200, cost_classes_hash, cost_classes_eq, cost_classes_del);
+}
+
+/* Create new cost classes from cost classes FROM and set up members
+   index and hard_regno_index.  Return the new classes.  The function
+   implements some common code of two functions
+   setup_regno_cost_classes_by_aclass and
+   setup_regno_cost_classes_by_mode.  The returned structure is
+   heap-allocated; ownership passes to the caller (it is normally
+   stored in cost_classes_htab and freed via cost_classes_del).  */
+static cost_classes_t
+setup_cost_classes (cost_classes_t from)
+{
+  cost_classes_t classes_ptr;
+  enum reg_class cl;
+  int i, j, hard_regno;
+
+  classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
+  classes_ptr->num = from->num;
+  /* Mark all classes and hard regnos as "not a cost class" before
+     filling in the real entries below.  */
+  for (i = 0; i < N_REG_CLASSES; i++)
+    classes_ptr->index[i] = -1;
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    classes_ptr->hard_regno_index[i] = -1;
+  for (i = 0; i < from->num; i++)
+    {
+      cl = classes_ptr->classes[i] = from->classes[i];
+      classes_ptr->index[cl] = i;
+      /* Iterating downwards over hard regs and over i upwards means a
+	 hard regno keeps the index of the *first* (smallest i) class
+	 containing it, as the structure's comment promises: the test
+	 below only overwrites entries still equal to -1.  */
+      for (j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
+	{
+	  hard_regno = ira_class_hard_regs[cl][j];
+	  if (classes_ptr->hard_regno_index[hard_regno] < 0)
+	    classes_ptr->hard_regno_index[hard_regno] = i;
+	}
+    }
+  return classes_ptr;
+}
+
+/* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
+   This function is used when we know an initial approximation of
+   allocno class of the pseudo already, e.g. on the second iteration
+   of class cost calculation or after class cost calculation in
+   register-pressure sensitive insn scheduling or register-pressure
+   sensitive loop-invariant motion.  */
+static void
+setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
+{
+  /* Scratch buffer used to build the candidate set before interning
+     it in the hash table.  NOTE(review): being static this is not
+     reentrant — presumably fine for the single-threaded compiler.  */
+  static struct cost_classes classes;
+  cost_classes_t classes_ptr;
+  enum reg_class cl;
+  int i;
+  PTR *slot;
+  HARD_REG_SET temp, temp2;
+
+  if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
+    {
+      /* Allocatable hard regs of ACLASS.  */
+      COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
+      AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
+      classes.num = 0;
+      for (i = 0; i < ira_important_classes_num; i++)
+	{
+	  cl = ira_important_classes[i];
+	  COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
+	  AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
+	  /* Skip a class that is neither a pressure class nor ACLASS
+	     itself when its allocatable regs all lie inside ACLASS —
+	     its costs would add nothing to the decision.  */
+	  if (! ira_reg_pressure_class_p[cl]
+	      && hard_reg_set_subset_p (temp2, temp) && cl != aclass)
+	    continue;
+	  classes.classes[classes.num++] = cl;
+	}
+      /* Intern the set: reuse an existing equal entry or create a new
+	 one, then cache it for ACLASS.  */
+      slot = htab_find_slot (cost_classes_htab, &classes, INSERT);
+      if (*slot == NULL)
+	{
+	  classes_ptr = setup_cost_classes (&classes);
+	  *slot = classes_ptr;
+	}
+      classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
+    }
+  regno_cost_classes[regno] = classes_ptr;
+}
+
+/* Setup cost classes for pseudo REGNO with MODE.  Usage of MODE can
+   decrease number of cost classes for the pseudo, if hard registers
+   of some important classes can not hold a value of MODE.  So the
+   pseudo can not get hard register of some important classes and cost
+   calculation for such important classes is only wasting CPU
+   time.  */
+static void
+setup_regno_cost_classes_by_mode (int regno, enum machine_mode mode)
+{
+  static struct cost_classes classes;
+  cost_classes_t classes_ptr;
+  enum reg_class cl;
+  int i;
+  PTR *slot;
+  HARD_REG_SET temp;
+
+  if ((classes_ptr = cost_classes_mode_cache[mode]) == NULL)
+    {
+      classes.num = 0;
+      for (i = 0; i < ira_important_classes_num; i++)
+	{
+	  cl = ira_important_classes[i];
+	  /* Skip CL if every allocatable hard reg of CL is prohibited
+	     for MODE — the pseudo could never live in CL.  */
+	  COPY_HARD_REG_SET (temp, ira_prohibited_class_mode_regs[cl][mode]);
+	  IOR_HARD_REG_SET (temp, ira_no_alloc_regs);
+	  if (hard_reg_set_subset_p (reg_class_contents[cl], temp))
+	    continue;
+	  classes.classes[classes.num++] = cl;
+	}
+      slot = htab_find_slot (cost_classes_htab, &classes, INSERT);
+      if (*slot == NULL)
+	{
+	  classes_ptr = setup_cost_classes (&classes);
+	  *slot = classes_ptr;
+	}
+      /* Assign CLASSES_PTR from the slot as well as the cache: if an
+	 equal set was already interned in the hash table, CLASSES_PTR
+	 would otherwise still be NULL here and a null pointer would be
+	 stored into regno_cost_classes below.  This also matches
+	 setup_regno_cost_classes_by_aclass.  */
+      classes_ptr = cost_classes_mode_cache[mode] = (cost_classes_t) *slot;
+    }
+  regno_cost_classes[regno] = classes_ptr;
+}
+
+/* Finalize info about the cost classes for each pseudo.  Frees the
+   regno -> cost classes map and the hash table; the interned
+   cost_classes entries themselves are released through the table's
+   cost_classes_del callback.  NOTE(review): the aclass/mode caches
+   are left pointing at freed entries — presumably
+   initiate_regno_cost_classes reinitializes them before any reuse;
+   confirm.  */
+static void
+finish_regno_cost_classes (void)
+{
+  ira_free (regno_cost_classes);
+  htab_delete (cost_classes_htab);
+}
+
+\f
+
/* Compute the cost of loading X into (if TO_P is TRUE) or from (if
TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
be a pseudo register. */
static int
-copy_cost (rtx x, enum machine_mode mode, enum reg_class rclass, bool to_p,
+copy_cost (rtx x, enum machine_mode mode, reg_class_t rclass, bool to_p,
secondary_reload_info *prev_sri)
{
secondary_reload_info sri;
- enum reg_class secondary_class = NO_REGS;
+ reg_class_t secondary_class = NO_REGS;
/* If X is a SCRATCH, there is actually nothing to move since we are
assuming optimal allocation. */
return 0;
/* Get the class we will actually use for a reload. */
- rclass = PREFERRED_RELOAD_CLASS (x, rclass);
+ rclass = targetm.preferred_reload_class (x, rclass);
/* If we need a secondary reload for an intermediate, the cost is
that to load the input into the intermediate register, then to
{
if (!move_cost[mode])
init_move_cost (mode);
- return (move_cost[mode][secondary_class][rclass] + sri.extra_cost
+ return (move_cost[mode][(int) secondary_class][(int) rclass]
+ + sri.extra_cost
+ copy_cost (x, mode, secondary_class, to_p, &sri));
}
the cost to move between the register classes, and use 2 for
everything else (constants). */
if (MEM_P (x) || rclass == NO_REGS)
- return sri.extra_cost + ira_memory_move_cost[mode][rclass][to_p != 0];
+ return sri.extra_cost
+ + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
else if (REG_P (x))
{
if (!move_cost[mode])
init_move_cost (mode);
- return (sri.extra_cost + move_cost[mode][REGNO_REG_CLASS (REGNO (x))][rclass]);
+ return (sri.extra_cost
+ + move_cost[mode][REGNO_REG_CLASS (REGNO (x))][(int) rclass]);
}
else
/* If this is a constant, we may eventually want to call rtx_cost
static void
record_reg_classes (int n_alts, int n_ops, rtx *ops,
enum machine_mode *modes, const char **constraints,
- rtx insn, struct costs **op_costs,
- enum reg_class *pref)
+ rtx insn, enum reg_class *pref)
{
int alt;
int i, j, k;
int alt_fail = 0;
int alt_cost = 0, op_cost_add;
+ if (!recog_data.alternative_enabled_p[alt])
+ {
+ for (i = 0; i < recog_data.n_operands; i++)
+ constraints[i] = skip_alternative (constraints[i]);
+
+ continue;
+ }
+
for (i = 0; i < n_ops; i++)
{
unsigned char c;
Moreover, if we cannot tie them, this alternative
needs to do a copy, which is one insn. */
struct costs *pp = this_op_costs[i];
-
- for (k = 0; k < cost_classes_num; k++)
+ int *pp_costs = pp->cost;
+ cost_classes_t cost_classes_ptr
+ = regno_cost_classes[REGNO (op)];
+ enum reg_class *cost_classes = cost_classes_ptr->classes;
+ bool in_p = recog_data.operand_type[i] != OP_OUT;
+ bool out_p = recog_data.operand_type[i] != OP_IN;
+ enum reg_class op_class = classes[i];
+ move_table *move_in_cost, *move_out_cost;
+
+ ira_init_register_move_cost_if_necessary (mode);
+ if (! in_p)
{
- rclass = cost_classes[k];
- pp->cost[k]
- = (((recog_data.operand_type[i] != OP_OUT
- ? ira_get_may_move_cost (mode, rclass,
- classes[i], true) : 0)
- + (recog_data.operand_type[i] != OP_IN
- ? ira_get_may_move_cost (mode, classes[i],
- rclass, false) : 0))
- * frequency);
+ ira_assert (out_p);
+ move_out_cost = ira_may_move_out_cost[mode];
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ rclass = cost_classes[k];
+ pp_costs[k]
+ = move_out_cost[op_class][rclass] * frequency;
+ }
+ }
+ else if (! out_p)
+ {
+ ira_assert (in_p);
+ move_in_cost = ira_may_move_in_cost[mode];
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ rclass = cost_classes[k];
+ pp_costs[k]
+ = move_in_cost[rclass][op_class] * frequency;
+ }
+ }
+ else
+ {
+ move_in_cost = ira_may_move_in_cost[mode];
+ move_out_cost = ira_may_move_out_cost[mode];
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ rclass = cost_classes[k];
+ pp_costs[k] = ((move_in_cost[rclass][op_class]
+ + move_out_cost[op_class][rclass])
+ * frequency);
+ }
}
/* If the alternative actually allows memory, make
things a bit cheaper since we won't need an extra
insn to load it. */
pp->mem_cost
- = ((recog_data.operand_type[i] != OP_IN
- ? ira_memory_move_cost[mode][classes[i]][0] : 0)
- + (recog_data.operand_type[i] != OP_OUT
- ? ira_memory_move_cost[mode][classes[i]][1] : 0)
+ = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
+ + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
- allows_mem[i]) * frequency;
- /* If we have assigned a class to this allocno in our
- first pass, add a cost to this alternative
- corresponding to what we would add if this allocno
- were not in the appropriate class. We could use
- cover class here but it is less accurate
- approximation. */
+ /* If we have assigned a class to this allocno in
+ our first pass, add a cost to this alternative
+ corresponding to what we would add if this
+ allocno were not in the appropriate class. */
if (pref)
{
enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
if (pref_class == NO_REGS)
alt_cost
- += ((recog_data.operand_type[i] != OP_IN
- ? ira_memory_move_cost[mode][classes[i]][0]
- : 0)
- + (recog_data.operand_type[i] != OP_OUT
- ? ira_memory_move_cost[mode][classes[i]][1]
+ += ((out_p
+ ? ira_memory_move_cost[mode][op_class][0] : 0)
+ + (in_p
+ ? ira_memory_move_cost[mode][op_class][1]
: 0));
else if (ira_reg_class_intersect
- [pref_class][classes[i]] == NO_REGS)
- alt_cost += ira_get_register_move_cost (mode,
- pref_class,
- classes[i]);
+ [pref_class][op_class] == NO_REGS)
+ alt_cost
+ += ira_register_move_cost[mode][pref_class][op_class];
}
if (REGNO (ops[i]) != REGNO (ops[j])
&& ! find_reg_note (insn, REG_DEAD, op))
to be allocated to a register that can be the
base of an address, i.e. BASE_REG_CLASS. */
classes[i]
- = ira_reg_class_union[classes[i]]
+ = ira_reg_class_subunion[classes[i]]
[base_reg_class (VOIDmode, ADDRESS, SCRATCH)];
break;
win = 1;
insn_allows_mem[i] = allows_mem[i] = 1;
case 'r':
- classes[i] = ira_reg_class_union[classes[i]][GENERAL_REGS];
+ classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
break;
default:
if (REG_CLASS_FROM_CONSTRAINT (c, p) != NO_REGS)
- classes[i] = ira_reg_class_union[classes[i]]
+ classes[i] = ira_reg_class_subunion[classes[i]]
[REG_CLASS_FROM_CONSTRAINT (c, p)];
#ifdef EXTRA_CONSTRAINT_STR
else if (EXTRA_CONSTRAINT_STR (op, c, p))
that can be the base of an address,
i.e. BASE_REG_CLASS. */
classes[i]
- = ira_reg_class_union[classes[i]]
+ = ira_reg_class_subunion[classes[i]]
[base_reg_class (VOIDmode, ADDRESS, SCRATCH)];
}
#endif
}
else
{
+ unsigned int regno = REGNO (op);
struct costs *pp = this_op_costs[i];
-
- for (k = 0; k < cost_classes_num; k++)
+ int *pp_costs = pp->cost;
+ cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
+ enum reg_class *cost_classes = cost_classes_ptr->classes;
+ bool in_p = recog_data.operand_type[i] != OP_OUT;
+ bool out_p = recog_data.operand_type[i] != OP_IN;
+ enum reg_class op_class = classes[i];
+ move_table *move_in_cost, *move_out_cost;
+
+ ira_init_register_move_cost_if_necessary (mode);
+ if (! in_p)
{
- rclass = cost_classes[k];
- pp->cost[k]
- = (((recog_data.operand_type[i] != OP_OUT
- ? ira_get_may_move_cost (mode, rclass,
- classes[i], true) : 0)
- + (recog_data.operand_type[i] != OP_IN
- ? ira_get_may_move_cost (mode, classes[i],
- rclass, false) : 0))
- * frequency);
+ ira_assert (out_p);
+ move_out_cost = ira_may_move_out_cost[mode];
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ rclass = cost_classes[k];
+ pp_costs[k]
+ = move_out_cost[op_class][rclass] * frequency;
+ }
+ }
+ else if (! out_p)
+ {
+ ira_assert (in_p);
+ move_in_cost = ira_may_move_in_cost[mode];
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ rclass = cost_classes[k];
+ pp_costs[k]
+ = move_in_cost[rclass][op_class] * frequency;
+ }
+ }
+ else
+ {
+ move_in_cost = ira_may_move_in_cost[mode];
+ move_out_cost = ira_may_move_out_cost[mode];
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ rclass = cost_classes[k];
+ pp_costs[k] = ((move_in_cost[rclass][op_class]
+ + move_out_cost[op_class][rclass])
+ * frequency);
+ }
}
/* If the alternative actually allows memory, make
things a bit cheaper since we won't need an extra
insn to load it. */
pp->mem_cost
- = ((recog_data.operand_type[i] != OP_IN
- ? ira_memory_move_cost[mode][classes[i]][0] : 0)
- + (recog_data.operand_type[i] != OP_OUT
- ? ira_memory_move_cost[mode][classes[i]][1] : 0)
+ = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
+ + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
- allows_mem[i]) * frequency;
- /* If we have assigned a class to this allocno in our
- first pass, add a cost to this alternative
- corresponding to what we would add if this allocno
- were not in the appropriate class. We could use
- cover class here but it is less accurate
- approximation. */
+ /* If we have assigned a class to this allocno in
+ our first pass, add a cost to this alternative
+ corresponding to what we would add if this
+ allocno were not in the appropriate class. */
if (pref)
{
enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
if (pref_class == NO_REGS)
alt_cost
- += ((recog_data.operand_type[i] != OP_IN
- ? ira_memory_move_cost[mode][classes[i]][0]
- : 0)
- + (recog_data.operand_type[i] != OP_OUT
- ? ira_memory_move_cost[mode][classes[i]][1]
+ += ((out_p
+ ? ira_memory_move_cost[mode][op_class][0] : 0)
+ + (in_p
+ ? ira_memory_move_cost[mode][op_class][1]
: 0));
- else if (ira_reg_class_intersect[pref_class][classes[i]]
+ else if (ira_reg_class_intersect[pref_class][op_class]
== NO_REGS)
- alt_cost += ira_get_register_move_cost (mode,
- pref_class,
- classes[i]);
+ alt_cost += ira_register_move_cost[mode][pref_class][op_class];
}
}
}
if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
{
struct costs *pp = op_costs[i], *qq = this_op_costs[i];
+ int *pp_costs = pp->cost, *qq_costs = qq->cost;
int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
+ cost_classes_t cost_classes_ptr
+ = regno_cost_classes[REGNO (ops[i])];
pp->mem_cost = MIN (pp->mem_cost,
(qq->mem_cost + op_cost_add) * scale);
- for (k = 0; k < cost_classes_num; k++)
- pp->cost[k]
- = MIN (pp->cost[k], (qq->cost[k] + op_cost_add) * scale);
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ pp_costs[k]
+ = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
}
}
&& REG_P (ops[0]) && REG_P (ops[1])
&& find_regno_note (insn, REG_DEAD, REGNO (ops[1])))
for (i = 0; i <= 1; i++)
- if (REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
+ if (REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER
+ && REGNO (ops[!i]) < FIRST_PSEUDO_REGISTER)
{
- unsigned int regno = REGNO (ops[!i]);
+ unsigned int regno = REGNO (ops[i]);
+ unsigned int other_regno = REGNO (ops[!i]);
enum machine_mode mode = GET_MODE (ops[!i]);
+ cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
+ enum reg_class *cost_classes = cost_classes_ptr->classes;
enum reg_class rclass;
- unsigned int nr;
+ int nr;
- if (regno < FIRST_PSEUDO_REGISTER)
- for (k = 0; k < cost_classes_num; k++)
- {
- rclass = cost_classes[k];
- if (TEST_HARD_REG_BIT (reg_class_contents[rclass], regno)
- && (reg_class_size[rclass]
- == (unsigned) CLASS_MAX_NREGS (rclass, mode)))
- {
- if (reg_class_size[rclass] == 1)
- op_costs[i]->cost[k] = -frequency;
- else
- {
- for (nr = 0;
- nr < (unsigned) hard_regno_nregs[regno][mode];
- nr++)
- if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
- regno + nr))
- break;
-
- if (nr == (unsigned) hard_regno_nregs[regno][mode])
- op_costs[i]->cost[k] = -frequency;
- }
- }
- }
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ rclass = cost_classes[k];
+ if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
+ && (reg_class_size[rclass]
+ == (unsigned) CLASS_MAX_NREGS (rclass, mode)))
+ {
+ if (reg_class_size[rclass] == 1)
+ op_costs[i]->cost[k] = -frequency;
+ else
+ {
+ for (nr = 0;
+ nr < hard_regno_nregs[other_regno][mode];
+ nr++)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
+ other_regno + nr))
+ break;
+
+ if (nr == hard_regno_nregs[other_regno][mode])
+ op_costs[i]->cost[k] = -frequency;
+ }
+ }
+ }
}
}
case REG:
{
struct costs *pp;
+ int *pp_costs;
enum reg_class i;
- int k;
+ int k, regno, add_cost;
+ cost_classes_t cost_classes_ptr;
+ enum reg_class *cost_classes;
+ move_table *move_in_cost;
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
break;
+ regno = REGNO (x);
if (allocno_p)
- ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[REGNO (x)]) = true;
- pp = COSTS (costs, COST_INDEX (REGNO (x)));
- pp->mem_cost += (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
- for (k = 0; k < cost_classes_num; k++)
+ ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
+ pp = COSTS (costs, COST_INDEX (regno));
+ add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
+ if (INT_MAX - add_cost < pp->mem_cost)
+ pp->mem_cost = INT_MAX;
+ else
+ pp->mem_cost += add_cost;
+ cost_classes_ptr = regno_cost_classes[regno];
+ cost_classes = cost_classes_ptr->classes;
+ pp_costs = pp->cost;
+ ira_init_register_move_cost_if_necessary (Pmode);
+ move_in_cost = ira_may_move_in_cost[Pmode];
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
{
i = cost_classes[k];
- pp->cost[k]
- += (ira_get_may_move_cost (Pmode, i, rclass, true) * scale) / 2;
+ add_cost = (move_in_cost[i][rclass] * scale) / 2;
+ if (INT_MAX - add_cost < pp_costs[k])
+ pp_costs[k] = INT_MAX;
+ else
+ pp_costs[k] += add_cost;
}
}
break;
/* Calculate the costs of insn operands. */
static void
-record_operand_costs (rtx insn, struct costs **op_costs, enum reg_class *pref)
+record_operand_costs (rtx insn, enum reg_class *pref)
{
const char *constraints[MAX_RECOG_OPERANDS];
enum machine_mode modes[MAX_RECOG_OPERANDS];
record_address_regs (VOIDmode, recog_data.operand[i], 0, ADDRESS,
SCRATCH, frequency * 2);
}
-
+
/* Check for commutative in a separate loop so everything will have
been initialized. We must do this even if one operand is a
constant--see addsi3 in m68k.md. */
xconstraints[i+1] = constraints[i];
record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
recog_data.operand, modes,
- xconstraints, insn, op_costs, pref);
+ xconstraints, insn, pref);
}
record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
recog_data.operand, modes,
- constraints, insn, op_costs, pref);
+ constraints, insn, pref);
}
\f
enum rtx_code pat_code;
rtx set, note;
int i, k;
+ bool counted_mem;
if (!NONDEBUG_INSN_P (insn))
return insn;
|| pat_code == ADDR_VEC || pat_code == ADDR_DIFF_VEC)
return insn;
+ counted_mem = false;
set = single_set (insn);
extract_insn (insn);
/* If this insn loads a parameter from its stack slot, then it
represents a savings, rather than a cost, if the parameter is
- stored in memory. Record this fact. */
+ stored in memory. Record this fact.
+
+ Similarly if we're loading other constants from memory (constant
+ pool, TOC references, small data areas, etc) and this is the only
+ assignment to the destination pseudo. */
if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
&& (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
- && MEM_P (XEXP (note, 0)))
+ && ((MEM_P (XEXP (note, 0)))
+ || (CONSTANT_P (XEXP (note, 0))
+ && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
+ XEXP (note, 0))
+ && REG_N_SETS (REGNO (SET_DEST (set))) == 1)))
{
enum reg_class cl = GENERAL_REGS;
rtx reg = SET_DEST (set);
int num = COST_INDEX (REGNO (reg));
- if (pref)
- cl = pref[num];
COSTS (costs, num)->mem_cost
-= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
record_address_regs (GET_MODE (SET_SRC (set)), XEXP (SET_SRC (set), 0),
0, MEM, SCRATCH, frequency * 2);
+ counted_mem = true;
}
- record_operand_costs (insn, op_costs, pref);
+ record_operand_costs (insn, pref);
/* Now add the cost for each operand to the total costs for its
allocno. */
int regno = REGNO (recog_data.operand[i]);
struct costs *p = COSTS (costs, COST_INDEX (regno));
struct costs *q = op_costs[i];
+ int *p_costs = p->cost, *q_costs = q->cost;
+ cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
+ int add_cost;
- p->mem_cost += q->mem_cost;
- for (k = 0; k < cost_classes_num; k++)
- p->cost[k] += q->cost[k];
+      /* If we already accounted for the memory "cost" above, don't
+	 do so again.  */
+ if (!counted_mem)
+ {
+ add_cost = q->mem_cost;
+ if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
+ p->mem_cost = INT_MAX;
+ else
+ p->mem_cost += add_cost;
+ }
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ add_cost = q_costs[k];
+ if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
+ p_costs[k] = INT_MAX;
+ else
+ p_costs[k] += add_cost;
+ }
}
return insn;
int i, rclass;
basic_block bb;
int regno = ALLOCNO_REGNO (a);
+ cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
+ enum reg_class *cost_classes = cost_classes_ptr->classes;
i = ALLOCNO_NUM (a);
fprintf (f, " a%d(r%d,", i, regno);
else
fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
fprintf (f, ") costs:");
- for (k = 0; k < cost_classes_num; k++)
+ for (k = 0; k < cost_classes_ptr->num; k++)
{
rclass = cost_classes[k];
if (contains_reg_of_mode[rclass][PSEUDO_REGNO_MODE (regno)]
&& (! in_inc_dec[i] || ! forbidden_inc_dec_class[rclass])
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
- && ! invalid_mode_change_p (regno, (enum reg_class) rclass,
- PSEUDO_REGNO_MODE (regno))
+ && ! invalid_mode_change_p (regno, (enum reg_class) rclass)
#endif
)
{
fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
}
}
- fprintf (f, " MEM:%i\n", COSTS (costs, i)->mem_cost);
+ fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
+ if (flag_ira_region == IRA_REGION_ALL
+ || flag_ira_region == IRA_REGION_MIXED)
+ fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
+ fprintf (f, "\n");
}
}
{
int regno, k;
int rclass;
+ cost_classes_t cost_classes_ptr;
+ enum reg_class *cost_classes;
ira_assert (! allocno_p);
fprintf (f, "\n");
for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
{
- if (regno_reg_rtx[regno] == NULL_RTX)
+ if (REG_N_REFS (regno) <= 0)
continue;
+ cost_classes_ptr = regno_cost_classes[regno];
+ cost_classes = cost_classes_ptr->classes;
fprintf (f, " r%d costs:", regno);
- for (k = 0; k < cost_classes_num; k++)
+ for (k = 0; k < cost_classes_ptr->num; k++)
{
rclass = cost_classes[k];
if (contains_reg_of_mode[rclass][PSEUDO_REGNO_MODE (regno)]
&& (! in_inc_dec[regno] || ! forbidden_inc_dec_class[rclass])
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
- && ! invalid_mode_change_p (regno, (enum reg_class) rclass,
- PSEUDO_REGNO_MODE (regno))
+ && ! invalid_mode_change_p (regno, (enum reg_class) rclass)
#endif
)
fprintf (f, " %s:%d", reg_class_names[rclass],
}
/* Find costs of register classes and memory for allocnos or pseudos
- and their best costs. Set up preferred, alternative and cover
+ and their best costs. Set up preferred, alternative and allocno
classes for pseudos. */
static void
find_costs_and_classes (FILE *dump_file)
{
- int i, k, start;
+ int i, k, start, max_cost_classes_num;
int pass;
basic_block bb;
+ enum reg_class *regno_best_class;
init_recog ();
#ifdef FORBIDDEN_INC_DEC_CLASSES
in_inc_dec = ira_allocate (sizeof (bool) * cost_elements_num);
#endif /* FORBIDDEN_INC_DEC_CLASSES */
- pref = NULL;
- start = 0;
- if (!resize_reg_info () && allocno_p && pseudo_classes_defined_p)
+ regno_best_class
+ = (enum reg_class *) ira_allocate (max_reg_num ()
+ * sizeof (enum reg_class));
+ for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+ regno_best_class[i] = NO_REGS;
+ if (!resize_reg_info () && allocno_p
+ && pseudo_classes_defined_p && flag_expensive_optimizations)
{
ira_allocno_t a;
ira_allocno_iterator ai;
pref = pref_buffer;
+ max_cost_classes_num = 1;
FOR_EACH_ALLOCNO (a, ai)
- pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
- if (flag_expensive_optimizations)
- start = 1;
+ {
+ pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
+ setup_regno_cost_classes_by_aclass
+ (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
+ max_cost_classes_num
+ = MAX (max_cost_classes_num,
+ regno_cost_classes[ALLOCNO_REGNO (a)]->num);
+ }
+ start = 1;
+ }
+ else
+ {
+ pref = NULL;
+ max_cost_classes_num = ira_important_classes_num;
+ for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+ if (regno_reg_rtx[i] != NULL_RTX)
+ setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
+ else
+ setup_regno_cost_classes_by_aclass (i, ALL_REGS);
+ start = 0;
}
if (allocno_p)
/* Clear the flag for the next compiled function. */
if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
fprintf (dump_file,
"\nPass %i for finding pseudo/allocno costs\n\n", pass);
- /* We could use only cover classes. Unfortunately it does not
- work well for some targets where some subclass of cover class
- is costly and wrong cover class is chosen. */
- for (i = 0; i < N_REG_CLASSES; i++)
- cost_class_nums[i] = -1;
- for (cost_classes_num = 0;
- cost_classes_num < ira_important_classes_num;
- cost_classes_num++)
+
+ if (pass != start)
{
- cost_classes[cost_classes_num]
- = ira_important_classes[cost_classes_num];
- cost_class_nums[cost_classes[cost_classes_num]]
- = cost_classes_num;
+ max_cost_classes_num = 1;
+ for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+ {
+ setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
+ max_cost_classes_num
+ = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
+ }
}
+
struct_costs_size
- = sizeof (struct costs) + sizeof (int) * (cost_classes_num - 1);
+ = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
/* Zero out our accumulation of the cost of each class for each
allocno. */
memset (costs, 0, cost_elements_num * struct_costs_size);
for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
{
ira_allocno_t a, parent_a;
- int rclass, a_num, parent_a_num;
+ int rclass, a_num, parent_a_num, add_cost;
ira_loop_tree_node_t parent;
int best_cost, allocno_cost;
enum reg_class best, alt_class;
#ifdef FORBIDDEN_INC_DEC_CLASSES
int inc_dec_p = false;
#endif
+ cost_classes_t cost_classes_ptr = regno_cost_classes[i];
+ enum reg_class *cost_classes = cost_classes_ptr->classes;
+ int *i_costs = temp_costs->cost;
+ int i_mem_cost;
+ int equiv_savings = regno_equiv_gains[i];
if (! allocno_p)
{
inc_dec_p = in_inc_dec[i];
#endif
memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
+ i_mem_cost = temp_costs->mem_cost;
}
else
{
if (ira_regno_allocno_map[i] == NULL)
continue;
memset (temp_costs, 0, struct_costs_size);
+ i_mem_cost = 0;
/* Find cost of all allocnos with the same regno. */
for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
+ int *a_costs, *p_costs;
+
a_num = ALLOCNO_NUM (a);
if ((flag_ira_region == IRA_REGION_ALL
|| flag_ira_region == IRA_REGION_MIXED)
/* Propagate costs to upper levels in the region
tree. */
parent_a_num = ALLOCNO_NUM (parent_a);
- for (k = 0; k < cost_classes_num; k++)
- COSTS (total_allocno_costs, parent_a_num)->cost[k]
- += COSTS (total_allocno_costs, a_num)->cost[k];
- COSTS (total_allocno_costs, parent_a_num)->mem_cost
- += COSTS (total_allocno_costs, a_num)->mem_cost;
+ a_costs = COSTS (total_allocno_costs, a_num)->cost;
+ p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ add_cost = a_costs[k];
+ if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
+ p_costs[k] = INT_MAX;
+ else
+ p_costs[k] += add_cost;
+ }
+ add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
+ if (add_cost > 0
+ && (INT_MAX - add_cost
+ < COSTS (total_allocno_costs,
+ parent_a_num)->mem_cost))
+ COSTS (total_allocno_costs, parent_a_num)->mem_cost
+ = INT_MAX;
+ else
+ COSTS (total_allocno_costs, parent_a_num)->mem_cost
+ += add_cost;
+
}
- for (k = 0; k < cost_classes_num; k++)
- temp_costs->cost[k] += COSTS (costs, a_num)->cost[k];
- temp_costs->mem_cost += COSTS (costs, a_num)->mem_cost;
+ a_costs = COSTS (costs, a_num)->cost;
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ {
+ add_cost = a_costs[k];
+ if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
+ i_costs[k] = INT_MAX;
+ else
+ i_costs[k] += add_cost;
+ }
+ add_cost = COSTS (costs, a_num)->mem_cost;
+ if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
+ i_mem_cost = INT_MAX;
+ else
+ i_mem_cost += add_cost;
#ifdef FORBIDDEN_INC_DEC_CLASSES
if (in_inc_dec[a_num])
inc_dec_p = true;
#endif
}
}
+ if (equiv_savings < 0)
+ i_mem_cost = -equiv_savings;
+ else if (equiv_savings > 0)
+ {
+ i_mem_cost = 0;
+ for (k = cost_classes_ptr->num - 1; k >= 0; k--)
+ i_costs[k] += equiv_savings;
+ }
+
best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
best = ALL_REGS;
alt_class = NO_REGS;
/* Find best common class for all allocnos with the same
regno. */
- for (k = 0; k < cost_classes_num; k++)
+ for (k = 0; k < cost_classes_ptr->num; k++)
{
rclass = cost_classes[k];
/* Ignore classes that are too small for this operand or
|| (inc_dec_p && forbidden_inc_dec_class[rclass])
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
- || invalid_mode_change_p (i, (enum reg_class) rclass,
- PSEUDO_REGNO_MODE (i))
+ || invalid_mode_change_p (i, (enum reg_class) rclass)
#endif
)
continue;
- if (temp_costs->cost[k] < best_cost)
+ if (i_costs[k] < best_cost)
{
- best_cost = temp_costs->cost[k];
+ best_cost = i_costs[k];
best = (enum reg_class) rclass;
}
- else if (temp_costs->cost[k] == best_cost)
- best = ira_reg_class_union[best][rclass];
+ else if (i_costs[k] == best_cost)
+ best = ira_reg_class_subunion[best][rclass];
if (pass == flag_expensive_optimizations
- && temp_costs->cost[k] < temp_costs->mem_cost
+ && i_costs[k] < i_mem_cost
&& (reg_class_size[reg_class_subunion[alt_class][rclass]]
> reg_class_size[alt_class]))
alt_class = reg_class_subunion[alt_class][rclass];
}
- alt_class = ira_class_translate[alt_class];
- if (best_cost > temp_costs->mem_cost)
- regno_cover_class[i] = NO_REGS;
- else if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
- /* Make the common class the biggest class of best and
- alt_class. */
- regno_cover_class[i] = alt_class == NO_REGS ? best : alt_class;
+ alt_class = ira_allocno_class_translate[alt_class];
+ if (best_cost > i_mem_cost)
+ regno_aclass[i] = NO_REGS;
else
- /* Make the common class a cover class. Remember all
- allocnos with the same regno should have the same cover
- class. */
- regno_cover_class[i] = ira_class_translate[best];
+ {
+ /* Make the common class the biggest class of best and
+ alt_class. */
+ regno_aclass[i]
+ = ira_reg_class_superunion[best][alt_class];
+ ira_assert (regno_aclass[i] != NO_REGS
+ && ira_reg_allocno_class_p[regno_aclass[i]]);
+ }
if (pass == flag_expensive_optimizations)
{
- if (best_cost > temp_costs->mem_cost)
+ if (best_cost > i_mem_cost)
best = alt_class = NO_REGS;
else if (best == alt_class)
alt_class = NO_REGS;
- setup_reg_classes (i, best, alt_class, regno_cover_class[i]);
+ setup_reg_classes (i, best, alt_class, regno_aclass[i]);
if ((!allocno_p || internal_flag_ira_verbose > 2)
&& dump_file != NULL)
fprintf (dump_file,
- " r%d: preferred %s, alternative %s, cover %s\n",
+ " r%d: preferred %s, alternative %s, allocno %s\n",
i, reg_class_names[best], reg_class_names[alt_class],
- reg_class_names[regno_cover_class[i]]);
+ reg_class_names[regno_aclass[i]]);
}
+ regno_best_class[i] = best;
if (! allocno_p)
{
- pref[i] = best_cost > temp_costs->mem_cost ? NO_REGS : best;
+ pref[i] = best_cost > i_mem_cost ? NO_REGS : best;
continue;
}
for (a = ira_regno_allocno_map[i];
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
a_num = ALLOCNO_NUM (a);
- if (regno_cover_class[i] == NO_REGS)
+ if (regno_aclass[i] == NO_REGS)
best = NO_REGS;
else
{
+ int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
+ int *a_costs = COSTS (costs, a_num)->cost;
+
/* Finding best class which is subset of the common
class. */
best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
allocno_cost = best_cost;
best = ALL_REGS;
- for (k = 0; k < cost_classes_num; k++)
+ for (k = 0; k < cost_classes_ptr->num; k++)
{
rclass = cost_classes[k];
- if (! ira_class_subset_p[rclass][regno_cover_class[i]])
+ if (! ira_class_subset_p[rclass][regno_aclass[i]])
continue;
/* Ignore classes that are too small for this
operand or invalid for an operand that was
|| (inc_dec_p && forbidden_inc_dec_class[rclass])
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
- || invalid_mode_change_p (i, (enum reg_class) rclass,
- PSEUDO_REGNO_MODE (i))
+ || invalid_mode_change_p (i, (enum reg_class) rclass)
#endif
)
;
- else if (COSTS (total_allocno_costs, a_num)->cost[k]
- < best_cost)
+ else if (total_a_costs[k] < best_cost)
{
- best_cost
- = COSTS (total_allocno_costs, a_num)->cost[k];
- allocno_cost = COSTS (costs, a_num)->cost[k];
+ best_cost = total_a_costs[k];
+ allocno_cost = a_costs[k];
best = (enum reg_class) rclass;
}
- else if (COSTS (total_allocno_costs, a_num)->cost[k]
- == best_cost)
+ else if (total_a_costs[k] == best_cost)
{
- best = ira_reg_class_union[best][rclass];
- allocno_cost
- = MAX (allocno_cost, COSTS (costs, a_num)->cost[k]);
+ best = ira_reg_class_subunion[best][rclass];
+ allocno_cost = MAX (allocno_cost, a_costs[k]);
}
}
- ALLOCNO_COVER_CLASS_COST (a) = allocno_cost;
+ ALLOCNO_CLASS_COST (a) = allocno_cost;
}
- ira_assert (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY
- || ira_class_translate[best] == regno_cover_class[i]);
if (internal_flag_ira_verbose > 2 && dump_file != NULL
&& (pass == 0 || pref[a_num] != best))
{
else
fprintf (dump_file, "l%d",
ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
- fprintf (dump_file, ") best %s, cover %s\n",
+ fprintf (dump_file, ") best %s, allocno %s\n",
reg_class_names[best],
- reg_class_names[regno_cover_class[i]]);
+ reg_class_names[regno_aclass[i]]);
}
pref[a_num] = best;
}
}
-
+
if (internal_flag_ira_verbose > 4 && dump_file)
{
if (allocno_p)
fprintf (dump_file,"\n");
}
}
+ ira_free (regno_best_class);
#ifdef FORBIDDEN_INC_DEC_CLASSES
ira_free (in_inc_dec);
#endif
\f
/* Process moves involving hard regs to modify allocno hard register
- costs. We can do this only after determining allocno cover class.
- If a hard register forms a register class, than moves with the hard
+ costs. We can do this only after determining allocno class. If a
+ hard register forms a register class, then moves with the hard
register are already taken into account in class costs for the
allocno. */
static void
}
else
continue;
- rclass = ALLOCNO_COVER_CLASS (a);
+ rclass = ALLOCNO_CLASS (a);
if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
continue;
i = ira_class_hard_reg_index[rclass][hard_regno];
continue;
mode = ALLOCNO_MODE (a);
hard_reg_class = REGNO_REG_CLASS (hard_regno);
+ ira_init_register_move_cost_if_necessary (mode);
cost
- = (to_p ? ira_get_register_move_cost (mode, hard_reg_class, rclass)
- : ira_get_register_move_cost (mode, rclass, hard_reg_class)) * freq;
+ = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
+ : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
- ALLOCNO_COVER_CLASS_COST (a));
+ ALLOCNO_CLASS_COST (a));
ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
rclass, 0);
ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
- ALLOCNO_COVER_CLASS_COST (a) = MIN (ALLOCNO_COVER_CLASS_COST (a),
- ALLOCNO_HARD_REG_COSTS (a)[i]);
+ ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
+ ALLOCNO_HARD_REG_COSTS (a)[i]);
}
}
/* After we find hard register and memory costs for allocnos, define
- its cover class and modify hard register cost because insns moving
+ its class and modify hard register cost because of insns moving
allocno to/from hard registers. */
static void
-setup_allocno_cover_class_and_costs (void)
+setup_allocno_class_and_costs (void)
{
- int i, j, n, regno, num;
+ int i, j, n, regno, hard_regno, num;
int *reg_costs;
- enum reg_class cover_class, rclass;
+ enum reg_class aclass, rclass;
ira_allocno_t a;
ira_allocno_iterator ai;
+ cost_classes_t cost_classes_ptr;
ira_assert (allocno_p);
FOR_EACH_ALLOCNO (a, ai)
{
i = ALLOCNO_NUM (a);
- cover_class = regno_cover_class[ALLOCNO_REGNO (a)];
- ira_assert (pref[i] == NO_REGS || cover_class != NO_REGS);
+ regno = ALLOCNO_REGNO (a);
+ aclass = regno_aclass[regno];
+ cost_classes_ptr = regno_cost_classes[regno];
+ ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
- ira_set_allocno_cover_class (a, cover_class);
- if (cover_class == NO_REGS)
+ ira_set_allocno_class (a, aclass);
+ if (aclass == NO_REGS)
continue;
- ALLOCNO_AVAILABLE_REGS_NUM (a) = ira_available_class_regs[cover_class];
- if (optimize && ALLOCNO_COVER_CLASS (a) != pref[i])
+ if (optimize && ALLOCNO_CLASS (a) != pref[i])
{
- n = ira_class_hard_regs_num[cover_class];
+ n = ira_class_hard_regs_num[aclass];
ALLOCNO_HARD_REG_COSTS (a)
- = reg_costs = ira_allocate_cost_vector (cover_class);
+ = reg_costs = ira_allocate_cost_vector (aclass);
for (j = n - 1; j >= 0; j--)
{
- regno = ira_class_hard_regs[cover_class][j];
- if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], regno))
- reg_costs[j] = ALLOCNO_COVER_CLASS_COST (a);
+ hard_regno = ira_class_hard_regs[aclass][j];
+ if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
+ reg_costs[j] = ALLOCNO_CLASS_COST (a);
else
{
- rclass = REGNO_REG_CLASS (regno);
- num = cost_class_nums[rclass];
+ rclass = REGNO_REG_CLASS (hard_regno);
+ num = cost_classes_ptr->index[rclass];
if (num < 0)
{
- /* The hard register class is not a cover class or a
- class not fully inside in a cover class -- use
- the allocno cover class. */
- ira_assert (ira_hard_regno_cover_class[regno]
- == cover_class);
- num = cost_class_nums[cover_class];
+ num = cost_classes_ptr->hard_regno_index[hard_regno];
+ ira_assert (num >= 0);
}
reg_costs[j] = COSTS (costs, i)->cost[num];
}
this_op_costs[i] = NULL;
}
temp_costs = NULL;
- cost_classes = NULL;
}
/* Free allocated temporary cost vectors. */
{
int i;
- if (init_cost != NULL)
- free (init_cost);
+ free (init_cost);
init_cost = NULL;
for (i = 0; i < MAX_RECOG_OPERANDS; i++)
{
- if (op_costs[i] != NULL)
- free (op_costs[i]);
- if (this_op_costs[i] != NULL)
- free (this_op_costs[i]);
+ free (op_costs[i]);
+ free (this_op_costs[i]);
op_costs[i] = this_op_costs[i] = NULL;
}
- if (temp_costs != NULL)
- free (temp_costs);
+ free (temp_costs);
temp_costs = NULL;
- if (cost_classes != NULL)
- free (cost_classes);
- cost_classes = NULL;
}
/* This is called each time register related information is
free_ira_costs ();
max_struct_costs_size
= sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
- /* Don't use ira_allocate because vectors live through several IRA calls. */
+ /* Don't use ira_allocate because vectors live through several IRA
+ calls. */
init_cost = (struct costs *) xmalloc (max_struct_costs_size);
init_cost->mem_cost = 1000000;
for (i = 0; i < ira_important_classes_num; i++)
this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
}
temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
- cost_classes = (enum reg_class *) xmalloc (sizeof (enum reg_class)
- * ira_important_classes_num);
}
/* Function called once at the end of compiler work. */
init_subregs_of_mode ();
costs = (struct costs *) ira_allocate (max_struct_costs_size
* cost_elements_num);
- pref_buffer
- = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
- * cost_elements_num);
- regno_cover_class
- = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
- * max_reg_num ());
+ pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
+ * cost_elements_num);
+ regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
+ * max_reg_num ());
+ regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
+ memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
}
/* Common finalization function for ira_costs and
static void
finish_costs (void)
{
- ira_free (regno_cover_class);
+ finish_subregs_of_mode ();
+ ira_free (regno_equiv_gains);
+ ira_free (regno_aclass);
ira_free (pref_buffer);
ira_free (costs);
}
-/* Entry function which defines cover class, memory and hard register
- costs for each allocno. */
+/* Entry function which defines register class, memory and hard
+ register costs for each allocno. */
void
ira_costs (void)
{
init_costs ();
total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
* ira_allocnos_num);
+ initiate_regno_cost_classes ();
+ calculate_elim_costs_all_insns ();
find_costs_and_classes (ira_dump_file);
- setup_allocno_cover_class_and_costs ();
+ setup_allocno_class_and_costs ();
+ finish_regno_cost_classes ();
finish_costs ();
ira_free (total_allocno_costs);
}
internal_flag_ira_verbose = flag_ira_verbose;
cost_elements_num = max_reg_num ();
init_costs ();
+ initiate_regno_cost_classes ();
find_costs_and_classes (dump_file);
+ finish_regno_cost_classes ();
pseudo_classes_defined_p = true;
finish_costs ();
}
function calls. This is called only when we found all intersected
calls during building allocno live ranges. */
void
-ira_tune_allocno_costs_and_cover_classes (void)
+ira_tune_allocno_costs (void)
{
int j, n, regno;
int cost, min_cost, *reg_costs;
- enum reg_class cover_class, rclass;
+ enum reg_class aclass, rclass;
enum machine_mode mode;
ira_allocno_t a;
ira_allocno_iterator ai;
+ ira_allocno_object_iterator oi;
+ ira_object_t obj;
+ bool skip_p;
FOR_EACH_ALLOCNO (a, ai)
{
- cover_class = ALLOCNO_COVER_CLASS (a);
- if (cover_class == NO_REGS)
+ aclass = ALLOCNO_CLASS (a);
+ if (aclass == NO_REGS)
continue;
mode = ALLOCNO_MODE (a);
- n = ira_class_hard_regs_num[cover_class];
+ n = ira_class_hard_regs_num[aclass];
min_cost = INT_MAX;
if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
{
ira_allocate_and_set_costs
- (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
- ALLOCNO_COVER_CLASS_COST (a));
+ (&ALLOCNO_HARD_REG_COSTS (a), aclass,
+ ALLOCNO_CLASS_COST (a));
reg_costs = ALLOCNO_HARD_REG_COSTS (a);
for (j = n - 1; j >= 0; j--)
{
- regno = ira_class_hard_regs[cover_class][j];
+ regno = ira_class_hard_regs[aclass][j];
+ skip_p = false;
+ FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
+ {
+ if (! ira_hard_reg_not_in_set_p (regno, mode,
+ OBJECT_CONFLICT_HARD_REGS
+ (obj)))
+ {
+ skip_p = true;
+ break;
+ }
+ }
+ if (skip_p)
+ continue;
rclass = REGNO_REG_CLASS (regno);
cost = 0;
if (! ira_hard_reg_not_in_set_p (regno, mode, call_used_reg_set)
* ALLOCNO_FREQ (a)
* IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
#endif
- reg_costs[j] += cost;
+ if (INT_MAX - cost < reg_costs[j])
+ reg_costs[j] = INT_MAX;
+ else
+ reg_costs[j] += cost;
if (min_cost > reg_costs[j])
min_cost = reg_costs[j];
}
}
if (min_cost != INT_MAX)
- ALLOCNO_COVER_CLASS_COST (a) = min_cost;
+ ALLOCNO_CLASS_COST (a) = min_cost;
+
+ /* Some targets allow pseudos to be allocated to unaligned sequences
+ of hard registers. However, selecting an unaligned sequence can
+ unnecessarily restrict later allocations. So increase the cost of
+ unaligned hard regs to encourage the use of aligned hard regs. */
+ {
+ const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
+
+ if (nregs > 1)
+ {
+ ira_allocate_and_set_costs
+ (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
+ reg_costs = ALLOCNO_HARD_REG_COSTS (a);
+ for (j = n - 1; j >= 0; j--)
+ {
+ regno = ira_non_ordered_class_hard_regs[aclass][j];
+ if ((regno % nregs) != 0)
+ {
+ int index = ira_class_hard_reg_index[aclass][regno];
+ ira_assert (index != -1);
+ reg_costs[index] += ALLOCNO_FREQ (a);
+ }
+ }
+ }
+ }
}
}
+
+/* Add COST to the estimated gain for eliminating REGNO with its
+ equivalence. If COST is zero, record that no such elimination is
+ possible. */
+
+void
+ira_adjust_equiv_reg_cost (unsigned regno, int cost)
+{
+ if (cost == 0)
+ regno_equiv_gains[regno] = 0;
+ else
+ regno_equiv_gains[regno] += cost;
+}