o Coloring. Now IRA has all necessary info to start graph coloring
process. It is done in each region on top-down traverse of the
region tree (file ira-color.c). There are following subpasses:
-
+
* Optional aggressive coalescing of allocnos in the region.
* Putting allocnos onto the coloring stack. IRA uses Briggs
hard_regno = reg_alloc_order[i];
#else
hard_regno = i;
-#endif
+#endif
if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
continue;
SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
static void
setup_class_subset_and_memory_move_costs (void)
{
- int cl, cl2;
- enum machine_mode mode;
+ int cl, cl2, mode;
HARD_REG_SET temp_hard_regset2;
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
if (cl != (int) NO_REGS)
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
{
- ira_memory_move_cost[mode][cl][0] = MEMORY_MOVE_COST (mode, cl, 0);
- ira_memory_move_cost[mode][cl][1] = MEMORY_MOVE_COST (mode, cl, 1);
+ ira_memory_move_cost[mode][cl][0] =
+ MEMORY_MOVE_COST ((enum machine_mode) mode,
+ (enum reg_class) cl, 0);
+ ira_memory_move_cost[mode][cl][1] =
+ MEMORY_MOVE_COST ((enum machine_mode) mode,
+ (enum reg_class) cl, 1);
/* Costs for NO_REGS are used in cost calculation on the
1st pass when the preferred register classes are not
known yet. In this case we take the best scenario. */
static void
setup_cover_and_important_classes (void)
{
- int i, j, n;
- bool set_p, eq_p;
- enum reg_class cl;
+ int i, j, n, cl;
+ bool set_p;
const enum reg_class *cover_classes;
HARD_REG_SET temp_hard_regset2;
static enum reg_class classes[LIM_REG_CLASSES + 1];
else
{
for (i = 0; (cl = cover_classes[i]) != LIM_REG_CLASSES; i++)
- classes[i] = cl;
+ classes[i] = (enum reg_class) cl;
classes[i] = LIM_REG_CLASSES;
}
{
if (i == NO_REGS)
continue;
-#ifdef CONSTRAINT__LIMIT
+#ifdef CONSTRAINT_NUM_DEFINED_P
for (j = 0; j < CONSTRAINT__LIMIT; j++)
- if ((int) regclass_for_constraint (j) == i)
+ if ((int) REG_CLASS_FOR_CONSTRAINT ((enum constraint_num) j) == i)
break;
if (j < CONSTRAINT__LIMIT)
{
- classes[n++] = i;
+ classes[n++] = (enum reg_class) i;
continue;
}
#endif
break;
}
if (j >= i)
- classes[n++] = i;
+ classes[n++] = (enum reg_class) i;
}
classes[n] = LIM_REG_CLASSES;
}
{
for (j = 0; j < i; j++)
if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
- && reg_classes_intersect_p (cl, classes[j]))
+ && reg_classes_intersect_p ((enum reg_class) cl, classes[j]))
gcc_unreachable ();
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
if (! hard_reg_set_empty_p (temp_hard_regset))
- ira_reg_class_cover[ira_reg_class_cover_size++] = cl;
+ ira_reg_class_cover[ira_reg_class_cover_size++] = (enum reg_class) cl;
}
ira_important_classes_num = 0;
for (cl = 0; cl < N_REG_CLASSES; cl++)
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
if (! hard_reg_set_empty_p (temp_hard_regset))
{
- set_p = eq_p = false;
+ set_p = false;
for (j = 0; j < ira_reg_class_cover_size; j++)
{
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
COPY_HARD_REG_SET (temp_hard_regset2,
reg_class_contents[ira_reg_class_cover[j]]);
AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
- if (cl == ira_reg_class_cover[j])
- {
- eq_p = false;
- set_p = true;
- break;
- }
- else if (hard_reg_set_equal_p (temp_hard_regset,
- temp_hard_regset2))
- eq_p = true;
+ if ((enum reg_class) cl == ira_reg_class_cover[j]
+ || hard_reg_set_equal_p (temp_hard_regset,
+ temp_hard_regset2))
+ break;
else if (hard_reg_set_subset_p (temp_hard_regset,
temp_hard_regset2))
set_p = true;
}
- if (set_p && ! eq_p)
- {
- ira_important_class_nums[cl] = ira_important_classes_num;
- ira_important_classes[ira_important_classes_num++] = cl;
- }
+ if (set_p && j >= ira_reg_class_cover_size)
+ ira_important_classes[ira_important_classes_num++]
+ = (enum reg_class) cl;
}
}
+ for (j = 0; j < ira_reg_class_cover_size; j++)
+ ira_important_classes[ira_important_classes_num++]
+ = ira_reg_class_cover[j];
}
/* Map of all register classes to corresponding cover class containing
static void
setup_class_translate (void)
{
- enum reg_class cl, cover_class, best_class, *cl_ptr;
- enum machine_mode mode;
+ int cl, mode;
+ enum reg_class cover_class, best_class, *cl_ptr;
int i, cost, min_cost, best_cost;
for (cl = 0; cl < N_REG_CLASSES; cl++)
ira_class_translate[cl] = NO_REGS;
-
+
if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
for (cl = 0; cl < LIM_REG_CLASSES; cl++)
{
for (i = 0; i < ira_reg_class_cover_size; i++)
{
HARD_REG_SET temp_hard_regset2;
-
+
cover_class = ira_reg_class_cover[i];
COPY_HARD_REG_SET (temp_hard_regset2,
reg_class_contents[cover_class]);
}
}
+/* Order numbers of cover classes in the original target cover class
+ array, -1 for non-cover classes.  Filled in by
+ reorder_important_classes below. */
+static int cover_class_order[N_REG_CLASSES];
+
+/* The function used to sort the important classes (qsort comparison
+ function for reorder_important_classes).  Each class is first
+ translated to its cover class; when both translations are real
+ classes (not NO_REGS) and their cover_class_order entries differ,
+ that order decides.  Otherwise fall back to the numeric difference
+ of the translated classes so the comparison stays total and
+ deterministic. */
+static int
+comp_reg_classes_func (const void *v1p, const void *v2p)
+{
+ enum reg_class cl1 = *(const enum reg_class *) v1p;
+ enum reg_class cl2 = *(const enum reg_class *) v2p;
+ int diff;
+
+ cl1 = ira_class_translate[cl1];
+ cl2 = ira_class_translate[cl2];
+ if (cl1 != NO_REGS && cl2 != NO_REGS
+ && (diff = cover_class_order[cl1] - cover_class_order[cl2]) != 0)
+ return diff;
+ return (int) cl1 - (int) cl2;
+}
+
+/* Reorder important classes according to the order of their cover
+ classes (the order given by the target's cover class array).
+ First initialize cover_class_order to -1 for every class, then set
+ it to the index of each cover class within ira_reg_class_cover.
+ After sorting ira_important_classes with comp_reg_classes_func,
+ record each important class's final position in
+ ira_important_class_nums. */
+static void
+reorder_important_classes (void)
+{
+ int i;
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ cover_class_order[i] = -1;
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ cover_class_order[ira_reg_class_cover[i]] = i;
+ qsort (ira_important_classes, ira_important_classes_num,
+ sizeof (enum reg_class), comp_reg_classes_func);
+ for (i = 0; i < ira_important_classes_num; i++)
+ ira_important_class_nums[ira_important_classes[i]] = i;
+}
+
/* The biggest important reg_class inside of intersection of the two
reg_classes (that is calculated taking only hard registers
available for allocation into account). If the both reg_classes
if (cl3 == LIM_REG_CLASSES)
break;
if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
- cl3))
- ira_reg_class_intersect[cl1][cl2] = cl3;
+ (enum reg_class) cl3))
+ ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
}
ira_reg_class_union[cl1][cl2] = reg_class_subunion[cl1][cl2];
continue;
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (ira_reg_class_union[cl1][cl2] == NO_REGS
|| (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
-
+
&& (! hard_reg_set_equal_p (temp_set2,
temp_hard_regset)
/* Ignore unavailable hard registers and
setup_reg_subclasses ();
setup_cover_and_important_classes ();
setup_class_translate ();
+ reorder_important_classes ();
setup_reg_class_relations ();
}
break;
}
}
-
+
}
}
static void
setup_reg_class_nregs (void)
{
- int m;
- enum reg_class cl;
+ int cl, m;
ira_max_nregs = -1;
for (cl = 0; cl < N_REG_CLASSES; cl++)
for (m = 0; m < MAX_MACHINE_MODE; m++)
{
- ira_reg_class_nregs[cl][m] = CLASS_MAX_NREGS (cl, m);
+ ira_reg_class_nregs[cl][m] = CLASS_MAX_NREGS ((enum reg_class) cl,
+ (enum machine_mode) m);
if (ira_max_nregs < ira_reg_class_nregs[cl][m])
ira_max_nregs = ira_reg_class_nregs[cl][m];
}
for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
{
hard_regno = ira_class_hard_regs[cl][k];
- if (! HARD_REGNO_MODE_OK (hard_regno, j))
+ if (! HARD_REGNO_MODE_OK (hard_regno, (enum machine_mode) j))
SET_HARD_REG_BIT (prohibited_class_mode_regs[cl][j],
hard_regno);
}
void
ira_init_once (void)
{
- enum machine_mode mode;
+ int mode;
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
{
static void
free_register_move_costs (void)
{
- enum machine_mode mode;
+ int mode;
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
{
SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
{
- if (! HARD_REGNO_MODE_OK (j, i))
+ if (! HARD_REGNO_MODE_OK (j, (enum machine_mode) i))
continue;
SET_REGNO (test_reg1, j);
- PUT_MODE (test_reg1, i);
+ PUT_MODE (test_reg1, (enum machine_mode) i);
SET_REGNO (test_reg2, j);
- PUT_MODE (test_reg2, i);
+ PUT_MODE (test_reg2, (enum machine_mode) i);
INSN_CODE (move_insn) = -1;
recog_memoized (move_insn);
if (INSN_CODE (move_insn) < 0)
basic_block bb;
memset (regs_asm_clobbered, 0, sizeof (char) * FIRST_PSEUDO_REGISTER);
-
+
FOR_EACH_BB (bb)
{
rtx insn;
{
unsigned int i;
enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (def));
- unsigned int end = dregno
+ unsigned int end = dregno
+ hard_regno_nregs[dregno][mode] - 1;
for (i = dregno; i <= end; ++i)
/* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and REGS_EVER_LIVE. */
-static void
-setup_eliminable_regset (void)
+void
+ira_setup_eliminable_regset (void)
{
/* Like regs_ever_live, but 1 if a reg is set or clobbered from an
asm. Unlike regs_ever_live, elements of this array corresponding
int need_fp
= (! flag_omit_frame_pointer
|| (cfun->calls_alloca && EXIT_IGNORE_STACK)
+ /* We need the frame pointer to catch stack overflow exceptions
+ if the stack pointer is moving. */
+ || (flag_stack_check && STACK_CHECK_MOVING_SP)
|| crtl->accesses_prior_frames
|| crtl->stack_realign_needed
- || FRAME_POINTER_REQUIRED);
+ || targetm.frame_pointer_required ());
frame_pointer_needed = need_fp;
for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
{
bool cannot_elim
- = (! CAN_ELIMINATE (eliminables[i].from, eliminables[i].to)
+ = (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
|| (eliminables[i].to == STACK_POINTER_REGNUM && need_fp));
if (! regs_asm_clobbered[eliminables[i].from])
{
insn = XEXP (list, 0);
note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
-
+
if (note == NULL_RTX)
continue;
x = XEXP (note, 0);
-
+
if (! function_invariant_p (x)
|| ! flag_pic
/* A function invariant is often CONSTANT_P but may
ira_assert (hard_regno < 0
|| ! ira_hard_reg_not_in_set_p
(hard_regno, ALLOCNO_MODE (a),
- reg_class_contents[ALLOCNO_COVER_CLASS (a)]));
+ reg_class_contents[ALLOCNO_COVER_CLASS (a)]));
if (hard_regno < 0)
{
cost = ALLOCNO_MEMORY_COST (a);
int max_regno = max_reg_num ();
int i, new_regno;
rtx x, prev, next, insn, set;
-
+
if (reg_equiv_init_size < max_regno)
{
reg_equiv_init
ira_allocno_t a;
ira_copy_t cp, next_cp;
ira_allocno_iterator ai;
-
+
FOR_EACH_ALLOCNO (a, ai)
{
if (ALLOCNO_CAP_MEMBER (a) != NULL)
for (i = start; i < max_regno; i++)
{
old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
- ira_assert (i != old_regno);
+ ira_assert (i != old_regno);
setup_reg_classes (i, reg_preferred_class (old_regno),
- reg_alternate_class (old_regno));
+ reg_alternate_class (old_regno),
+ reg_cover_class (old_regno));
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file,
" New r%d: setting preferred %s, alternative %s\n",
resize_reg_info ();
for (i = old_size; i < size; i++)
- {
- reg_renumber[i] = -1;
- setup_reg_classes (i, GENERAL_REGS, ALL_REGS);
- }
+ setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS);
}
/* Return TRUE if there is too high register pressure in the function.
{
int i;
enum reg_class cover_class;
-
+
for (i = 0; i < ira_reg_class_cover_size; i++)
{
cover_class = ira_reg_class_cover[i];
struct equivalence
{
- /* Set when an attempt should be made to replace a register
- with the associated src_p entry. */
- char replace;
/* Set when a REG_EQUIV note is found or created. Use to
keep track of what memory accesses might be created later,
e.g. by reload. */
rtx replacement;
rtx *src_p;
+ /* The list of each instruction which initializes this register. */
+ rtx init_insns;
/* Loop depth is used to recognize equivalences which appear
to be present within the same loop (or in an inner loop). */
int loop_depth;
- /* The list of each instruction which initializes this register. */
- rtx init_insns;
/* Nonzero if this had a preexisting REG_EQUIV note. */
int is_arg_equivalence;
+ /* Set when an attempt should be made to replace a register
+ with the associated src_p entry. */
+ char replace;
};
/* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
insn = NEXT_INSN (insn))
{
- if (!INSN_P (insn))
+ if (!NONDEBUG_INSN_P (insn))
continue;
-
+
if (memref_referenced_p (memref, PATTERN (insn)))
return 1;
basic_block bb;
int loop_depth;
bitmap cleared_regs;
-
+
/* We need to keep track of whether or not we recorded a LABEL_REF so
that we know if the jump optimizer needs to be rerun. */
recorded_label_ref = 0;
}
/* Move the initialization of the register to just before
INSN. Update the flow information. */
- else if (PREV_INSN (insn) != equiv_insn)
+ else if (prev_nondebug_insn (insn) != equiv_insn)
{
rtx new_insn;
to init all of the subregs to ones else init to 0. */
if (init_value)
sbitmap_ones (live_subregs[allocnum]);
- else
+ else
sbitmap_zero (live_subregs[allocnum]);
/* Set the number of bits that we really want. */
{
bitmap_iterator bi;
rtx insn;
-
+
CLEAR_REG_SET (live_relevant_regs);
memset (live_subregs_used, 0, max_regno * sizeof (int));
-
- EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), 0, i, bi)
+
+ EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb), 0, i, bi)
{
if (i >= FIRST_PSEUDO_REGISTER)
break;
bitmap_set_bit (live_relevant_regs, i);
}
- EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb),
+ EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb),
FIRST_PSEUDO_REGISTER, i, bi)
{
if (pseudo_for_reload_consideration_p (i))
next = c;
*p = c;
p = &c->prev;
-
+
c->insn = insn;
c->block = bb->index;
{
df_ref def = *def_rec;
unsigned int regno = DF_REF_REGNO (def);
-
+
/* Ignore may clobbers because these are generated
from calls. However, every other kind of def is
added to dead_or_set. */
&& !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
{
unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start
+ unsigned int last = start
+ GET_MODE_SIZE (GET_MODE (reg));
init_live_subregs
- (bitmap_bit_p (live_relevant_regs, regno),
+ (bitmap_bit_p (live_relevant_regs, regno),
live_subregs, live_subregs_used, regno, reg);
if (!DF_REF_FLAGS_IS_SET
RESET_BIT (live_subregs[regno], start);
start++;
}
-
+
if (sbitmap_empty_p (live_subregs[regno]))
{
live_subregs_used[regno] = 0;
}
}
}
-
+
bitmap_and_compl_into (live_relevant_regs, elim_regset);
bitmap_copy (&c->live_throughout, live_relevant_regs);
df_ref use = *use_rec;
unsigned int regno = DF_REF_REGNO (use);
rtx reg = DF_REF_REG (use);
-
+
/* DF_REF_READ_WRITE on a use means that this use
is fabricated from a def that is a partial set
to a multiword reg. Here, we only model the
subreg case that is not wrapped in ZERO_EXTRACT
precisely so we do not need to look at the
fabricated use. */
- if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
- && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
+ if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
+ && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
&& DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
continue;
-
+
/* Add the last use of each var to dead_or_set. */
if (!bitmap_bit_p (live_relevant_regs, regno))
{
else if (pseudo_for_reload_consideration_p (regno))
bitmap_set_bit (&c->dead_or_set, regno);
}
-
+
if (regno < FIRST_PSEUDO_REGISTER
|| pseudo_for_reload_consideration_p (regno))
{
if (GET_CODE (reg) == SUBREG
&& !DF_REF_FLAGS_IS_SET (use,
DF_REF_SIGN_EXTRACT
- | DF_REF_ZERO_EXTRACT))
+ | DF_REF_ZERO_EXTRACT))
{
unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start
+ unsigned int last = start
+ GET_MODE_SIZE (GET_MODE (reg));
-
+
init_live_subregs
- (bitmap_bit_p (live_relevant_regs, regno),
+ (bitmap_bit_p (live_relevant_regs, regno),
live_subregs, live_subregs_used, regno, reg);
-
+
/* Ignore the paradoxical bits. */
if ((int)last > live_subregs_used[regno])
last = live_subregs_used[regno];
labels and jump tables that are just hanging out in between
the basic blocks. See pr33676. */
insn = BB_HEAD (bb);
-
+
/* Skip over the barriers and cruft. */
- while (insn && (BARRIER_P (insn) || NOTE_P (insn)
+ while (insn && (BARRIER_P (insn) || NOTE_P (insn)
|| BLOCK_FOR_INSN (insn) == bb))
insn = PREV_INSN (insn);
-
+
/* While we add anything except barriers and notes, the focus is
to get the labels and jump tables into the
reload_insn_chain. */
{
if (BLOCK_FOR_INSN (insn))
break;
-
+
c = new_insn_chain ();
c->next = next;
next = c;
*p = c;
p = &c->prev;
-
+
/* The block makes no sense here, but it is what the old
code did. */
c->block = bb->index;
c->insn = insn;
bitmap_copy (&c->live_throughout, live_relevant_regs);
- }
+ }
insn = PREV_INSN (insn);
}
}
epilogue thus changing register elimination offsets. */
current_function_is_leaf = leaf_function_p ();
+ if (resize_reg_info () && flag_ira_loop_pressure)
+ ira_set_pseudo_classes (ira_dump_file);
+
rebuild_p = update_equiv_regs ();
#ifndef IRA_NO_OBSTACK
#endif
bitmap_obstack_initialize (&ira_bitmap_obstack);
if (optimize)
- {
+ {
max_regno = max_reg_num ();
ira_reg_equiv_len = max_regno;
ira_reg_equiv_invariant_p
}
max_regno_before_ira = allocated_reg_info_size = max_reg_num ();
- allocate_reg_info ();
- setup_eliminable_regset ();
-
+ ira_setup_eliminable_regset ();
+
ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
ira_move_loops_num = ira_additional_jumps_num = 0;
-
+
ira_assert (current_loops == NULL);
flow_loops_find (&ira_loops);
+ record_loop_exits ();
current_loops = &ira_loops;
-
+
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "Building IRA IR\n");
loops_p = ira_build (optimize
&& (flag_ira_region == IRA_REGION_ALL
|| flag_ira_region == IRA_REGION_MIXED));
-
+
ira_assert (ira_conflicts_p || !loops_p);
saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots;
if (too_high_register_pressure_p ())
/* It is just wasting compiler's time to pack spilled pseudos into
- stack slots in this case -- prohibit it. */
+ stack slots in this case -- prohibit it. */
flag_ira_share_spill_slots = FALSE;
ira_color ();
-
+
ira_max_point_before_emit = ira_max_point;
-
+
ira_emit (loops_p);
-
+
if (ira_conflicts_p)
{
max_regno = max_reg_num ();
-
+
if (! loops_p)
ira_initiate_assign ();
else
setup_preferred_alternate_classes_for_new_pseudos
(allocated_reg_info_size);
allocated_reg_info_size = max_regno;
-
+
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "Flattening IR\n");
ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
/* New insns were generated: add notes and recalculate live
info. */
df_analyze ();
-
+
flow_loops_find (&ira_loops);
+ record_loop_exits ();
current_loops = &ira_loops;
setup_allocno_assignment_flags ();
}
setup_reg_renumber ();
-
+
calculate_allocation_cost ();
-
+
#ifdef ENABLE_IRA_CHECKING
if (ira_conflicts_p)
check_allocation ();
#endif
-
+
delete_trivially_dead_insns (get_insns (), max_reg_num ());
max_regno = max_reg_num ();
-
+
/* And the reg_equiv_memory_loc array. */
VEC_safe_grow (rtx, gc, reg_equiv_memory_loc_vec, max_regno);
memset (VEC_address (rtx, reg_equiv_memory_loc_vec), 0,
if (ira_conflicts_p)
{
fix_reg_equiv_init ();
-
+
#ifdef ENABLE_IRA_CHECKING
print_redundant_copies ();
#endif
memset (ira_spilled_reg_stack_slots, 0,
max_regno * sizeof (struct ira_spilled_reg_stack_slot));
}
-
+
timevar_pop (TV_IRA);
timevar_push (TV_RELOAD);
reload_completed = !reload (get_insns (), ira_conflicts_p);
+ finish_subregs_of_mode ();
+
timevar_pop (TV_RELOAD);
timevar_push (TV_IRA);
if (ira_conflicts_p)
{
ira_free (ira_spilled_reg_stack_slots);
-
+
ira_finish_assign ();
-
- }
+
+ }
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
&& overall_cost_before != ira_overall_cost)
fprintf (ira_dump_file, "+++Overall after reload %d\n", ira_overall_cost);
ira_destroy ();
-
+
flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;
flow_loops_free (&ira_loops);
regstat_free_ri ();
regstat_free_n_sets_and_refs ();
-
+
if (optimize)
{
cleanup_cfg (CLEANUP_EXPENSIVE);
-
+
ira_free (ira_reg_equiv_invariant_p);
ira_free (ira_reg_equiv_const);
}
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
+ TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */