X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fmode-switching.c;h=fa119d056b35313bcf0f808c7ce00fa58f1cce9d;hb=de2bc49318bb75a38a6fa668fd5553cd8d92f19d;hp=57eb1d21dc1528d03e296f3a81920fa9498868ec;hpb=2f138c1cfbd62fbaa71196fb5e9adb90c297c468;p=pf3gnuchains%2Fgcc-fork.git
diff --git a/gcc/mode-switching.c b/gcc/mode-switching.c
index 57eb1d21dc1..fa119d056b3 100644
--- a/gcc/mode-switching.c
+++ b/gcc/mode-switching.c
@@ -1,12 +1,12 @@
/* CPU mode switching
- Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
- Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
+ 2009 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -15,25 +15,27 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "target.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
-#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "output.h"
#include "tm_p.h"
#include "function.h"
+#include "tree-pass.h"
+#include "timevar.h"
+#include "df.h"
/* We want target macros for the mode switching code to be able to refer
to instruction attribute values. */
@@ -90,8 +92,8 @@ static sbitmap *comp;
static struct seginfo * new_seginfo (int, rtx, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
-static void reg_dies (rtx, HARD_REG_SET);
-static void reg_becomes_live (rtx, rtx, void *);
+static void reg_dies (rtx, HARD_REG_SET *);
+static void reg_becomes_live (rtx, const_rtx, void *);
static void make_preds_opaque (basic_block, int);
@@ -102,7 +104,7 @@ static struct seginfo *
new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live)
{
struct seginfo *ptr;
- ptr = xmalloc (sizeof (struct seginfo));
+ ptr = XNEW (struct seginfo);
ptr->mode = mode;
ptr->insn_ptr = insn;
ptr->bbnum = bb;
@@ -158,27 +160,25 @@ make_preds_opaque (basic_block b, int j)
/* Record in LIVE that register REG died. */
static void
-reg_dies (rtx reg, HARD_REG_SET live)
+reg_dies (rtx reg, HARD_REG_SET *live)
{
- int regno, nregs;
+ int regno;
if (!REG_P (reg))
return;
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
- for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0;
- nregs--)
- CLEAR_HARD_REG_BIT (live, regno + nregs);
+ remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}
/* Record in LIVE that register REG became live.
This is called via note_stores. */
static void
-reg_becomes_live (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *live)
+reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
- int regno, nregs;
+ int regno;
if (GET_CODE (reg) == SUBREG)
reg = SUBREG_REG (reg);
@@ -188,9 +188,7 @@ reg_becomes_live (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *live)
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
- for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0;
- nregs--)
- SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno + nregs);
+ add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}
/* Make sure if MODE_ENTRY is defined the MODE_EXIT is defined
@@ -219,7 +217,6 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
if (eg->flags & EDGE_FALLTHRU)
{
basic_block src_bb = eg->src;
- regset live_at_end = src_bb->global_live_at_end;
rtx last_insn, ret_reg;
gcc_assert (!pre_exit);
@@ -248,15 +245,48 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
if (INSN_P (return_copy))
{
- if (GET_CODE (PATTERN (return_copy)) == USE
- && GET_CODE (XEXP (PATTERN (return_copy), 0)) == REG
- && (FUNCTION_VALUE_REGNO_P
- (REGNO (XEXP (PATTERN (return_copy), 0)))))
+ /* When using SJLJ exceptions, the call to the
+ unregister function is inserted between the
+ clobber of the return value and the copy.
+ We do not want to split the block before this
+ or any other call; if we have not found the
+ copy yet, the copy must have been deleted. */
+ if (CALL_P (return_copy))
{
- maybe_builtin_apply = 1;
+ short_block = 1;
+ break;
+ }
+ return_copy_pat = PATTERN (return_copy);
+ switch (GET_CODE (return_copy_pat))
+ {
+ case USE:
+ /* Skip __builtin_apply pattern. */
+ if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
+ && (targetm.calls.function_value_regno_p
+ (REGNO (XEXP (return_copy_pat, 0)))))
+ {
+ maybe_builtin_apply = 1;
+ last_insn = return_copy;
+ continue;
+ }
+ break;
+
+ case ASM_OPERANDS:
+ /* Skip barrier insns. */
+ if (!MEM_VOLATILE_P (return_copy_pat))
+ break;
+
+ /* Fall through. */
+
+ case ASM_INPUT:
+ case UNSPEC_VOLATILE:
last_insn = return_copy;
continue;
+
+ default:
+ break;
}
+
/* If the return register is not (in its entirety)
likely spilled, the return copy might be
partially or completely optimized away. */
@@ -266,6 +296,25 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
return_copy_pat = PATTERN (return_copy);
if (GET_CODE (return_copy_pat) != CLOBBER)
break;
+ else if (!optimize)
+ {
+ /* This might be (clobber (reg []))
+ when not optimizing. Then check if
+ the previous insn is the clobber for
+ the return register. */
+ copy_reg = SET_DEST (return_copy_pat);
+ if (GET_CODE (copy_reg) == REG
+ && !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
+ {
+ if (INSN_P (PREV_INSN (return_copy)))
+ {
+ return_copy = PREV_INSN (return_copy);
+ return_copy_pat = PATTERN (return_copy);
+ if (GET_CODE (return_copy_pat) != CLOBBER)
+ break;
+ }
+ }
+ }
}
copy_reg = SET_DEST (return_copy_pat);
if (GET_CODE (copy_reg) == REG)
@@ -310,7 +359,8 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
&& copy_start + copy_num <= ret_end)
nregs -= copy_num;
else if (!maybe_builtin_apply
- || !FUNCTION_VALUE_REGNO_P (copy_start))
+ || !targetm.calls.function_value_regno_p
+ (copy_start))
break;
last_insn = return_copy;
}
@@ -328,7 +378,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
last_insn = return_copy;
}
while (nregs);
-
+
/* If we didn't see a full return value copy, verify that there
is a plausible reason for this. If some, but not all of the
return register is likely spilled, we can expect that there
@@ -348,7 +398,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
failures, so let it pass. */
|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
&& nregs != 1));
-
+
if (INSN_P (last_insn))
{
before_return_copy
@@ -368,8 +418,6 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
else
{
pre_exit = split_edge (eg);
- COPY_REG_SET (pre_exit->global_live_at_start, live_at_end);
- COPY_REG_SET (pre_exit->global_live_at_end, live_at_end);
}
}
@@ -380,8 +428,8 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
/* Find all insns that need a particular mode setting, and insert the
necessary mode switches. Return true if we did work. */
-int
-optimize_mode_switching (FILE *file)
+static int
+optimize_mode_switching (void)
{
rtx insn;
int e;
@@ -399,8 +447,6 @@ optimize_mode_switching (FILE *file)
bool emited = false;
basic_block post_entry ATTRIBUTE_UNUSED, pre_exit ATTRIBUTE_UNUSED;
- clear_bb_flags ();
-
for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
if (OPTIMIZE_MODE_SWITCHING (e))
{
@@ -413,7 +459,7 @@ optimize_mode_switching (FILE *file)
entry_exit_extra = 3;
#endif
bb_info[n_entities]
- = xcalloc (last_basic_block + entry_exit_extra, sizeof **bb_info);
+ = XCNEWVEC (struct bb_info, last_basic_block + entry_exit_extra);
entity_map[n_entities++] = e;
if (num_modes[e] > max_num_modes)
max_num_modes = num_modes[e];
@@ -429,6 +475,8 @@ optimize_mode_switching (FILE *file)
pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
#endif
+ df_analyze ();
+
/* Create the bitmap vectors. */
antic = sbitmap_vector_alloc (last_basic_block, n_entities);
@@ -452,8 +500,23 @@ optimize_mode_switching (FILE *file)
int last_mode = no_mode;
HARD_REG_SET live_now;
- REG_SET_TO_HARD_REG_SET (live_now,
- bb->global_live_at_start);
+ REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));
+
+ /* Pretend the mode is clobbered across abnormal edges. */
+ {
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (e->flags & EDGE_COMPLEX)
+ break;
+ if (e)
+ {
+ ptr = new_seginfo (no_mode, BB_HEAD (bb), bb->index, live_now);
+ add_seginfo (info + bb->index, ptr);
+ RESET_BIT (transp[bb->index], j);
+ }
+ }
+
for (insn = BB_HEAD (bb);
insn != NULL && insn != NEXT_INSN (BB_END (bb));
insn = NEXT_INSN (insn))
@@ -476,12 +539,12 @@ optimize_mode_switching (FILE *file)
/* Update LIVE_NOW. */
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
- reg_dies (XEXP (link, 0), live_now);
+ reg_dies (XEXP (link, 0), &live_now);
note_stores (PATTERN (insn), reg_becomes_live, &live_now);
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_UNUSED)
- reg_dies (XEXP (link, 0), live_now);
+ reg_dies (XEXP (link, 0), &live_now);
}
}
@@ -523,7 +586,7 @@ optimize_mode_switching (FILE *file)
for (i = 0; i < max_num_modes; i++)
{
int current_mode[N_ENTITIES];
- sbitmap *delete;
+ sbitmap *del;
sbitmap *insert;
/* Set the anticipatable and computing arrays. */
@@ -549,8 +612,8 @@ optimize_mode_switching (FILE *file)
FOR_EACH_BB (bb)
sbitmap_not (kill[bb->index], transp[bb->index]);
- edge_list = pre_edge_lcm (file, 1, transp, comp, antic,
- kill, &insert, &delete);
+ edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
+ kill, &insert, &del);
for (j = n_entities - 1; j >= 0; j--)
{
@@ -582,8 +645,7 @@ optimize_mode_switching (FILE *file)
mode = current_mode[j];
src_bb = eg->src;
- REG_SET_TO_HARD_REG_SET (live_at_edge,
- src_bb->global_live_at_end);
+ REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));
start_sequence ();
EMIT_MODE_SET (entity_map[j], mode, live_at_edge);
@@ -594,42 +656,15 @@ optimize_mode_switching (FILE *file)
if (mode_set == NULL_RTX)
continue;
- /* If this is an abnormal edge, we'll insert at the end
- of the previous block. */
- if (eg->flags & EDGE_ABNORMAL)
- {
- emited = true;
- if (JUMP_P (BB_END (src_bb)))
- emit_insn_before (mode_set, BB_END (src_bb));
- else
- {
- /* It doesn't make sense to switch to normal
- mode after a CALL_INSN. The cases in which a
- CALL_INSN may have an abnormal edge are
- sibcalls and EH edges. In the case of
- sibcalls, the dest basic-block is the
- EXIT_BLOCK, that runs in normal mode; it is
- assumed that a sibcall insn requires normal
- mode itself, so no mode switch would be
- required after the call (it wouldn't make
- sense, anyway). In the case of EH edges, EH
- entry points also start in normal mode, so a
- similar reasoning applies. */
- gcc_assert (NONJUMP_INSN_P (BB_END (src_bb)));
- emit_insn_after (mode_set, BB_END (src_bb));
- }
- bb_info[j][src_bb->index].computing = mode;
- RESET_BIT (transp[src_bb->index], j);
- }
- else
- {
- need_commit = 1;
- insert_insn_on_edge (mode_set, eg);
- }
+ /* We should not get an abnormal edge here. */
+ gcc_assert (! (eg->flags & EDGE_ABNORMAL));
+
+ need_commit = 1;
+ insert_insn_on_edge (mode_set, eg);
}
FOR_EACH_BB_REVERSE (bb)
- if (TEST_BIT (delete[bb->index], j))
+ if (TEST_BIT (del[bb->index], j))
{
make_preds_opaque (bb, j);
/* Cancel the 'deleted' mode set. */
@@ -637,7 +672,7 @@ optimize_mode_switching (FILE *file)
}
}
- sbitmap_vector_free (delete);
+ sbitmap_vector_free (del);
sbitmap_vector_free (insert);
clear_aux_for_edges ();
free_edge_list (edge_list);
@@ -667,9 +702,7 @@ optimize_mode_switching (FILE *file)
if (mode_set != NULL_RTX)
{
emited = true;
- if (NOTE_P (ptr->insn_ptr)
- && (NOTE_LINE_NUMBER (ptr->insn_ptr)
- == NOTE_INSN_BASIC_BLOCK))
+ if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
emit_insn_after (mode_set, ptr->insn_ptr);
else
emit_insn_before (mode_set, ptr->insn_ptr);
@@ -684,7 +717,6 @@ optimize_mode_switching (FILE *file)
}
/* Finished. Free up all the things we've allocated. */
-
sbitmap_vector_free (kill);
sbitmap_vector_free (antic);
sbitmap_vector_free (transp);
@@ -700,12 +732,47 @@ optimize_mode_switching (FILE *file)
return 0;
#endif
- max_regno = max_reg_num ();
- allocate_reg_info (max_regno, FALSE, FALSE);
- update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
- (PROP_DEATH_NOTES | PROP_KILL_DEAD_CODE
- | PROP_SCAN_DEAD_CODE));
-
return 1;
}
+
#endif /* OPTIMIZE_MODE_SWITCHING */
+
+static bool
+gate_mode_switching (void)
+{
+#ifdef OPTIMIZE_MODE_SWITCHING
+ return true;
+#else
+ return false;
+#endif
+}
+
+static unsigned int
+rest_of_handle_mode_switching (void)
+{
+#ifdef OPTIMIZE_MODE_SWITCHING
+ optimize_mode_switching ();
+#endif /* OPTIMIZE_MODE_SWITCHING */
+ return 0;
+}
+
+
+struct rtl_opt_pass pass_mode_switching =
+{
+ {
+ RTL_PASS,
+ "mode_sw", /* name */
+ gate_mode_switching, /* gate */
+ rest_of_handle_mode_switching, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_MODE_SWITCH, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_df_finish | TODO_verify_rtl_sharing |
+ TODO_dump_func /* todo_flags_finish */
+ }
+};