/* Global common subexpression elimination/Partial redundancy elimination
and global constant/copy propagation for GNU compiler.
- Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002
+ Free Software Foundation, Inc.
This file is part of GCC.
#include "output.h"
#include "function.h"
#include "expr.h"
+#include "except.h"
#include "ggc.h"
#include "params.h"
struct null_pointer_info
{
/* The basic block being processed. */
- int current_block;
+ basic_block current_block;
/* The first register to be handled in this pass. */
unsigned int min_reg;
/* One greater than the last register to be handled in this pass. */
basic_block));
static void free_store_memory PARAMS ((void));
static void store_motion PARAMS ((void));
+static void free_insn_expr_list_list PARAMS ((rtx *));
static void clear_modify_mem_tables PARAMS ((void));
static void free_modify_mem_tables PARAMS ((void));
+static rtx gcse_emit_move_after PARAMS ((rtx, rtx, rtx));
\f
/* Entry point for global common subexpression elimination.
F is the first instruction in the function. */
if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
{
if (warn_disabled_optimization)
- warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
- n_basic_blocks, n_edges / n_basic_blocks);
+ warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
+ n_basic_blocks, n_edges / n_basic_blocks);
return 0;
}
{
free_modify_mem_tables ();
modify_mem_list
- = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
+ = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
canon_modify_mem_list
- = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
- memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
- memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
+ = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
+ memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
+ memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
orig_bb_count = n_basic_blocks;
}
free_reg_set_mem ();
end_alias_analysis ();
allocate_reg_info (max_reg_num (), FALSE, FALSE);
- if (!optimize_size && flag_gcse_sm)
+ /* Store motion disabled until it is fixed. */
+ if (0 && !optimize_size && flag_gcse_sm)
store_motion ();
/* Record where pseudo-registers are set. */
return run_jump_opt_after_gcse;
{
int i;
#ifndef AVOID_CCMODE_COPIES
- rtx reg,insn;
+ rtx reg, insn;
#endif
memset (can_copy_p, 0, NUM_MACHINE_MODES);
alloc_gcse_mem (f)
rtx f;
{
- int i,n;
+ int i, n;
rtx insn;
/* Find the largest UID and create a mapping from UIDs to CUIDs.
reg_set_bitmap = BITMAP_XMALLOC ();
/* Allocate vars to track sets of regs, memory per block. */
- reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
max_gcse_regno);
/* Allocate array to keep a list of insns which modify memory in each
basic block. */
- modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
- canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
- memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
- memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
+ modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
+ canon_modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
+ memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
+ memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
modify_mem_list_set = BITMAP_XMALLOC ();
canon_modify_mem_list_set = BITMAP_XMALLOC ();
}
if (transp)
{
if (setp)
- sbitmap_vector_zero (transp, n_basic_blocks);
+ sbitmap_vector_zero (transp, last_basic_block);
else
- sbitmap_vector_ones (transp, n_basic_blocks);
+ sbitmap_vector_ones (transp, last_basic_block);
}
if (comp)
- sbitmap_vector_zero (comp, n_basic_blocks);
+ sbitmap_vector_zero (comp, last_basic_block);
if (antloc)
- sbitmap_vector_zero (antloc, n_basic_blocks);
+ sbitmap_vector_zero (antloc, last_basic_block);
/* We use the same code for cprop, pre and hoisting. For cprop
we care about the set hash table, for pre and hoisting we
= (struct reg_set **) grealloc ((char *) reg_set_table,
new_size * sizeof (struct reg_set *));
memset ((char *) (reg_set_table + reg_set_table_size), 0,
- (new_size - reg_set_table_size) * sizeof (struct reg_set *));
+ (new_size - reg_set_table_size) * sizeof (struct reg_set *));
reg_set_table_size = new_size;
}
struct reg_avail_info
{
- int last_bb;
+ basic_block last_bb;
int first_set;
int last_set;
};
static struct reg_avail_info *reg_avail_info;
-static int current_bb;
+static basic_block current_bb;
/* See whether X, the source of a set, is something we want to consider for
case SUBREG:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_VECTOR:
case CALL:
return 0;
}
case MEM:
- if (load_killed_in_block_p (BASIC_BLOCK (current_bb), INSN_CUID (insn),
+ if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
x, avail_p))
return 0;
else
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
case ADDR_VEC:
const char *ps;
{
unsigned hash = 0;
- const unsigned char *p = (const unsigned char *)ps;
+ const unsigned char *p = (const unsigned char *) ps;
if (p)
while (*p)
+ (unsigned int) CONST_DOUBLE_HIGH (x));
return hash;
+ case CONST_VECTOR:
+ {
+ int units;
+ rtx elt;
+
+ units = CONST_VECTOR_NUNITS (x);
+
+ for (i = 0; i < units; ++i)
+ {
+ elt = CONST_VECTOR_ELT (x, i);
+ hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
+ }
+
+ return hash;
+ }
+
/* Assume there is only one rtx object for any given label. */
case LABEL_REF:
/* We don't hash on the address of the CODE_LABEL to avoid bootstrap
}
hash += (unsigned int) MEM;
- hash += MEM_ALIAS_SET (x);
+ /* We used the alias set for hashing, but this is not good, since the
+ alias set may differ between the -fprofile-arcs and -fbranch-probabilities
+ compilations, causing the profiles to fail to match. */
x = XEXP (x, 0);
goto repeat;
default:
abort ();
}
- }
+ }
return 1;
}
&& regno >= FIRST_PSEUDO_REGISTER
/* Don't GCSE something if we can't do a reg/reg copy. */
&& can_copy_p [GET_MODE (dest)]
+ /* GCSE commonly inserts instructions after the insn. We can't
+ do that easily for insns with EH_REGION notes, so disable GCSE
+ on these for now. */
+ && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
/* Is SET_SRC something we want to gcse? */
&& want_to_gcse_p (src)
/* Don't CSE a nop. */
&& REGNO (src) >= FIRST_PSEUDO_REGISTER
&& can_copy_p [GET_MODE (dest)]
&& REGNO (src) != regno)
- || GET_CODE (src) == CONST_INT
- || GET_CODE (src) == SYMBOL_REF
- || GET_CODE (src) == CONST_DOUBLE)
+ || CONSTANT_P (src))
/* A copy is not available if its src or dest is subsequently
modified. Here we want to search from INSN+1 on, but
oprs_available_p searches from INSN on. */
{
info->last_bb = current_bb;
info->first_set = cuid;
- SET_BIT (reg_set_in_block[current_bb], regno);
+ SET_BIT (reg_set_in_block[current_bb->index], regno);
}
}
void * v_insn;
{
rtx dest_addr, insn;
+ int bb;
while (GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == ZERO_EXTRACT
dest_addr = get_addr (XEXP (dest, 0));
dest_addr = canon_rtx (dest_addr);
insn = (rtx) v_insn;
+ bb = BLOCK_NUM (insn);
- canon_modify_mem_list[BLOCK_NUM (insn)] =
- alloc_INSN_LIST (dest_addr, canon_modify_mem_list[BLOCK_NUM (insn)]);
- canon_modify_mem_list[BLOCK_NUM (insn)] =
- alloc_INSN_LIST (dest, canon_modify_mem_list[BLOCK_NUM (insn)]);
- bitmap_set_bit (canon_modify_mem_list_set, BLOCK_NUM (insn));
+ canon_modify_mem_list[bb] =
+ alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
+ canon_modify_mem_list[bb] =
+ alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
+ bitmap_set_bit (canon_modify_mem_list_set, bb);
}
/* Record memory modification information for INSN. We do not actually care
record_last_mem_set_info (insn)
rtx insn;
{
+ int bb = BLOCK_NUM (insn);
+
/* load_killed_in_block_p will handle the case of calls clobbering
everything. */
- modify_mem_list[BLOCK_NUM (insn)] =
- alloc_INSN_LIST (insn, modify_mem_list[BLOCK_NUM (insn)]);
- bitmap_set_bit (modify_mem_list_set, BLOCK_NUM (insn));
+ modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
+ bitmap_set_bit (modify_mem_list_set, bb);
if (GET_CODE (insn) == CALL_INSN)
{
/* Note that traversals of this loop (other than for free-ing)
will break after encountering a CALL_INSN. So, there's no
need to insert a pair of items, as canon_list_insert does. */
- canon_modify_mem_list[BLOCK_NUM (insn)] =
- alloc_INSN_LIST (insn, canon_modify_mem_list[BLOCK_NUM (insn)]);
- bitmap_set_bit (canon_modify_mem_list_set, BLOCK_NUM (insn));
+ canon_modify_mem_list[bb] =
+ alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
+ bitmap_set_bit (canon_modify_mem_list_set, bb);
}
else
- note_stores (PATTERN (insn), canon_list_insert, (void*)insn );
+ note_stores (PATTERN (insn), canon_list_insert, (void *) insn);
}
/* Called from compute_hash_table via note_stores to handle one
registers are set in which blocks.
??? This isn't needed during const/copy propagation, but it's cheap to
compute. Later. */
- sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
+ sbitmap_vector_zero (reg_set_in_block, last_basic_block);
/* Re-cache any INSN_LIST nodes we have allocated. */
clear_modify_mem_tables ();
gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
for (i = 0; i < max_gcse_regno; ++i)
- reg_avail_info[i].last_bb = NEVER_SET;
+ reg_avail_info[i].last_bb = NULL;
- for (current_bb = 0; current_bb < n_basic_blocks; current_bb++)
+ FOR_EACH_BB (current_bb)
{
rtx insn;
unsigned int regno;
??? hard-reg reg_set_in_block computation
could be moved to compute_sets since they currently don't change. */
- for (insn = BLOCK_HEAD (current_bb);
- insn && insn != NEXT_INSN (BLOCK_END (current_bb));
+ for (insn = current_bb->head;
+ insn && insn != NEXT_INSN (current_bb->end);
insn = NEXT_INSN (insn))
{
if (! INSN_P (insn))
/* The next pass builds the hash table. */
- for (insn = BLOCK_HEAD (current_bb), in_libcall_block = 0;
- insn && insn != NEXT_INSN (BLOCK_END (current_bb));
+ for (insn = current_bb->head, in_libcall_block = 0;
+ insn && insn != NEXT_INSN (current_bb->end);
insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
hash_scan_insn (insn, set_p, in_libcall_block);
if (!set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
in_libcall_block = 0;
- }
+ }
}
free (reg_avail_info);
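
This patch converts every index-based block walk to the new CFG chain
iterators. As a reference for the hunks that follow, a sketch of their
expansions, assuming the definitions in basic-block.h at this point in
the tree (the exact macros are not part of this patch):

    /* Approximate expansions of the new iterators used below.  */
    #define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
      for (BB = (FROM); BB != (TO); BB = BB->DIR)

    #define FOR_EACH_BB(BB) \
      FOR_BB_BETWEEN (BB, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)

    #define FOR_EACH_BB_REVERSE(BB) \
      FOR_BB_BETWEEN (BB, EXIT_BLOCK_PTR->prev_bb, ENTRY_BLOCK_PTR, prev_bb)

Since blocks are no longer guaranteed to be densely numbered, per-block
arrays are sized by last_basic_block (one past the highest bb->index in
use) rather than n_basic_blocks, which is why the allocation hunks grow
their vectors accordingly.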
/* Initialize count of number of entries in hash table. */
n_sets = 0;
memset ((char *) set_hash_table, 0,
- set_hash_table_size * sizeof (struct expr *));
+ set_hash_table_size * sizeof (struct expr *));
compute_hash_table (1);
}
/* Initialize count of number of entries in hash table. */
n_exprs = 0;
memset ((char *) expr_hash_table, 0,
- expr_hash_table_size * sizeof (struct expr *));
+ expr_hash_table_size * sizeof (struct expr *));
compute_hash_table (0);
}
return expr;
}
+/* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
+ types may be mixed. */
+
+static void
+free_insn_expr_list_list (listp)
+ rtx *listp;
+{
+ rtx list, next;
+
+ for (list = *listp; list; list = next)
+ {
+ next = XEXP (list, 1);
+ if (GET_CODE (list) == EXPR_LIST)
+ free_EXPR_LIST_node (list);
+ else
+ free_INSN_LIST_node (list);
+ }
+
+ *listp = NULL;
+}
+
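For context on the mixed node types: record_last_mem_set_info pushes a
single INSN_LIST node onto canon_modify_mem_list for a CALL_INSN, while
canon_list_insert pushes EXPR_LIST pairs of (destination, canonicalized
address). A sketch of the traversal the consumers rely on, mirroring
the MEM case of compute_transp later in this patch:

    rtx list_entry = canon_modify_mem_list[bb->index];

    while (list_entry)
      {
        rtx dest, dest_addr;

        /* A call clobbers everything; traversals stop here, so a
           lone INSN_LIST node suffices.  */
        if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
          break;

        /* Otherwise the entries come in EXPR_LIST pairs.  */
        dest = XEXP (list_entry, 0);
        list_entry = XEXP (list_entry, 1);
        dest_addr = XEXP (list_entry, 0);
        list_entry = XEXP (list_entry, 1);
      }

free_insn_expr_list_list dispatches on GET_CODE of each node so that a
list built this way is freed back to the correct object pools.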
/* Clear canon_modify_mem_list and modify_mem_list tables. */
static void
clear_modify_mem_tables ()
int i;
EXECUTE_IF_SET_IN_BITMAP
- (canon_modify_mem_list_set, 0, i,
- free_INSN_LIST_list (modify_mem_list + i));
- bitmap_clear (canon_modify_mem_list_set);
+ (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i));
+ bitmap_clear (modify_mem_list_set);
EXECUTE_IF_SET_IN_BITMAP
(canon_modify_mem_list_set, 0, i,
- free_INSN_LIST_list (canon_modify_mem_list + i));
- bitmap_clear (modify_mem_list_set);
+ free_insn_expr_list_list (canon_modify_mem_list + i));
+ bitmap_clear (canon_modify_mem_list_set);
}
/* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
case ADDR_VEC:
int n_blocks, n_insns;
{
rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_kill, n_basic_blocks);
+ sbitmap_vector_zero (rd_kill, n_blocks);
rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_gen, n_basic_blocks);
+ sbitmap_vector_zero (rd_gen, n_blocks);
reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (reaching_defs, n_basic_blocks);
+ sbitmap_vector_zero (reaching_defs, n_blocks);
rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_out, n_basic_blocks);
+ sbitmap_vector_zero (rd_out, n_blocks);
}
/* Free reaching def variables. */
static void
compute_kill_rd ()
{
- int bb, cuid;
+ int cuid;
unsigned int regno;
int i;
+ basic_block bb;
/* For each block
For each set bit in `gen' of the block (i.e each insn which
For each setting of regx in the linked list, which is not in
this block
Set the bit in `kill' corresponding to that insn. */
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
for (cuid = 0; cuid < max_cuid; cuid++)
- if (TEST_BIT (rd_gen[bb], cuid))
+ if (TEST_BIT (rd_gen[bb->index], cuid))
{
rtx insn = CUID_INSN (cuid);
rtx pat = PATTERN (insn);
{
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
- handle_rd_kill_set (insn, regno, BASIC_BLOCK (bb));
+ handle_rd_kill_set (insn, regno, bb);
}
if (GET_CODE (pat) == PARALLEL)
&& GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
handle_rd_kill_set (insn,
REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
- BASIC_BLOCK (bb));
+ bb);
}
}
else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
/* Each setting of this register outside of this block
must be marked in the set of kills in this block. */
- handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), BASIC_BLOCK (bb));
+ handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
}
}
static void
compute_rd ()
{
- int bb, changed, passes;
+ int changed, passes;
+ basic_block bb;
- for (bb = 0; bb < n_basic_blocks; bb++)
- sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
+ FOR_EACH_BB (bb)
+ sbitmap_copy (rd_out[bb->index] /*dst*/, rd_gen[bb->index] /*src*/);
passes = 0;
changed = 1;
while (changed)
{
changed = 0;
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
{
- sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
- changed |= sbitmap_union_of_diff (rd_out[bb], rd_gen[bb],
- reaching_defs[bb], rd_kill[bb]);
+ sbitmap_union_of_preds (reaching_defs[bb->index], rd_out, bb->index);
+ changed |= sbitmap_union_of_diff_cg (rd_out[bb->index], rd_gen[bb->index],
+ reaching_defs[bb->index], rd_kill[bb->index]);
}
passes++;
}
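
Restating what compute_rd iterates, in the bitmap names used here (the
standard reaching-definitions equations, solved to a fixed point):

    reaching_defs[bb] = UNION over p in preds (bb) of rd_out[p]
    rd_out[bb]        = rd_gen[bb] UNION (reaching_defs[bb] - rd_kill[bb])

The switch to sbitmap_union_of_diff_cg matters because the _cg variant
reports whether rd_out[bb] actually changed, which is what drives the
convergence test of the while loop.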
int n_blocks, n_exprs;
{
ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_kill, n_basic_blocks);
+ sbitmap_vector_zero (ae_kill, n_blocks);
ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_gen, n_basic_blocks);
+ sbitmap_vector_zero (ae_gen, n_blocks);
ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_in, n_basic_blocks);
+ sbitmap_vector_zero (ae_in, n_blocks);
ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_out, n_basic_blocks);
+ sbitmap_vector_zero (ae_out, n_blocks);
}
static void
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
case ADDR_VEC:
compute_ae_kill (ae_gen, ae_kill)
sbitmap *ae_gen, *ae_kill;
{
- int bb;
+ basic_block bb;
unsigned int i;
struct expr *expr;
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
for (i = 0; i < expr_hash_table_size; i++)
for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
{
/* Skip EXPR if generated in this block. */
- if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
+ if (TEST_BIT (ae_gen[bb->index], expr->bitmap_index))
continue;
- if (expr_killed_p (expr->expr, BASIC_BLOCK (bb)))
- SET_BIT (ae_kill[bb], expr->bitmap_index);
+ if (expr_killed_p (expr->expr, bb))
+ SET_BIT (ae_kill[bb->index], expr->bitmap_index);
}
}
\f
int check_self_loop;
{
int rval;
- char *visited = (char *) xcalloc (n_basic_blocks, 1);
+ char *visited = (char *) xcalloc (last_basic_block, 1);
rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
|| (((this_reg = reg_set_table[regnum_for_replacing]),
this_reg->next == NULL)
|| can_disregard_other_sets (&this_reg, insn, 0)))
- {
- use_src = 1;
- found_setting = 1;
- }
+ {
+ use_src = 1;
+ found_setting = 1;
+ }
}
if (!found_setting)
static int
classic_gcse ()
{
- int bb, changed;
+ int changed;
rtx insn;
+ basic_block bb;
/* Note we start at block 1. */
+ if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
+ return 0;
+
changed = 0;
- for (bb = 1; bb < n_basic_blocks; bb++)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
{
/* Reset tables used to keep track of what's still valid [since the
start of the block]. */
reset_opr_set_tables ();
- for (insn = BLOCK_HEAD (bb);
- insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
+ for (insn = bb->head;
+ insn != NULL && insn != NEXT_INSN (bb->end);
insn = NEXT_INSN (insn))
{
/* Is insn of form (set (pseudo-reg) ...)? */
&& ((expr = lookup_expr (src)) != NULL)
/* Is the expression available [at the start of the
block]? */
- && TEST_BIT (ae_in[bb], expr->bitmap_index)
+ && TEST_BIT (ae_in[bb->index], expr->bitmap_index)
/* Are the operands unchanged since the start of the
block? */
&& oprs_not_set_p (src, insn))
gcse_create_count = 0;
alloc_expr_hash_table (max_cuid);
- alloc_rd_mem (n_basic_blocks, max_cuid);
+ alloc_rd_mem (last_basic_block, max_cuid);
compute_expr_hash_table ();
if (gcse_file)
dump_hash_table (gcse_file, "Expression", expr_hash_table,
{
compute_kill_rd ();
compute_rd ();
- alloc_avail_expr_mem (n_basic_blocks, n_exprs);
+ alloc_avail_expr_mem (last_basic_block, n_exprs);
compute_ae_gen ();
compute_ae_kill (ae_gen, ae_kill);
compute_available (ae_gen, ae_kill, ae_out, ae_in);
sbitmap *bmap;
int set_p;
{
- int bb, i, j;
+ int i, j;
+ basic_block bb;
enum rtx_code code;
reg_set *r;
const char *fmt;
{
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
{
- for (bb = 0; bb < n_basic_blocks; bb++)
- if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
- SET_BIT (bmap[bb], indx);
+ FOR_EACH_BB (bb)
+ if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
+ SET_BIT (bmap[bb->index], indx);
}
else
{
{
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
{
- for (bb = 0; bb < n_basic_blocks; bb++)
- if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
- RESET_BIT (bmap[bb], indx);
+ FOR_EACH_BB (bb)
+ if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
+ RESET_BIT (bmap[bb->index], indx);
}
else
{
return;
case MEM:
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
{
- rtx list_entry = canon_modify_mem_list[bb];
+ rtx list_entry = canon_modify_mem_list[bb->index];
while (list_entry)
{
if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
{
if (set_p)
- SET_BIT (bmap[bb], indx);
+ SET_BIT (bmap[bb->index], indx);
else
- RESET_BIT (bmap[bb], indx);
+ RESET_BIT (bmap[bb->index], indx);
break;
}
/* LIST_ENTRY must be an INSN of some kind that sets memory.
x, rtx_addr_varies_p))
{
if (set_p)
- SET_BIT (bmap[bb], indx);
+ SET_BIT (bmap[bb->index], indx);
else
- RESET_BIT (bmap[bb], indx);
+ RESET_BIT (bmap[bb->index], indx);
break;
}
list_entry = XEXP (list_entry, 1);
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
case ADDR_VEC:
This can not happen since the set of (reg Y) would have killed the
set of (reg X) making it unavailable at the start of this block. */
while (1)
- {
+ {
rtx src;
struct expr *set = lookup_set (regno, NULL_RTX);
/* Follow the copy chain, ie start another iteration of the loop
and see if we have an available copy into SRC. */
regno = REGNO (src);
- }
+ }
/* SET1 holds the last set that was available and anticipatable at
INSN. */
if (rtx_equal_p (new, SET_SRC (set)))
return 0;
- /* If this is now a no-op leave it that way, but update LABEL_NUSED if
- necessary. */
+ /* If this is now a no-op delete it, otherwise this must be a valid insn. */
if (new == pc_rtx)
+ delete_insn (insn);
+ else
{
- SET_SRC (set) = new;
-
- if (JUMP_LABEL (insn) != 0)
- {
- --LABEL_NUSES (JUMP_LABEL (insn));
- JUMP_LABEL (insn) = NULL_RTX;
- }
- }
-
- /* Otherwise, this must be a valid instruction. */
- else if (! validate_change (insn, &SET_SRC (set), new, 0))
- return 0;
+ if (! validate_change (insn, &SET_SRC (set), new, 0))
+ return 0;
- /* If this has turned into an unconditional jump,
- then put a barrier after it so that the unreachable
- code will be deleted. */
- if (GET_CODE (SET_SRC (set)) == LABEL_REF)
- emit_barrier_after (insn);
+ /* If this has turned into an unconditional jump,
+ then put a barrier after it so that the unreachable
+ code will be deleted. */
+ if (GET_CODE (SET_SRC (set)) == LABEL_REF)
+ emit_barrier_after (insn);
+ }
run_jump_opt_after_gcse = 1;
delete_insn (insn);
return 1;
- }
+}
#endif
/* Perform constant and copy propagation on INSN.
src = SET_SRC (pat);
/* Constant propagation. */
- if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE
- || GET_CODE (src) == SYMBOL_REF)
+ if (CONSTANT_P (src))
{
/* Handle normal insns first. */
if (GET_CODE (insn) == INSN
cprop (alter_jumps)
int alter_jumps;
{
- int bb, changed;
+ int changed;
+ basic_block bb;
rtx insn;
/* Note we start at block 1. */
+ if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
+ {
+ if (gcse_file != NULL)
+ fprintf (gcse_file, "\n");
+ return 0;
+ }
changed = 0;
- for (bb = 1; bb < n_basic_blocks; bb++)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
{
/* Reset tables used to keep track of what's still valid [since the
start of the block]. */
reset_opr_set_tables ();
- for (insn = BLOCK_HEAD (bb);
- insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
+ for (insn = bb->head;
+ insn != NULL && insn != NEXT_INSN (bb->end);
insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
- changed |= cprop_insn (BASIC_BLOCK (bb), insn, alter_jumps);
+ changed |= cprop_insn (bb, insn, alter_jumps);
/* Keep track of everything modified by this insn. */
/* ??? Need to be careful w.r.t. mods done to INSN. Don't
call mark_oprs_set if we turned the insn into a NOTE. */
if (GET_CODE (insn) != NOTE)
mark_oprs_set (insn);
- }
+ }
}
if (gcse_file != NULL)
n_sets);
if (n_sets > 0)
{
- alloc_cprop_mem (n_basic_blocks, n_sets);
+ alloc_cprop_mem (last_basic_block, n_sets);
compute_cprop_data ();
changed = cprop (alter_jumps);
free_cprop_mem ();
compute_pre_data ()
{
sbitmap trapping_expr;
- int i;
+ basic_block bb;
unsigned int ui;
compute_local_properties (transp, comp, antloc, 0);
- sbitmap_vector_zero (ae_kill, n_basic_blocks);
+ sbitmap_vector_zero (ae_kill, last_basic_block);
/* Collect expressions which might trap. */
trapping_expr = sbitmap_alloc (n_exprs);
This is significantly faster than compute_ae_kill. */
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (bb)
{
edge e;
kill all trapping expressions because we won't be able to properly
place the instruction on the edge. So make them neither
anticipatable nor transparent. This is fairly conservative. */
- for (e = BASIC_BLOCK (i)->pred; e ; e = e->pred_next)
+ for (e = bb->pred; e ; e = e->pred_next)
if (e->flags & EDGE_ABNORMAL)
{
- sbitmap_difference (antloc[i], antloc[i], trapping_expr);
- sbitmap_difference (transp[i], transp[i], trapping_expr);
+ sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
+ sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
break;
}
- sbitmap_a_or_b (ae_kill[i], transp[i], comp[i]);
- sbitmap_not (ae_kill[i], ae_kill[i]);
+ sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
+ sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
}
edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
antloc = NULL;
sbitmap_vector_free (ae_kill);
ae_kill = NULL;
- free (trapping_expr);
+ sbitmap_free (trapping_expr);
}
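
The kill computation above relies on the identity
ae_kill[bb] = ~(transp[bb] | comp[bb]): an expression is treated as
killed exactly when the block neither leaves its operands untouched nor
recomputes it locally. Clearing the kill bit for a computed expression
is harmless, because the comp term re-asserts the expression in the
availability equations; this is what makes the shortcut equivalent to,
and much cheaper than, compute_ae_kill.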
\f
/* PRE utilities */
basic_block bb;
{
int rval;
- char *visited = (char *) xcalloc (n_basic_blocks, 1);
+ char *visited = (char *) xcalloc (last_basic_block, 1);
- rval = pre_expr_reaches_here_p_work(occr_bb, expr, bb, visited);
+ rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
free (visited);
return rval;
pat = process_insert_insn (expr);
/* If the last insn is a jump, insert EXPR in front [taking care to
- handle cc0, etc. properly]. */
+ handle cc0, etc. properly]. Similarly, we need to take care of
+ trapping instructions in the presence of non-call exceptions. */
- if (GET_CODE (insn) == JUMP_INSN)
+ if (GET_CODE (insn) == JUMP_INSN
+ || (GET_CODE (insn) == INSN
+ && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))))
{
#ifdef HAVE_cc0
rtx note;
#endif
+ /* It should always be the case that we can put these instructions
+ anywhere in the basic block when performing PRE optimizations.
+ Check this. */
+ if (GET_CODE (insn) == INSN && pre
+ && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ && !TEST_BIT (transp[bb->index], expr->bitmap_index))
+ abort ();
/* If this is a jump table, then we can't insert stuff here. Since
we know the previous real insn must be the tablejump, we insert
/* Likewise if the last insn is a call, as will happen in the presence
of exception handling. */
- else if (GET_CODE (insn) == CALL_INSN)
+ else if (GET_CODE (insn) == CALL_INSN
+ && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))
{
/* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
we search backward and place the instructions before the first
}
}
+/* Emit a move from SRC to DEST after INSN, noting the equivalence with
+ the expression computed in INSN. */
+static rtx
+gcse_emit_move_after (src, dest, insn)
+ rtx src, dest, insn;
+{
+ rtx new;
+ rtx set = single_set (insn);
+ rtx note;
+ rtx eqv;
+
+ /* This should never fail since we're creating a reg->reg copy
+ we've verified to be valid. */
+
+ new = emit_insn_after (gen_rtx_SET (VOIDmode, dest, src), insn);
+
+ /* Note the equivalence for local CSE pass. */
+ if ((note = find_reg_equal_equiv_note (insn)))
+ eqv = XEXP (note, 0);
+ else
+ eqv = SET_SRC (set);
+
+ set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
+
+ return new;
+}
+
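The rationale for the new helper echoes the comments deleted from
pre_delete and hoist_code below: on some targets (e.g. x86) the movXX
patterns contain clobbers of scratch regs, so validate_change on the
original SET could fail to match any pattern. Emitting a fresh
(set dest src) after the insn and deleting the original side-steps
pattern matching; the call sites below become:

    gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
    delete_insn (insn);

and the REG_EQUAL note preserves the equivalence of the original
computation for the later local CSE pass.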
/* Delete redundant computations.
Deletion is done by changing the insn to copy the `reaching_reg' of
the expression into the result of the SET. It is left to later passes
expr->reaching_reg
= gen_reg_rtx (GET_MODE (SET_DEST (set)));
- /* In theory this should never fail since we're creating
- a reg->reg copy.
-
- However, on the x86 some of the movXX patterns actually
- contain clobbers of scratch regs. This may cause the
- insn created by validate_change to not match any pattern
- and thus cause validate_change to fail. */
- if (validate_change (insn, &SET_SRC (set),
- expr->reaching_reg, 0))
- {
- occr->deleted_p = 1;
- SET_BIT (pre_redundant_insns, INSN_CUID (insn));
- changed = 1;
- gcse_subst_count++;
- }
+ gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
+ delete_insn (insn);
+ occr->deleted_p = 1;
+ SET_BIT (pre_redundant_insns, INSN_CUID (insn));
+ changed = 1;
+ gcse_subst_count++;
if (gcse_file)
{
}
free (index_map);
- free (pre_redundant_insns);
+ sbitmap_free (pre_redundant_insns);
return changed;
}
if (n_exprs > 0)
{
- alloc_pre_mem (n_basic_blocks, n_exprs);
+ alloc_pre_mem (last_basic_block, n_exprs);
compute_pre_data ();
changed |= pre_gcse ();
free_edge_list (edge_list);
static void
compute_transpout ()
{
- int bb;
+ basic_block bb;
unsigned int i;
struct expr *expr;
- sbitmap_vector_ones (transpout, n_basic_blocks);
+ sbitmap_vector_ones (transpout, last_basic_block);
- for (bb = 0; bb < n_basic_blocks; ++bb)
+ FOR_EACH_BB (bb)
{
/* Note that flow inserted a nop at the end of basic blocks that
end in call instructions for reasons other than abnormal
control flow. */
- if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
+ if (GET_CODE (bb->end) != CALL_INSN)
continue;
for (i = 0; i < expr_hash_table_size; i++)
/* ??? Optimally, we would use interprocedural alias
analysis to determine if this mem is actually killed
by this call. */
- RESET_BIT (transpout[bb], expr->bitmap_index);
+ RESET_BIT (transpout[bb->index], expr->bitmap_index);
}
}
}
regno = REGNO (x) - npi->min_reg;
- RESET_BIT (npi->nonnull_local[npi->current_block], regno);
- SET_BIT (npi->nonnull_killed[npi->current_block], regno);
+ RESET_BIT (npi->nonnull_local[npi->current_block->index], regno);
+ SET_BIT (npi->nonnull_killed[npi->current_block->index], regno);
}
/* Do null-pointer check elimination for the registers indicated in
sbitmap *nonnull_avout;
struct null_pointer_info *npi;
{
- int bb;
- int current_block;
+ basic_block bb, current_block;
sbitmap *nonnull_local = npi->nonnull_local;
sbitmap *nonnull_killed = npi->nonnull_killed;
Note that a register can have both properties in a single block. That
indicates that it's killed, then later in the block a new value is
computed. */
- sbitmap_vector_zero (nonnull_local, n_basic_blocks);
- sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
+ sbitmap_vector_zero (nonnull_local, last_basic_block);
+ sbitmap_vector_zero (nonnull_killed, last_basic_block);
- for (current_block = 0; current_block < n_basic_blocks; current_block++)
+ FOR_EACH_BB (current_block)
{
rtx insn, stop_insn;
/* Scan each insn in the basic block looking for memory references and
register sets. */
- stop_insn = NEXT_INSN (BLOCK_END (current_block));
- for (insn = BLOCK_HEAD (current_block);
+ stop_insn = NEXT_INSN (current_block->end);
+ for (insn = current_block->head;
insn != stop_insn;
insn = NEXT_INSN (insn))
{
&& GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
&& REGNO (reg) >= npi->min_reg
&& REGNO (reg) < npi->max_reg)
- SET_BIT (nonnull_local[current_block],
+ SET_BIT (nonnull_local[current_block->index],
REGNO (reg) - npi->min_reg);
/* Now invalidate stuff clobbered by this insn. */
&& GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
&& REGNO (reg) >= npi->min_reg
&& REGNO (reg) < npi->max_reg)
- SET_BIT (nonnull_local[current_block],
+ SET_BIT (nonnull_local[current_block->index],
REGNO (reg) - npi->min_reg);
}
}
/* Now look at each bb and see if it ends with a compare of a value
against zero. */
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
{
- rtx last_insn = BLOCK_END (bb);
+ rtx last_insn = bb->end;
rtx condition, earliest;
int compare_and_branch;
/* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
since BLOCK_REG[BB] is zero if this block did not end with a
comparison against zero, this condition works. */
- if (block_reg[bb] < npi->min_reg
- || block_reg[bb] >= npi->max_reg)
+ if (block_reg[bb->index] < npi->min_reg
+ || block_reg[bb->index] >= npi->max_reg)
continue;
/* LAST_INSN is a conditional jump. Get its condition. */
continue;
/* Is the register known to have a nonzero value? */
- if (!TEST_BIT (nonnull_avout[bb], block_reg[bb] - npi->min_reg))
+ if (!TEST_BIT (nonnull_avout[bb->index], block_reg[bb->index] - npi->min_reg))
continue;
/* Try to compute whether the compare/branch at the loop end is one or
{
rtx new_jump;
- new_jump = emit_jump_insn_before (gen_jump (JUMP_LABEL (last_insn)),
- last_insn);
+ new_jump = emit_jump_insn_after (gen_jump (JUMP_LABEL (last_insn)),
+ last_insn);
JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
LABEL_NUSES (JUMP_LABEL (new_jump))++;
emit_barrier_after (new_jump);
delete_insn (last_insn);
if (compare_and_branch == 2)
delete_insn (earliest);
- purge_dead_edges (BASIC_BLOCK (bb));
+ purge_dead_edges (bb);
/* Don't check this block again. (Note that BLOCK_END is
invalid here; we deleted the last instruction in the
block.) */
- block_reg[bb] = 0;
+ block_reg[bb->index] = 0;
}
}
{
sbitmap *nonnull_avin, *nonnull_avout;
unsigned int *block_reg;
- int bb;
+ basic_block bb;
int reg;
int regs_per_pass;
int max_reg;
/* We need four bitmaps, each with a bit for each register in each
basic block. */
max_reg = max_reg_num ();
- regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
+ regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
/* Allocate bitmaps to hold local and global properties. */
- npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
- npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
- nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
- nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
+ npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
+ npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
+ nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
+ nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
/* Go through the basic blocks, seeing whether or not each block
ends with a conditional branch whose condition is a comparison
against zero. Record the register compared in BLOCK_REG. */
- block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
- for (bb = 0; bb < n_basic_blocks; bb++)
+ block_reg = (unsigned int *) xcalloc (last_basic_block, sizeof (int));
+ FOR_EACH_BB (bb)
{
- rtx last_insn = BLOCK_END (bb);
+ rtx last_insn = bb->end;
rtx condition, earliest, reg;
/* We only want conditional branches. */
if (GET_CODE (reg) != REG)
continue;
- block_reg[bb] = REGNO (reg);
+ block_reg[bb->index] = REGNO (reg);
}
/* Go through the algorithm for each block of registers. */
static void
compute_code_hoist_vbeinout ()
{
- int bb, changed, passes;
+ int changed, passes;
+ basic_block bb;
- sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
- sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
+ sbitmap_vector_zero (hoist_vbeout, last_basic_block);
+ sbitmap_vector_zero (hoist_vbein, last_basic_block);
passes = 0;
changed = 1;
/* We scan the blocks in the reverse order to speed up
the convergence. */
- for (bb = n_basic_blocks - 1; bb >= 0; bb--)
+ FOR_EACH_BB_REVERSE (bb)
{
- changed |= sbitmap_a_or_b_and_c (hoist_vbein[bb], antloc[bb],
- hoist_vbeout[bb], transp[bb]);
- if (bb != n_basic_blocks - 1)
- sbitmap_intersection_of_succs (hoist_vbeout[bb], hoist_vbein, bb);
+ changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
+ hoist_vbeout[bb->index], transp[bb->index]);
+ if (bb->next_bb != EXIT_BLOCK_PTR)
+ sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
}
passes++;
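
compute_code_hoist_vbeinout is a backward very-busy-expressions solver;
restated in the bitmap names used here:

    hoist_vbein[bb]  = antloc[bb] UNION (hoist_vbeout[bb] INTERSECT transp[bb])
    hoist_vbeout[bb] = INTERSECT over s in succs (bb) of hoist_vbein[s]

As with compute_rd, the _cg variant of the sbitmap operation reports
whether the set changed, driving convergence; iterating with
FOR_EACH_BB_REVERSE merely speeds convergence, since the information
flows against the edge direction.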
if (visited == NULL)
{
- visited_allocated_locally = 1;
- visited = xcalloc (n_basic_blocks, 1);
+ visited_allocated_locally = 1;
+ visited = xcalloc (last_basic_block, 1);
}
for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
static void
hoist_code ()
{
- int bb, dominated;
+ basic_block bb, dominated;
unsigned int i;
struct expr **index_map;
struct expr *expr;
- sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
+ sbitmap_vector_zero (hoist_exprs, last_basic_block);
/* Compute a mapping from expression number (`bitmap_index') to
hash table entry. */
/* Walk over each basic block looking for potentially hoistable
expressions, nothing gets hoisted from the entry block. */
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
{
int found = 0;
int insn_inserted_p;
/* Examine each expression that is very busy at the exit of this
block. These are the potentially hoistable expressions. */
- for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
+ for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
{
int hoistable = 0;
- if (TEST_BIT (hoist_vbeout[bb], i) && TEST_BIT (transpout[bb], i))
+ if (TEST_BIT (hoist_vbeout[bb->index], i) && TEST_BIT (transpout[bb->index], i))
{
/* We've found a potentially hoistable expression, now
we look at every block BB dominates to see if it
computes the expression. */
- for (dominated = 0; dominated < n_basic_blocks; dominated++)
+ FOR_EACH_BB (dominated)
{
/* Ignore self dominance. */
if (bb == dominated
- || ! TEST_BIT (dominators[dominated], bb))
+ || ! TEST_BIT (dominators[dominated->index], bb->index))
continue;
/* We've found a dominated block, now see if it computes
the busy expression and whether or not moving that
expression to the "beginning" of that block is safe. */
- if (!TEST_BIT (antloc[dominated], i))
+ if (!TEST_BIT (antloc[dominated->index], i))
continue;
/* Note if the expression would reach the dominated block
Keep track of how many times this expression is hoistable
from a dominated block into BB. */
- if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
- BASIC_BLOCK (dominated), NULL))
+ if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
hoistable++;
}
to nullify any benefit we get from code hoisting. */
if (hoistable > 1)
{
- SET_BIT (hoist_exprs[bb], i);
+ SET_BIT (hoist_exprs[bb->index], i);
found = 1;
}
}
continue;
/* Loop over all the hoistable expressions. */
- for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
+ for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
{
/* We want to insert the expression into BB only once, so
note when we've inserted it. */
insn_inserted_p = 0;
/* These tests should be the same as the tests above. */
- if (TEST_BIT (hoist_vbeout[bb], i))
+ if (TEST_BIT (hoist_vbeout[bb->index], i))
{
/* We've found a potentially hoistable expression, now
we look at every block BB dominates to see if it
computes the expression. */
- for (dominated = 0; dominated < n_basic_blocks; dominated++)
+ FOR_EACH_BB (dominated)
{
/* Ignore self dominance. */
if (bb == dominated
- || ! TEST_BIT (dominators[dominated], bb))
+ || ! TEST_BIT (dominators[dominated->index], bb->index))
continue;
/* We've found a dominated block, now see if it computes
the busy expression and whether or not moving that
expression to the "beginning" of that block is safe. */
- if (!TEST_BIT (antloc[dominated], i))
+ if (!TEST_BIT (antloc[dominated->index], i))
continue;
/* The expression is computed in the dominated block and
dominated block. Now we have to determine if the
expression would reach the dominated block if it was
placed at the end of BB. */
- if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
- BASIC_BLOCK (dominated), NULL))
+ if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
{
struct expr *expr = index_map[i];
struct occr *occr = expr->antic_occr;
rtx set;
/* Find the right occurrence of this expression. */
- while (BLOCK_NUM (occr->insn) != dominated && occr)
+ while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
occr = occr->next;
/* Should never happen. */
expr->reaching_reg
= gen_reg_rtx (GET_MODE (SET_DEST (set)));
- /* In theory this should never fail since we're creating
- a reg->reg copy.
-
- However, on the x86 some of the movXX patterns
- actually contain clobbers of scratch regs. This may
- cause the insn created by validate_change to not
- match any pattern and thus cause validate_change to
- fail. */
- if (validate_change (insn, &SET_SRC (set),
- expr->reaching_reg, 0))
+ gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
+ delete_insn (insn);
+ occr->deleted_p = 1;
+ if (!insn_inserted_p)
{
- occr->deleted_p = 1;
- if (!insn_inserted_p)
- {
- insert_insn_end_bb (index_map[i],
- BASIC_BLOCK (bb), 0);
- insn_inserted_p = 1;
- }
+ insert_insn_end_bb (index_map[i], bb, 0);
+ insn_inserted_p = 1;
}
}
}
}
}
- free (index_map);
+ free (index_map);
}
/* Top level routine to perform one code hoisting (aka unification) pass
if (n_exprs > 0)
{
- alloc_code_hoist_mem (n_basic_blocks, n_exprs);
+ alloc_code_hoist_mem (last_basic_block, n_exprs);
compute_code_hoist_data ();
hoist_code ();
free_code_hoist_mem ();
rtx x;
{
const char * fmt;
- int i,j;
+ int i, j;
struct ls_expr * ptr;
/* Invalidate it in the list. */
compute_ld_motion_mems ()
{
struct ls_expr * ptr;
- int bb;
+ basic_block bb;
rtx insn;
pre_ldst_mems = NULL;
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
{
- for (insn = BLOCK_HEAD (bb);
- insn && insn != NEXT_INSN (BLOCK_END (bb));
+ for (insn = bb->head;
+ insn && insn != NEXT_INSN (bb->end);
insn = NEXT_INSN (insn))
{
if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
case ADDR_VEC:
static int
compute_store_table ()
{
- int bb, ret;
+ int ret;
+ basic_block bb;
unsigned regno;
rtx insn, pat;
max_gcse_regno = max_reg_num ();
- reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
max_gcse_regno);
- sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
+ sbitmap_vector_zero (reg_set_in_block, last_basic_block);
pre_ldst_mems = 0;
/* Find all the stores we care about. */
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
{
- regvec = & (reg_set_in_block[bb]);
- for (insn = BLOCK_END (bb);
- insn && insn != PREV_INSN (BLOCK_HEAD (bb));
+ regvec = & (reg_set_in_block[bb->index]);
+ for (insn = bb->end;
+ insn && insn != PREV_INSN (bb->head);
insn = PREV_INSN (insn))
{
/* Ignore anything that is not a normal insn. */
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (clobbers_all
|| TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
- SET_BIT (reg_set_in_block[bb], regno);
+ SET_BIT (reg_set_in_block[bb->index], regno);
}
pat = PATTERN (insn);
rtx x, store_pattern;
{
const char * fmt;
- int i,j;
+ int i, j;
int ret = 0;
if (!x)
{
/* A normal or pure call might read from pattern,
but a const call will not. */
- if (CONST_OR_PURE_CALL_P (insn))
- {
- rtx link;
-
- for (link = CALL_INSN_FUNCTION_USAGE (insn);
- link;
- link = XEXP (link, 1))
- if (GET_CODE (XEXP (link, 0)) == USE
- && GET_CODE (XEXP (XEXP (link, 0), 0)) == MEM
- && GET_CODE (XEXP (XEXP (XEXP (link, 0), 0), 0)) == SCRATCH)
- return 1;
- return 0;
- }
- else
- return 1;
+ return ! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn);
}
if (GET_CODE (PATTERN (insn)) == SET)
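
The collapsed return is equivalent to the deleted block assuming
pure_call_p recognizes the (use (mem (scratch))) marker that
distinguishes pure from const calls in CALL_INSN_FUNCTION_USAGE: an
ordinary call yields 1 (it may read the stored location), a pure call
likewise yields 1, and a const call yields 0, since it reads no memory
and therefore cannot kill the store.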
rtx x, insn;
basic_block bb;
{
- rtx last = bb->end;
+ rtx last = bb->end;
- if (insn == last)
- return 0;
+ if (insn == last)
+ return 0;
/* Check if the register operands of the store are OK in this block.
Note that if registers are changed ANYWHERE in the block, we'll
if (!store_ops_ok (XEXP (x, 0), bb))
return 1;
- for ( ; insn && insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
- if (store_killed_in_insn (x, insn))
- return 1;
+ for ( ; insn && insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
+ if (store_killed_in_insn (x, insn))
+ return 1;
return 0;
}
rtx x, insn;
basic_block bb;
{
- rtx first = bb->head;
+ rtx first = bb->head;
- if (insn == first)
- return store_killed_in_insn (x, insn);
+ if (insn == first)
+ return store_killed_in_insn (x, insn);
/* Check if the register operands of the store are OK in this block.
Note that if registers are changed ANYWHERE in the block, we'll
if (!store_ops_ok (XEXP (x, 0), bb))
return 1;
- for ( ; insn && insn != PREV_INSN (first); insn = PREV_INSN (insn))
- if (store_killed_in_insn (x, insn))
- return 1;
+ for ( ; insn && insn != PREV_INSN (first); insn = PREV_INSN (insn))
+ if (store_killed_in_insn (x, insn))
+ return 1;
- return 0;
+ return 0;
}
#define ANTIC_STORE_LIST(x) ((x)->loads)
static void
build_store_vectors ()
{
- basic_block bb;
- int b;
+ basic_block bb, b;
rtx insn, st;
struct ls_expr * ptr;
/* Build the gen_vector. This is any store in the table which is not killed
by aliasing later in its block. */
- ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
- sbitmap_vector_zero (ae_gen, n_basic_blocks);
+ ae_gen = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
+ sbitmap_vector_zero (ae_gen, last_basic_block);
- st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
- sbitmap_vector_zero (st_antloc, n_basic_blocks);
+ st_antloc = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
+ sbitmap_vector_zero (st_antloc, last_basic_block);
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
{
{
rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
if (gcse_file)
- fprintf(gcse_file, "Removing redundant store:\n");
+ fprintf (gcse_file, "Removing redundant store:\n");
replace_store_insn (r, XEXP (st, 0), bb);
XEXP (st, 0) = insn;
continue;
free_INSN_LIST_list (&store_list);
}
- ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
- sbitmap_vector_zero (ae_kill, n_basic_blocks);
+ ae_kill = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
+ sbitmap_vector_zero (ae_kill, last_basic_block);
- transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
- sbitmap_vector_zero (transp, n_basic_blocks);
+ transp = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
+ sbitmap_vector_zero (transp, last_basic_block);
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
- for (b = 0; b < n_basic_blocks; b++)
+ FOR_EACH_BB (b)
{
- if (store_killed_after (ptr->pattern, BLOCK_HEAD (b), BASIC_BLOCK (b)))
+ if (store_killed_after (ptr->pattern, b->head, b))
{
/* The anticipatable expression is not killed if it's gen'd. */
/*
If we always kill it in this case, we'll sometimes do
unnecessary work, but it shouldn't actually hurt anything.
if (!TEST_BIT (ae_gen[b->index], ptr->index)). */
- SET_BIT (ae_kill[b], ptr->index);
+ SET_BIT (ae_kill[b->index], ptr->index);
}
else
- SET_BIT (transp[b], ptr->index);
+ SET_BIT (transp[b->index], ptr->index);
}
/* Any block with no exits calls some non-returning function, so
{
fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
print_ldst_list (gcse_file);
- dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, n_basic_blocks);
- dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, n_basic_blocks);
- dump_sbitmap_vector (gcse_file, "Transpt", "", transp, n_basic_blocks);
- dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, n_basic_blocks);
+ dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
+ dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
+ dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
+ dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
}
}
fprintf (gcse_file,
"STORE_MOTION delete insn in BB %d:\n ", bb->index);
print_inline_rtx (gcse_file, del, 6);
- fprintf(gcse_file, "\nSTORE MOTION replaced with insn:\n ");
+ fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
print_inline_rtx (gcse_file, insn, 6);
- fprintf(gcse_file, "\n");
+ fprintf (gcse_file, "\n");
}
delete_insn (del);
static void
store_motion ()
{
+ basic_block bb;
int x;
struct ls_expr * ptr;
int update_flow = 0;
/* Now we want to insert the new stores which are going to be needed. */
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
{
- for (x = 0; x < n_basic_blocks; x++)
- if (TEST_BIT (pre_delete_map[x], ptr->index))
- delete_store (ptr, BASIC_BLOCK (x));
+ FOR_EACH_BB (bb)
+ if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
+ delete_store (ptr, bb);
for (x = 0; x < NUM_EDGES (edge_list); x++)
if (TEST_BIT (pre_insert_map[x], ptr->index))