/* Matrix layout transformations.
- Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
Contributed by Razya Ladelsky <razya@il.ibm.com>
Originally written by Revital Eres and Mustafa Hagog.
-
+
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
/*
- Matrix flattening optimization tries to replace a N-dimensional
+   Matrix flattening optimization tries to replace an N-dimensional
matrix with its equivalent M-dimensional matrix, where M < N.
This first implementation focuses on global matrices defined dynamically.
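   For instance, a sketch of the intended effect on a dynamically
   allocated two-dimensional matrix (variable names are illustrative):

     a = (int **) malloc (dim1 * sizeof (int *));
     for (i = 0; i < dim1; i++)
       a[i] = (int *) malloc (dim2 * sizeof (int));

   is, when the analysis allows it, replaced by a single allocation

     a = (int *) malloc (dim1 * dim2 * sizeof (int));

   and accesses of the form a[i][j] are rewritten as a[i*dim2 + j].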
   The two main phases of the optimization are the analysis and transformation.
The driver of the optimization is matrix_reorg ().
-
-
+
+
Analysis phase:
===============
- We'll number the dimensions outside-in, meaning the most external
- is 0, then 1, and so on.
- The analysis part of the optimization determines K, the escape
- level of a N-dimensional matrix (K <= N), that allows flattening of
+ We'll number the dimensions outside-in, meaning the most external
+ is 0, then 1, and so on.
+ The analysis part of the optimization determines K, the escape
+   level of an N-dimensional matrix (K <= N), that allows flattening of
the external dimensions 0,1,..., K-1. Escape level 0 means that the
whole matrix escapes and no flattening is possible.
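   For instance (a sketch): for a three-dimensional matrix (N == 3), an
   escape level K == 2 means that the two external dimensions 0 and 1 can
   be collapsed into one, while dimension 2 keeps its own level of
   indirection.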
-
- The analysis part is implemented in analyze_matrix_allocation_site()
+
+ The analysis part is implemented in analyze_matrix_allocation_site()
and analyze_matrix_accesses().
Transformation phase:
=====================
- In this phase we define the new flattened matrices that replace the
- original matrices in the code.
- Implemented in transform_allocation_sites(),
- transform_access_sites().
+ In this phase we define the new flattened matrices that replace the
+ original matrices in the code.
+ Implemented in transform_allocation_sites(),
+ transform_access_sites().
Matrix Transposing
==================
- The idea of Matrix Transposing is organizing the matrix in a different
+ The idea of Matrix Transposing is organizing the matrix in a different
layout such that the dimensions are reordered.
This could produce better cache behavior in some cases.
   For example, consider matrix accesses in the following loop nest:

   for (i=0; i<N; i++)
    for (j=0; j<M; j++)
     access to a[i][j]
- This loop can produce good cache behavior because the elements of
+ This loop can produce good cache behavior because the elements of
the inner dimension are accessed sequentially.
However, if the accesses of the matrix were of the following form:
   for (i=0; i<N; i++)
    for (j=0; j<M; j++)
     access to a[j][i]
- In this loop we iterate the columns and not the rows.
- Therefore, replacing the rows and columns
+ In this loop we iterate the columns and not the rows.
+ Therefore, replacing the rows and columns
   would give a layout with better (cache) locality.
Replacing the dimensions of the matrix is called matrix transposing.
- This example, of course, could be enhanced to multiple dimensions matrices
+   This example, of course, could be extended to matrices with more dimensions
as well.
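   For instance (a sketch): if the dominant accesses have the a[j][i]
   form above, with j iterated by the inner loop, transposing swaps the
   two dimensions of "a", so the rewritten access again walks the
   innermost dimension of the new layout sequentially:

   for (j=0; j<M; j++)
    access to a'[i][j]   (a' is the transposed matrix)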
- Since a program could include all kind of accesses, there is a decision
- mechanism, implemented in analyze_transpose(), which implements a
+   Since a program could include all kinds of accesses, there is a decision
+   mechanism, implemented in analyze_transpose(), which implements a
   heuristic that tries to determine whether to transpose the matrix or not,
   according to the form of the most dominant accesses.
- This decision is transferred to the flattening mechanism, and whether
+ This decision is transferred to the flattening mechanism, and whether
the matrix was transposed or not, the matrix is flattened (if possible).
-
+
This decision making is based on profiling information and loop information.
- If profiling information is available, decision making mechanism will be
+   If profiling information is available, the decision-making mechanism is
   applied; otherwise the matrix will only be flattened (if possible).
- Both optimizations are described in the paper "Matrix flattening and
- transposing in GCC" which was presented in GCC summit 2006.
- http://www.gccsummit.org/2006/2006-GCC-Summit-Proceedings.pdf
-
- */
+ Both optimizations are described in the paper "Matrix flattening and
+ transposing in GCC" which was presented in GCC summit 2006.
+ http://www.gccsummit.org/2006/2006-GCC-Summit-Proceedings.pdf. */
#include "config.h"
#include "system.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
-#include "c-tree.h"
#include "tree-inline.h"
#include "tree-flow.h"
#include "tree-flow-inline.h"
#include "langhooks.h"
#include "hashtab.h"
-#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "debug.h"
#include "target.h"
#include "cgraph.h"
-#include "diagnostic.h"
+#include "diagnostic-core.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
-#include "c-common.h"
#include "intl.h"
#include "function.h"
#include "basic-block.h"
#include "tree-data-ref.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
+#include "tree-ssa-sccvn.h"
-/*
- We need to collect a lot of data from the original malloc,
+/* We need to collect a lot of data from the original malloc,
particularly as the gimplifier has converted:
orig_var = (struct_type *) malloc (x * sizeof (struct_type *));
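   into a sequence of simpler statements.  A sketch of the gimplified
   form (the temporary names T3, T4 and T5 are illustrative):

     T3 = <constant>;              (the requested size)
     T4 = malloc (T3);             (the malloc call)
     T5 = (struct_type *) T4;      (the type cast)
     orig_var = T5;                (assignment to the original variable)

   The fields below are named after this example.  */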
struct malloc_call_data
{
- tree call_stmt; /* Tree for "T4 = malloc (T3);" */
+  gimple call_stmt;		/* Statement for "T4 = malloc (T3);"  */
tree size_var; /* Var decl for T3. */
tree malloc_size; /* Tree for "<constant>", the rhs assigned to T3. */
};
+static tree can_calculate_expr_before_stmt (tree, sbitmap);
+static tree can_calculate_stmt_before_stmt (gimple, sbitmap);
+
/* When the front end of the compiler has parsed statements of the form:
   var = (type_cast) malloc (sizeof (type));
   we need to find the rest of the variables/statements on our own.  That
is what the following function does. */
static void
-collect_data_for_malloc_call (tree stmt, struct malloc_call_data *m_data)
+collect_data_for_malloc_call (gimple stmt, struct malloc_call_data *m_data)
{
tree size_var = NULL;
tree malloc_fn_decl;
- tree tmp;
tree arg1;
- gcc_assert (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT);
+ gcc_assert (is_gimple_call (stmt));
- tmp = get_call_expr_in (stmt);
- malloc_fn_decl = CALL_EXPR_FN (tmp);
- if (TREE_CODE (malloc_fn_decl) != ADDR_EXPR
- || TREE_CODE (TREE_OPERAND (malloc_fn_decl, 0)) != FUNCTION_DECL
- || DECL_FUNCTION_CODE (TREE_OPERAND (malloc_fn_decl, 0)) !=
- BUILT_IN_MALLOC)
+ malloc_fn_decl = gimple_call_fndecl (stmt);
+ if (malloc_fn_decl == NULL
+ || DECL_FUNCTION_CODE (malloc_fn_decl) != BUILT_IN_MALLOC)
return;
- arg1 = CALL_EXPR_ARG (tmp, 0);
+ arg1 = gimple_call_arg (stmt, 0);
size_var = arg1;
m_data->call_stmt = stmt;
initial address and index of each dimension. */
struct access_site_info
{
- /* The statement (INDIRECT_REF or PLUS_EXPR). */
- tree stmt;
+ /* The statement (MEM_REF or POINTER_PLUS_EXPR). */
+ gimple stmt;
- /* In case of PLUS_EXPR, what is the offset. */
+ /* In case of POINTER_PLUS_EXPR, what is the offset. */
tree offset;
/* The index which created the offset. */
DEF_VEC_P (access_site_info_p);
DEF_VEC_ALLOC_P (access_site_info_p, heap);
+/* Calls to free when flattening a matrix. */
+
+struct free_info
+{
+ gimple stmt;
+ tree func;
+};
+
/* Information about matrix to flatten. */
struct matrix_info
{
0 to ACTUAL_DIM - k escapes. */
int min_indirect_level_escape;
- tree min_indirect_level_escape_stmt;
-
- /* Is the matrix transposed. */
- bool is_transposed_p;
+ gimple min_indirect_level_escape_stmt;
/* Hold the allocation site for each level (dimension).
We can use NUM_DIMS as the upper bound and allocate the array
once with this number of elements and no need to use realloc and
MAX_MALLOCED_LEVEL. */
- tree *malloc_for_level;
+ gimple *malloc_for_level;
int max_malloced_level;
+ /* Is the matrix transposed. */
+ bool is_transposed_p;
+
/* The location of the allocation sites (they must be in one
function). */
tree allocation_function_decl;
/* The calls to free for each level of indirection. */
- struct free_info
- {
- tree stmt;
- tree func;
- } *free_stmts;
+ struct free_info *free_stmts;
  /* An array which holds for each dimension its size, where
     dimension 0 is the outermost (the one that contains all the others).
*/
tree *dimension_size;
- /* An array which holds for each dimension it's original size
+  /* An array which holds for each dimension its original size
(before transposing and flattening take place). */
tree *dimension_size_orig;
/* An array of the accesses to be flattened.
elements are of type "struct access_site_info *". */
- VEC (access_site_info_p, heap) * access_l;
+ VEC (access_site_info_p, heap) * access_l;
- /* A map of how the dimensions will be organized at the end of
+ /* A map of how the dimensions will be organized at the end of
the analyses. */
int *dim_map;
};
struct matrix_access_phi_node
{
- tree phi;
+ gimple phi;
int indirection_level;
};
/* The variable whose accesses in the tree we are looking for. */
tree ssa_var;
/* The tree and code inside it the ssa_var is accessed, currently
- it could be an INDIRECT_REF or CALL_EXPR. */
+     it could be a MEM_REF or CALL_EXPR.  */
enum tree_code t_code;
tree t_tree;
/* The place in the containing tree. */
static hashval_t
mat_acc_phi_hash (const void *p)
{
- const struct matrix_access_phi_node *ma_phi = p;
+ const struct matrix_access_phi_node *const ma_phi =
+ (const struct matrix_access_phi_node *) p;
return htab_hash_pointer (ma_phi->phi);
}
static int
mat_acc_phi_eq (const void *p1, const void *p2)
{
- const struct matrix_access_phi_node *phi1 = p1;
- const struct matrix_access_phi_node *phi2 = p2;
+ const struct matrix_access_phi_node *const phi1 =
+ (const struct matrix_access_phi_node *) p1;
+ const struct matrix_access_phi_node *const phi2 =
+ (const struct matrix_access_phi_node *) p2;
if (phi1->phi == phi2->phi)
return 1;
static hashval_t
mtt_info_hash (const void *mtt)
{
- return htab_hash_pointer (((struct matrix_info *) mtt)->decl);
+ return htab_hash_pointer (((const struct matrix_info *) mtt)->decl);
}
/* Return true if MTT1 and MTT2 (which are really both of type
static int
mtt_info_eq (const void *mtt1, const void *mtt2)
{
- const struct matrix_info *i1 = mtt1;
- const struct matrix_info *i2 = mtt2;
+ const struct matrix_info *const i1 = (const struct matrix_info *) mtt1;
+ const struct matrix_info *const i2 = (const struct matrix_info *) mtt2;
if (i1->decl == i2->decl)
return true;
return false;
}
-/* Return the inner most tree that is not a cast. */
-static tree
-get_inner_of_cast_expr (tree t)
-{
- while (TREE_CODE (t) == CONVERT_EXPR || TREE_CODE (t) == NOP_EXPR
- || TREE_CODE (t) == VIEW_CONVERT_EXPR)
- t = TREE_OPERAND (t, 0);
-
- return t;
-}
-
-/* Return false if STMT may contain a vector expression.
+/* Return false if STMT may contain a vector expression.
   In this situation, no matrices should be flattened.  */
static bool
-may_flatten_matrices_1 (tree stmt)
+may_flatten_matrices_1 (gimple stmt)
{
- tree t;
-
- switch (TREE_CODE (stmt))
+ switch (gimple_code (stmt))
{
- case GIMPLE_MODIFY_STMT:
- t = GIMPLE_STMT_OPERAND (stmt, 1);
- while (TREE_CODE (t) == CONVERT_EXPR || TREE_CODE (t) == NOP_EXPR)
+ case GIMPLE_ASSIGN:
+ case GIMPLE_CALL:
+ if (!gimple_has_lhs (stmt))
+ return true;
+ if (TREE_CODE (TREE_TYPE (gimple_get_lhs (stmt))) == VECTOR_TYPE)
{
- if (TREE_TYPE (t) && POINTER_TYPE_P (TREE_TYPE (t)))
- {
- tree pointee;
-
- pointee = TREE_TYPE (t);
- while (POINTER_TYPE_P (pointee))
- pointee = TREE_TYPE (pointee);
- if (TREE_CODE (pointee) == VECTOR_TYPE)
- {
- if (dump_file)
- fprintf (dump_file,
- "Found vector type, don't flatten matrix\n");
- return false;
- }
- }
- t = TREE_OPERAND (t, 0);
+ if (dump_file)
+ fprintf (dump_file,
+ "Found vector type, don't flatten matrix\n");
+ return false;
}
break;
- case ASM_EXPR:
+ case GIMPLE_ASM:
/* Asm code could contain vector operations. */
return false;
break;
return true;
}
-/* Return false if there are hand-written vectors in the program.
+/* Return false if there are hand-written vectors in the program.
We disable the flattening in such a case. */
static bool
may_flatten_matrices (struct cgraph_node *node)
tree decl;
struct function *func;
basic_block bb;
- block_stmt_iterator bsi;
+ gimple_stmt_iterator gsi;
decl = node->decl;
if (node->analyzed)
{
func = DECL_STRUCT_FUNCTION (decl);
FOR_EACH_BB_FN (bb, func)
- for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
- if (!may_flatten_matrices_1 (bsi_stmt (bsi)))
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ if (!may_flatten_matrices_1 (gsi_stmt (gsi)))
return false;
}
return true;
/* Check to see if this pointer is already in there. */
tmpmi.decl = var_decl;
- mi = htab_find (matrices_to_reorg, &tmpmi);
+ mi = (struct matrix_info *) htab_find (matrices_to_reorg, &tmpmi);
if (mi)
return NULL;
if (!mat)
return;
- if (mat->free_stmts)
- free (mat->free_stmts);
- if (mat->dim_hot_level)
- free (mat->dim_hot_level);
- if (mat->malloc_for_level)
- free (mat->malloc_for_level);
+ free (mat->free_stmts);
+ free (mat->dim_hot_level);
+ free (mat->malloc_for_level);
}
/* Find all potential matrices.
/* Mark that the matrix MI escapes at level L. */
static void
-mark_min_matrix_escape_level (struct matrix_info *mi, int l, tree s)
+mark_min_matrix_escape_level (struct matrix_info *mi, int l, gimple s)
{
if (mi->min_indirect_level_escape == -1
|| (mi->min_indirect_level_escape > l))
/* Find if the SSA variable is accessed inside the
tree and record the tree containing it.
The only relevant uses are the case of SSA_NAME, or SSA inside
- INDIRECT_REF, CALL_EXPR, PLUS_EXPR, MULT_EXPR. */
+ MEM_REF, PLUS_EXPR, POINTER_PLUS_EXPR, MULT_EXPR. */
static void
ssa_accessed_in_tree (tree t, struct ssa_acc_in_tree *a)
{
- tree call, decl;
- tree arg;
- call_expr_arg_iterator iter;
-
a->t_code = TREE_CODE (t);
switch (a->t_code)
{
- tree op1, op2;
-
case SSA_NAME:
if (t == a->ssa_var)
a->var_found = true;
break;
- case INDIRECT_REF:
+ case MEM_REF:
if (SSA_VAR_P (TREE_OPERAND (t, 0))
&& TREE_OPERAND (t, 0) == a->ssa_var)
a->var_found = true;
break;
- case CALL_EXPR:
- FOR_EACH_CALL_EXPR_ARG (arg, iter, t)
- {
- if (arg == a->ssa_var)
- {
- a->var_found = true;
- call = get_call_expr_in (t);
- if (call && (decl = get_callee_fndecl (call)))
- a->t_tree = decl;
- break;
- }
- }
+ default:
+ break;
+ }
+}
+
+/* Find if the SSA variable is accessed on the right hand side of
+ gimple call STMT. */
+
+static void
+ssa_accessed_in_call_rhs (gimple stmt, struct ssa_acc_in_tree *a)
+{
+ tree decl;
+ tree arg;
+ size_t i;
+
+ a->t_code = CALL_EXPR;
+ for (i = 0; i < gimple_call_num_args (stmt); i++)
+ {
+ arg = gimple_call_arg (stmt, i);
+ if (arg == a->ssa_var)
+ {
+ a->var_found = true;
+ decl = gimple_call_fndecl (stmt);
+ a->t_tree = decl;
+ break;
+ }
+ }
+}
+
+/* Find if the SSA variable is accessed on the right hand side of
+ gimple assign STMT. */
+
+static void
+ssa_accessed_in_assign_rhs (gimple stmt, struct ssa_acc_in_tree *a)
+{
+
+ a->t_code = gimple_assign_rhs_code (stmt);
+ switch (a->t_code)
+ {
+ tree op1, op2;
+
+ case SSA_NAME:
+ case MEM_REF:
+ CASE_CONVERT:
+ case VIEW_CONVERT_EXPR:
+ ssa_accessed_in_tree (gimple_assign_rhs1 (stmt), a);
break;
+ case POINTER_PLUS_EXPR:
case PLUS_EXPR:
case MULT_EXPR:
- op1 = TREE_OPERAND (t, 0);
- op2 = TREE_OPERAND (t, 1);
+ op1 = gimple_assign_rhs1 (stmt);
+ op2 = gimple_assign_rhs2 (stmt);
if (op1 == a->ssa_var)
{
}
}
-/* Record the access/allocation site information for matrix MI so we can
+/* Record the access/allocation site information for matrix MI so we can
handle it later in transformation. */
static void
-record_access_alloc_site_info (struct matrix_info *mi, tree stmt, tree offset,
+record_access_alloc_site_info (struct matrix_info *mi, gimple stmt, tree offset,
tree index, int level, bool is_alloc)
{
struct access_site_info *acc_info;
all the allocation sites could be pre-calculated before the call to
the malloc of level 0 (the main malloc call). */
static void
-add_allocation_site (struct matrix_info *mi, tree stmt, int level)
+add_allocation_site (struct matrix_info *mi, gimple stmt, int level)
{
struct malloc_call_data mcd;
must be set accordingly. */
for (min_malloc_level = 0;
min_malloc_level < mi->max_malloced_level
- && mi->malloc_for_level[min_malloc_level]; min_malloc_level++);
+ && mi->malloc_for_level[min_malloc_level]; min_malloc_level++)
+ ;
if (level < min_malloc_level)
{
mi->allocation_function_decl = current_function_decl;
else
{
mark_min_matrix_escape_level (mi, level, stmt);
- /* cannot be that (level == min_malloc_level)
+      /* It cannot be that (level == min_malloc_level);
	 we would have returned earlier.  */
return;
}
calls like calloc and realloc. */
if (!mi->malloc_for_level)
{
- mi->malloc_for_level = xcalloc (level + 1, sizeof (tree));
+ mi->malloc_for_level = XCNEWVEC (gimple, level + 1);
mi->max_malloced_level = level + 1;
}
else if (mi->max_malloced_level <= level)
{
mi->malloc_for_level
- = xrealloc (mi->malloc_for_level, (level + 1) * sizeof (tree));
+ = XRESIZEVEC (gimple, mi->malloc_for_level, level + 1);
/* Zero the newly allocated items. */
memset (&(mi->malloc_for_level[mi->max_malloced_level + 1]),
will hold the size for each dimension; each malloc that allocates a
dimension has the size parameter; we use that parameter to
initialize the dimension size variable so we can use it later in
- the address calculations. LEVEL is the dimension we're inspecting.
+ the address calculations. LEVEL is the dimension we're inspecting.
Return if STMT is related to an allocation site. */
static void
-analyze_matrix_allocation_site (struct matrix_info *mi, tree stmt,
+analyze_matrix_allocation_site (struct matrix_info *mi, gimple stmt,
int level, sbitmap visited)
{
- if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
+ if (gimple_assign_copy_p (stmt) || gimple_assign_cast_p (stmt))
{
- tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);
+ tree rhs = gimple_assign_rhs1 (stmt);
- rhs = get_inner_of_cast_expr (rhs);
if (TREE_CODE (rhs) == SSA_NAME)
{
- tree def = SSA_NAME_DEF_STMT (rhs);
+ gimple def = SSA_NAME_DEF_STMT (rhs);
analyze_matrix_allocation_site (mi, def, level, visited);
return;
}
+ /* If we are back to the original matrix variable then we
+ are sure that this is analyzed as an access site. */
+ else if (rhs == mi->decl)
+ return;
+ }
+ /* A result of call to malloc. */
+ else if (is_gimple_call (stmt))
+ {
+ int call_flags = gimple_call_flags (stmt);
- /* A result of call to malloc. */
- else if (TREE_CODE (rhs) == CALL_EXPR)
+ if (!(call_flags & ECF_MALLOC))
+ {
+ mark_min_matrix_escape_level (mi, level, stmt);
+ return;
+ }
+ else
{
- int call_flags = call_expr_flags (rhs);
+ tree malloc_fn_decl;
- if (!(call_flags & ECF_MALLOC))
+ malloc_fn_decl = gimple_call_fndecl (stmt);
+ if (malloc_fn_decl == NULL_TREE)
{
mark_min_matrix_escape_level (mi, level, stmt);
return;
}
- else
- {
- tree malloc_fn_decl;
- const char *malloc_fname;
-
- malloc_fn_decl = CALL_EXPR_FN (rhs);
- if (TREE_CODE (malloc_fn_decl) != ADDR_EXPR
- || TREE_CODE (TREE_OPERAND (malloc_fn_decl, 0)) !=
- FUNCTION_DECL)
- {
- mark_min_matrix_escape_level (mi, level, stmt);
- return;
- }
- malloc_fn_decl = TREE_OPERAND (malloc_fn_decl, 0);
- malloc_fname = IDENTIFIER_POINTER (DECL_NAME (malloc_fn_decl));
- if (DECL_FUNCTION_CODE (malloc_fn_decl) != BUILT_IN_MALLOC)
- {
- if (dump_file)
- fprintf (dump_file,
- "Matrix %s is an argument to function %s\n",
- get_name (mi->decl), get_name (malloc_fn_decl));
- mark_min_matrix_escape_level (mi, level, stmt);
- return;
- }
- }
- /* This is a call to malloc. Check to see if this is the first
- call in this indirection level; if so, mark it; if not, mark
- as escaping. */
- if (mi->malloc_for_level
- && mi->malloc_for_level[level]
- && mi->malloc_for_level[level] != stmt)
+ if (DECL_FUNCTION_CODE (malloc_fn_decl) != BUILT_IN_MALLOC)
{
+ if (dump_file)
+ fprintf (dump_file,
+ "Matrix %s is an argument to function %s\n",
+ get_name (mi->decl), get_name (malloc_fn_decl));
mark_min_matrix_escape_level (mi, level, stmt);
return;
}
- else
- add_allocation_site (mi, stmt, level);
+ }
+ /* This is a call to malloc of level 'level'.
+ mi->max_malloced_level-1 == level means that we've
+ seen a malloc statement of level 'level' before.
+ If the statement is not the same one that we've
+ seen before, then there's another malloc statement
+ for the same level, which means that we need to mark
+ it escaping. */
+ if (mi->malloc_for_level
+ && mi->max_malloced_level-1 == level
+ && mi->malloc_for_level[level] != stmt)
+ {
+ mark_min_matrix_escape_level (mi, level, stmt);
return;
}
- /* If we are back to the original matrix variable then we
- are sure that this is analyzed as an access site. */
- else if (rhs == mi->decl)
- return;
+ else
+ add_allocation_site (mi, stmt, level);
+ return;
}
/* Looks like we don't know what is happening in this
     statement, so be on the safe side and mark it as escaping.  */
}
/* The transposing decision making.
- In order to to calculate the profitability of transposing, we collect two
+ In order to calculate the profitability of transposing, we collect two
types of information regarding the accesses:
1. profiling information used to express the hotness of an access, that
- is how often the matrix is accessed by this access site (count of the
- access site).
+ is how often the matrix is accessed by this access site (count of the
+ access site).
   2. which dimension in the access site is iterated by the innermost
      loop containing this access.
- The matrix will have a calculated value of weighted hotness for each
+ The matrix will have a calculated value of weighted hotness for each
dimension.
- Intuitively the hotness level of a dimension is a function of how
- many times it was the most frequently accessed dimension in the
+ Intuitively the hotness level of a dimension is a function of how
+ many times it was the most frequently accessed dimension in the
highly executed access sites of this matrix.
   As computed by the following equation:

     for each access site j = 1..m and each dimension i = 1..n:
       dim_hot_level[i] += acc[j]->dim[i]->iter_by_inner_loop * count(j)

   where n is the number of dimensions and m is the number of matrix
   access sites.
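   For instance (a sketch with hypothetical numbers): with two access
   sites, where acc[0] iterates dimension 1 in its innermost loop with
   count(0) = 900 and acc[1] iterates dimension 0 with count(1) = 100,
   we get dim_hot_level[1] = 900 and dim_hot_level[0] = 100, so the
   heuristic favors a layout in which dimension 1 becomes the innermost
   (contiguous) dimension.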
static int
analyze_transpose (void **slot, void *data ATTRIBUTE_UNUSED)
{
- struct matrix_info *mi = *slot;
+ struct matrix_info *mi = (struct matrix_info *) *slot;
int min_escape_l = mi->min_indirect_level_escape;
struct loop *loop;
affine_iv iv;
{
if (mi->access_l)
{
- for (i = 0;
- VEC_iterate (access_site_info_p, mi->access_l, i, acc_info);
- i++)
+ FOR_EACH_VEC_ELT (access_site_info_p, mi->access_l, i, acc_info)
free (acc_info);
VEC_free (access_site_info_p, heap, mi->access_l);
for (i = 0; VEC_iterate (access_site_info_p, mi->access_l, i, acc_info);
i++)
{
- if (TREE_CODE (GIMPLE_STMT_OPERAND (acc_info->stmt, 1)) == PLUS_EXPR
+ if (gimple_assign_rhs_code (acc_info->stmt) == POINTER_PLUS_EXPR
&& acc_info->level < min_escape_l)
{
loop = loop_containing_stmt (acc_info->stmt);
free (acc_info);
continue;
}
- if (simple_iv (loop, acc_info->stmt, acc_info->offset, &iv, true))
+ if (simple_iv (loop, loop, acc_info->offset, &iv, true))
{
if (iv.step != NULL)
{
{
acc_info->iterated_by_inner_most_loop_p = 1;
mi->dim_hot_level[acc_info->level] +=
- bb_for_stmt (acc_info->stmt)->count;
+ gimple_bb (acc_info->stmt)->count;
}
}
return 1;
}
-/* Find the index which defines the OFFSET from base.
+/* Find the index which defines the OFFSET from base.
We walk from use to def until we find how the offset was defined. */
static tree
-get_index_from_offset (tree offset, tree def_stmt)
+get_index_from_offset (tree offset, gimple def_stmt)
{
- tree op1, op2, expr, index;
+ tree op1, op2, index;
- if (TREE_CODE (def_stmt) == PHI_NODE)
+ if (gimple_code (def_stmt) == GIMPLE_PHI)
return NULL;
- expr = get_inner_of_cast_expr (GIMPLE_STMT_OPERAND (def_stmt, 1));
- if (TREE_CODE (expr) == SSA_NAME)
- return get_index_from_offset (offset, SSA_NAME_DEF_STMT (expr));
- else if (TREE_CODE (expr) == MULT_EXPR)
+ if ((gimple_assign_copy_p (def_stmt) || gimple_assign_cast_p (def_stmt))
+ && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME)
+ return get_index_from_offset (offset,
+ SSA_NAME_DEF_STMT (gimple_assign_rhs1 (def_stmt)));
+ else if (is_gimple_assign (def_stmt)
+ && gimple_assign_rhs_code (def_stmt) == MULT_EXPR)
{
- op1 = TREE_OPERAND (expr, 0);
- op2 = TREE_OPERAND (expr, 1);
+ op1 = gimple_assign_rhs1 (def_stmt);
+ op2 = gimple_assign_rhs2 (def_stmt);
if (TREE_CODE (op1) != INTEGER_CST && TREE_CODE (op2) != INTEGER_CST)
return NULL;
index = (TREE_CODE (op1) == INTEGER_CST) ? op2 : op1;
/* update MI->dimension_type_size[CURRENT_INDIRECT_LEVEL] with the size
of the type related to the SSA_VAR, or the type related to the
- lhs of STMT, in the case that it is an INDIRECT_REF. */
+   lhs of STMT, in the case that it is a MEM_REF.  */
static void
-update_type_size (struct matrix_info *mi, tree stmt, tree ssa_var,
+update_type_size (struct matrix_info *mi, gimple stmt, tree ssa_var,
int current_indirect_level)
{
tree lhs;
HOST_WIDE_INT type_size;
- /* Update type according to the type of the INDIRECT_REF expr. */
- if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
- && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == INDIRECT_REF)
+ /* Update type according to the type of the MEM_REF expr. */
+ if (is_gimple_assign (stmt)
+ && TREE_CODE (gimple_assign_lhs (stmt)) == MEM_REF)
{
- lhs = GIMPLE_STMT_OPERAND (stmt, 0);
+ lhs = gimple_assign_lhs (stmt);
gcc_assert (POINTER_TYPE_P
(TREE_TYPE (SSA_NAME_VAR (TREE_OPERAND (lhs, 0)))));
type_size =
}
}
-/* USE_STMT represents a call_expr ,where one of the arguments is the
- ssa var that we want to check because it came from some use of matrix
- MI. CURRENT_INDIRECT_LEVEL is the indirection level we reached so
+/* USE_STMT represents a GIMPLE_CALL, where one of the arguments is the
+ ssa var that we want to check because it came from some use of matrix
+ MI. CURRENT_INDIRECT_LEVEL is the indirection level we reached so
far. */
-static void
-analyze_accesses_for_call_expr (struct matrix_info *mi, tree use_stmt,
- int current_indirect_level)
+static int
+analyze_accesses_for_call_stmt (struct matrix_info *mi, tree ssa_var,
+ gimple use_stmt, int current_indirect_level)
{
- tree call = get_call_expr_in (use_stmt);
- if (call && get_callee_fndecl (call))
+ tree fndecl = gimple_call_fndecl (use_stmt);
+
+ if (gimple_call_lhs (use_stmt))
{
- if (DECL_FUNCTION_CODE (get_callee_fndecl (call)) != BUILT_IN_FREE)
+ tree lhs = gimple_call_lhs (use_stmt);
+ struct ssa_acc_in_tree lhs_acc, rhs_acc;
+
+ memset (&lhs_acc, 0, sizeof (lhs_acc));
+ memset (&rhs_acc, 0, sizeof (rhs_acc));
+
+ lhs_acc.ssa_var = ssa_var;
+ lhs_acc.t_code = ERROR_MARK;
+ ssa_accessed_in_tree (lhs, &lhs_acc);
+ rhs_acc.ssa_var = ssa_var;
+ rhs_acc.t_code = ERROR_MARK;
+ ssa_accessed_in_call_rhs (use_stmt, &rhs_acc);
+
+      /* The SSA must appear either on the left side or on the right side,
+	 for us to understand what is happening.
+	 In case the SSA_NAME is found on both sides we should mark the
+	 matrix as escaping at this level, because in this case we cannot
+	 calculate the address correctly.  */
+ if ((lhs_acc.var_found && rhs_acc.var_found
+ && lhs_acc.t_code == MEM_REF)
+ || (!rhs_acc.var_found && !lhs_acc.var_found))
+ {
+ mark_min_matrix_escape_level (mi, current_indirect_level, use_stmt);
+ return current_indirect_level;
+ }
+ gcc_assert (!rhs_acc.var_found || !lhs_acc.var_found);
+
+ /* If we are storing to the matrix at some level, then mark it as
+ escaping at that level. */
+ if (lhs_acc.var_found)
+ {
+ int l = current_indirect_level + 1;
+
+ gcc_assert (lhs_acc.t_code == MEM_REF);
+ mark_min_matrix_escape_level (mi, l, use_stmt);
+ return current_indirect_level;
+ }
+ }
+
+ if (fndecl)
+ {
+ if (DECL_FUNCTION_CODE (fndecl) != BUILT_IN_FREE)
{
if (dump_file)
fprintf (dump_file,
"Matrix %s: Function call %s, level %d escapes.\n",
- get_name (mi->decl), get_name (get_callee_fndecl (call)),
+ get_name (mi->decl), get_name (fndecl),
current_indirect_level);
mark_min_matrix_escape_level (mi, current_indirect_level, use_stmt);
}
mi->free_stmts[l].func = current_function_decl;
}
}
+ return current_indirect_level;
}
-/* USE_STMT represents a phi node of the ssa var that we want to
- check because it came from some use of matrix
+/* USE_STMT represents a phi node of the ssa var that we want to
+ check because it came from some use of matrix
MI.
We check all the escaping levels that get to the PHI node
   and make sure they all have the same escape level;
if not (which is rare) we let the escaping level be the
minimum level that gets into that PHI because starting from
- that level we cannot expect the behavior of the indirections.
+   that level we cannot predict the behavior of the indirections.
CURRENT_INDIRECT_LEVEL is the indirection level we reached so far. */
static void
-analyze_accesses_for_phi_node (struct matrix_info *mi, tree use_stmt,
+analyze_accesses_for_phi_node (struct matrix_info *mi, gimple use_stmt,
int current_indirect_level, sbitmap visited,
bool record_accesses)
{
struct matrix_access_phi_node tmp_maphi, *maphi, **pmaphi;
tmp_maphi.phi = use_stmt;
- if ((maphi = htab_find (htab_mat_acc_phi_nodes, &tmp_maphi)))
+ if ((maphi = (struct matrix_access_phi_node *)
+ htab_find (htab_mat_acc_phi_nodes, &tmp_maphi)))
{
if (maphi->indirection_level == current_indirect_level)
return;
{
int level = MIN (maphi->indirection_level,
current_indirect_level);
- int j;
- tree t = NULL_TREE;
+ size_t j;
+ gimple stmt = NULL;
maphi->indirection_level = level;
- for (j = 0; j < PHI_NUM_ARGS (use_stmt); j++)
+ for (j = 0; j < gimple_phi_num_args (use_stmt); j++)
{
tree def = PHI_ARG_DEF (use_stmt, j);
- if (TREE_CODE (SSA_NAME_DEF_STMT (def)) != PHI_NODE)
- t = SSA_NAME_DEF_STMT (def);
+ if (gimple_code (SSA_NAME_DEF_STMT (def)) != GIMPLE_PHI)
+ stmt = SSA_NAME_DEF_STMT (def);
}
- mark_min_matrix_escape_level (mi, level, t);
+ mark_min_matrix_escape_level (mi, level, stmt);
}
return;
}
}
}
-/* USE_STMT represents a modify statement (the rhs or lhs include
- the ssa var that we want to check because it came from some use of matrix
- MI.
- CURRENT_INDIRECT_LEVEL is the indirection level we reached so far. */
+/* USE_STMT represents an assign statement (the rhs or lhs includes
+   the ssa var that we want to check because it came from some use of matrix
+   MI).  CURRENT_INDIRECT_LEVEL is the indirection level we reached so far.  */
static int
-analyze_accesses_for_modify_stmt (struct matrix_info *mi, tree ssa_var,
- tree use_stmt, int current_indirect_level,
+analyze_accesses_for_assign_stmt (struct matrix_info *mi, tree ssa_var,
+ gimple use_stmt, int current_indirect_level,
bool last_op, sbitmap visited,
bool record_accesses)
{
-
- tree lhs = GIMPLE_STMT_OPERAND (use_stmt, 0);
- tree rhs = GIMPLE_STMT_OPERAND (use_stmt, 1);
+ tree lhs = gimple_get_lhs (use_stmt);
struct ssa_acc_in_tree lhs_acc, rhs_acc;
memset (&lhs_acc, 0, sizeof (lhs_acc));
ssa_accessed_in_tree (lhs, &lhs_acc);
rhs_acc.ssa_var = ssa_var;
rhs_acc.t_code = ERROR_MARK;
- ssa_accessed_in_tree (get_inner_of_cast_expr (rhs), &rhs_acc);
+ ssa_accessed_in_assign_rhs (use_stmt, &rhs_acc);
  /* The SSA must appear either on the left side or on the right side,
     for us to understand what is happening.
at this level because in this case we cannot calculate the
address correctly. */
if ((lhs_acc.var_found && rhs_acc.var_found
- && lhs_acc.t_code == INDIRECT_REF)
+ && lhs_acc.t_code == MEM_REF)
|| (!rhs_acc.var_found && !lhs_acc.var_found))
{
mark_min_matrix_escape_level (mi, current_indirect_level, use_stmt);
escaping at that level. */
if (lhs_acc.var_found)
{
- tree def;
int l = current_indirect_level + 1;
- gcc_assert (lhs_acc.t_code == INDIRECT_REF);
- def = get_inner_of_cast_expr (rhs);
- if (TREE_CODE (def) != SSA_NAME)
+ gcc_assert (lhs_acc.t_code == MEM_REF);
+
+ if (!(gimple_assign_copy_p (use_stmt)
+ || gimple_assign_cast_p (use_stmt))
+ || (TREE_CODE (gimple_assign_rhs1 (use_stmt)) != SSA_NAME))
mark_min_matrix_escape_level (mi, l, use_stmt);
else
{
- def = SSA_NAME_DEF_STMT (def);
- analyze_matrix_allocation_site (mi, def, l, visited);
+ gimple def_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (use_stmt));
+ analyze_matrix_allocation_site (mi, def_stmt, l, visited);
if (record_accesses)
record_access_alloc_site_info (mi, use_stmt, NULL_TREE,
NULL_TREE, l, true);
}
return current_indirect_level;
}
- /* Now, check the right-hand-side, to see how the SSA variable
+ /* Now, check the right-hand-side, to see how the SSA variable
is used. */
if (rhs_acc.var_found)
{
- /* If we are passing the ssa name to a function call and
- the pointer escapes when passed to the function
- (not the case of free), then we mark the matrix as
- escaping at this level. */
- if (rhs_acc.t_code == CALL_EXPR)
- {
- analyze_accesses_for_call_expr (mi, use_stmt,
- current_indirect_level);
-
- return current_indirect_level;
- }
- if (rhs_acc.t_code != INDIRECT_REF
- && rhs_acc.t_code != PLUS_EXPR && rhs_acc.t_code != SSA_NAME)
+ if (rhs_acc.t_code != MEM_REF
+ && rhs_acc.t_code != POINTER_PLUS_EXPR && rhs_acc.t_code != SSA_NAME)
{
mark_min_matrix_escape_level (mi, current_indirect_level, use_stmt);
return current_indirect_level;
}
      /* If the access in the RHS has an indirection, increase the
indirection level. */
- if (rhs_acc.t_code == INDIRECT_REF)
+ if (rhs_acc.t_code == MEM_REF)
{
if (record_accesses)
record_access_alloc_site_info (mi, use_stmt, NULL_TREE,
current_indirect_level, true);
current_indirect_level += 1;
}
- else if (rhs_acc.t_code == PLUS_EXPR)
+ else if (rhs_acc.t_code == POINTER_PLUS_EXPR)
{
- /* ??? maybe we should check
- the type of the PLUS_EXP and make sure it's
- integral type. */
gcc_assert (rhs_acc.second_op);
if (last_op)
/* Currently we support only one PLUS expression on the
tree index;
tree op1, op2;
- op1 = TREE_OPERAND (rhs, 0);
- op2 = TREE_OPERAND (rhs, 1);
+ op1 = gimple_assign_rhs1 (use_stmt);
+ op2 = gimple_assign_rhs2 (use_stmt);
op2 = (op1 == ssa_var) ? op2 : op1;
if (TREE_CODE (op2) == INTEGER_CST)
}
  /* If we are storing this level of indirection, mark it as
escaping. */
- if (lhs_acc.t_code == INDIRECT_REF || TREE_CODE (lhs) != SSA_NAME)
+ if (lhs_acc.t_code == MEM_REF || TREE_CODE (lhs) != SSA_NAME)
{
int l = current_indirect_level;
/* One exception is when we are storing to the matrix
     variable itself; this is the case of malloc, and we must make
- sure that it's the one and only one call to malloc so
- we call analyze_matrix_allocation_site to check
+ sure that it's the one and only one call to malloc so
+ we call analyze_matrix_allocation_site to check
this out. */
if (TREE_CODE (lhs) != VAR_DECL || lhs != mi->decl)
mark_min_matrix_escape_level (mi, current_indirect_level,
/* We are placing it in an SSA, follow that SSA. */
analyze_matrix_accesses (mi, lhs,
current_indirect_level,
- rhs_acc.t_code == PLUS_EXPR,
+ rhs_acc.t_code == POINTER_PLUS_EXPR,
visited, record_accesses);
}
}
return current_indirect_level;
}
-/* Given a SSA_VAR (coming from a use statement of the matrix MI),
+/* Given a SSA_VAR (coming from a use statement of the matrix MI),
follow its uses and level of indirection and find out the minimum
indirection level it escapes in (the highest dimension) and the maximum
level it is accessed in (this will be the actual dimension of the
return;
/* Now go over the uses of the SSA_NAME and check how it is used in
- each one of them. We are mainly looking for the pattern INDIRECT_REF,
- then a PLUS_EXPR, then INDIRECT_REF etc. while in between there could
+ each one of them. We are mainly looking for the pattern MEM_REF,
+ then a POINTER_PLUS_EXPR, then MEM_REF etc. while in between there could
be any number of copies and casts. */
gcc_assert (TREE_CODE (ssa_var) == SSA_NAME);
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, ssa_var)
{
- tree use_stmt = USE_STMT (use_p);
- if (TREE_CODE (use_stmt) == PHI_NODE)
+ gimple use_stmt = USE_STMT (use_p);
+ if (gimple_code (use_stmt) == GIMPLE_PHI)
/* We check all the escaping levels that get to the PHI node
       and make sure they all have the same escape level;
if not (which is rare) we let the escaping level be the
analyze_accesses_for_phi_node (mi, use_stmt, current_indirect_level,
visited, record_accesses);
- else if (TREE_CODE (use_stmt) == CALL_EXPR)
- analyze_accesses_for_call_expr (mi, use_stmt, current_indirect_level);
- else if (TREE_CODE (use_stmt) == GIMPLE_MODIFY_STMT)
+ else if (is_gimple_call (use_stmt))
+ analyze_accesses_for_call_stmt (mi, ssa_var, use_stmt,
+ current_indirect_level);
+ else if (is_gimple_assign (use_stmt))
current_indirect_level =
- analyze_accesses_for_modify_stmt (mi, ssa_var, use_stmt,
+ analyze_accesses_for_assign_stmt (mi, ssa_var, use_stmt,
current_indirect_level, last_op,
visited, record_accesses);
}
}
+typedef struct
+{
+ tree fn;
+ gimple stmt;
+} check_var_data;
/* A walk_tree function to go over the VAR_DECL, PARM_DECL nodes of
the malloc size expression and check that those aren't changed
{
basic_block bb;
tree t = *tp;
- tree fn = data;
- block_stmt_iterator bsi;
- tree stmt;
+ check_var_data *callback_data = (check_var_data*) data;
+ tree fn = callback_data->fn;
+ gimple_stmt_iterator gsi;
+ gimple stmt;
if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL)
return NULL_TREE;
FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (fn))
{
- for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- stmt = bsi_stmt (bsi);
- if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
+ stmt = gsi_stmt (gsi);
+ if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
continue;
- if (GIMPLE_STMT_OPERAND (stmt, 0) == t)
- return stmt;
+ if (gimple_get_lhs (stmt) == t)
+ {
+ callback_data->stmt = stmt;
+ return t;
+ }
}
}
*walk_subtrees = 1;
}
/* Go backwards in the use-def chains and find out the expression
- represented by the possible SSA name in EXPR, until it is composed
+ represented by the possible SSA name in STMT, until it is composed
of only VAR_DECL, PARM_DECL and INT_CST. In case of phi nodes
we make sure that all the arguments represent the same subexpression,
otherwise we fail. */
+
static tree
-can_calculate_expr_before_stmt (tree expr, sbitmap visited)
+can_calculate_stmt_before_stmt (gimple stmt, sbitmap visited)
{
- tree def_stmt, op1, op2, res;
+ tree op1, op2, res;
+ enum tree_code code;
- switch (TREE_CODE (expr))
+ switch (gimple_code (stmt))
{
- case SSA_NAME:
- /* Case of loop, we don't know to represent this expression. */
- if (TEST_BIT (visited, SSA_NAME_VERSION (expr)))
- return NULL_TREE;
+ case GIMPLE_ASSIGN:
+ code = gimple_assign_rhs_code (stmt);
+ op1 = gimple_assign_rhs1 (stmt);
- SET_BIT (visited, SSA_NAME_VERSION (expr));
- def_stmt = SSA_NAME_DEF_STMT (expr);
- res = can_calculate_expr_before_stmt (def_stmt, visited);
- RESET_BIT (visited, SSA_NAME_VERSION (expr));
- return res;
- case VAR_DECL:
- case PARM_DECL:
- case INTEGER_CST:
- return expr;
- case PLUS_EXPR:
- case MINUS_EXPR:
- case MULT_EXPR:
- op1 = TREE_OPERAND (expr, 0);
- op2 = TREE_OPERAND (expr, 1);
+ switch (code)
+ {
+ case POINTER_PLUS_EXPR:
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case MULT_EXPR:
+
+ op2 = gimple_assign_rhs2 (stmt);
+ op1 = can_calculate_expr_before_stmt (op1, visited);
+ if (!op1)
+ return NULL_TREE;
+ op2 = can_calculate_expr_before_stmt (op2, visited);
+ if (op2)
+ return fold_build2 (code, gimple_expr_type (stmt), op1, op2);
+ return NULL_TREE;
+
+ CASE_CONVERT:
+ res = can_calculate_expr_before_stmt (op1, visited);
+ if (res != NULL_TREE)
+ return build1 (code, gimple_expr_type (stmt), res);
+ else
+ return NULL_TREE;
- op1 = can_calculate_expr_before_stmt (op1, visited);
- if (!op1)
- return NULL_TREE;
- op2 = can_calculate_expr_before_stmt (op2, visited);
- if (op2)
- return fold_build2 (TREE_CODE (expr), TREE_TYPE (expr), op1, op2);
- return NULL_TREE;
- case GIMPLE_MODIFY_STMT:
- return can_calculate_expr_before_stmt (GIMPLE_STMT_OPERAND (expr, 1),
- visited);
- case PHI_NODE:
+ default:
+ if (gimple_assign_single_p (stmt))
+ return can_calculate_expr_before_stmt (op1, visited);
+ else
+ return NULL_TREE;
+ }
+
+ case GIMPLE_PHI:
{
- int j;
+ size_t j;
res = NULL_TREE;
/* Make sure all the arguments represent the same value. */
- for (j = 0; j < PHI_NUM_ARGS (expr); j++)
+ for (j = 0; j < gimple_phi_num_args (stmt); j++)
{
tree new_res;
- tree def = PHI_ARG_DEF (expr, j);
+ tree def = PHI_ARG_DEF (stmt, j);
new_res = can_calculate_expr_before_stmt (def, visited);
if (res == NULL_TREE)
}
return res;
}
- case NOP_EXPR:
- case CONVERT_EXPR:
- res = can_calculate_expr_before_stmt (TREE_OPERAND (expr, 0), visited);
- if (res != NULL_TREE)
- return build1 (TREE_CODE (expr), TREE_TYPE (expr), res);
- else
+
+ default:
+ return NULL_TREE;
+ }
+}
+
+/* Go backwards in the use-def chains and find out the expression
+ represented by the possible SSA name in EXPR, until it is composed
+ of only VAR_DECL, PARM_DECL and INT_CST. In case of phi nodes
+ we make sure that all the arguments represent the same subexpression,
+ otherwise we fail. */
+static tree
+can_calculate_expr_before_stmt (tree expr, sbitmap visited)
+{
+ gimple def_stmt;
+ tree res;
+
+ switch (TREE_CODE (expr))
+ {
+ case SSA_NAME:
+      /* Case of a loop, we don't know how to represent this expression.  */
+ if (TEST_BIT (visited, SSA_NAME_VERSION (expr)))
return NULL_TREE;
+ SET_BIT (visited, SSA_NAME_VERSION (expr));
+ def_stmt = SSA_NAME_DEF_STMT (expr);
+ res = can_calculate_stmt_before_stmt (def_stmt, visited);
+ RESET_BIT (visited, SSA_NAME_VERSION (expr));
+ return res;
+ case VAR_DECL:
+ case PARM_DECL:
+ case INTEGER_CST:
+ return expr;
+
default:
return NULL_TREE;
}
check_allocation_function (void **slot, void *data ATTRIBUTE_UNUSED)
{
int level;
- block_stmt_iterator bsi;
- basic_block bb_level_0;
- struct matrix_info *mi = *slot;
- sbitmap visited = sbitmap_alloc (num_ssa_names);
+ struct matrix_info *mi = (struct matrix_info *) *slot;
+ sbitmap visited;
if (!mi->malloc_for_level)
return 1;
+
+ visited = sbitmap_alloc (num_ssa_names);
+
/* Do nothing if the current function is not the allocation
function of MI. */
if (mi->allocation_function_decl != current_function_decl
if (!mi->malloc_for_level[level])
break;
- mark_min_matrix_escape_level (mi, level, NULL_TREE);
-
- bsi = bsi_for_stmt (mi->malloc_for_level[0]);
- bb_level_0 = bsi.bb;
+ mark_min_matrix_escape_level (mi, level, NULL);
/* Check if the expression of the size passed to malloc could be
pre-calculated before the malloc of level 0. */
for (level = 1; level < mi->min_indirect_level_escape; level++)
{
- tree call_stmt, size;
- struct malloc_call_data mcd;
+ gimple call_stmt;
+ tree size;
+ struct malloc_call_data mcd = {NULL, NULL_TREE, NULL_TREE};
call_stmt = mi->malloc_for_level[level];
mark_min_matrix_escape_level (mi, level, call_stmt);
if (dump_file)
fprintf (dump_file,
- "Matrix %s: Cannot calculate the size of allocation. escaping at level %d\n",
+ "Matrix %s: Cannot calculate the size of allocation, escaping at level %d\n",
get_name (mi->decl), level);
break;
}
{
sbitmap visited_stmts_1;
- block_stmt_iterator bsi;
- tree stmt;
+ gimple_stmt_iterator gsi;
+ gimple stmt;
basic_block bb;
struct matrix_info tmpmi, *mi;
FOR_EACH_BB (bb)
{
- for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- stmt = bsi_stmt (bsi);
- if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
- && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == VAR_DECL)
+ tree lhs;
+
+ stmt = gsi_stmt (gsi);
+ lhs = gimple_get_lhs (stmt);
+ if (lhs != NULL_TREE
+ && TREE_CODE (lhs) == VAR_DECL)
{
- tmpmi.decl = GIMPLE_STMT_OPERAND (stmt, 0);
- if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
+ tmpmi.decl = lhs;
+ if ((mi = (struct matrix_info *) htab_find (matrices_to_reorg,
+ &tmpmi)))
{
sbitmap_zero (visited_stmts_1);
analyze_matrix_allocation_site (mi, stmt, 0, visited_stmts_1);
}
}
- if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
- && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == SSA_NAME
- && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)) == VAR_DECL)
+ if (is_gimple_assign (stmt)
+ && gimple_assign_single_p (stmt)
+ && TREE_CODE (lhs) == SSA_NAME
+ && TREE_CODE (gimple_assign_rhs1 (stmt)) == VAR_DECL)
{
- tmpmi.decl = GIMPLE_STMT_OPERAND (stmt, 1);
- if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
+ tmpmi.decl = gimple_assign_rhs1 (stmt);
+ if ((mi = (struct matrix_info *) htab_find (matrices_to_reorg,
+ &tmpmi)))
{
sbitmap_zero (visited_stmts_1);
- analyze_matrix_accesses (mi,
- GIMPLE_STMT_OPERAND (stmt, 0), 0,
+ analyze_matrix_accesses (mi, lhs, 0,
false, visited_stmts_1, record);
}
}
tree rhs, lhs;
if (!ssa_var
- || TREE_CODE (SSA_NAME_DEF_STMT (ssa_var)) != GIMPLE_MODIFY_STMT)
+ || !is_gimple_assign (SSA_NAME_DEF_STMT (ssa_var))
+ || !gimple_assign_single_p (SSA_NAME_DEF_STMT (ssa_var)))
continue;
- rhs = GIMPLE_STMT_OPERAND (SSA_NAME_DEF_STMT (ssa_var), 1);
- lhs = GIMPLE_STMT_OPERAND (SSA_NAME_DEF_STMT (ssa_var), 0);
+ rhs = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (ssa_var));
+ lhs = gimple_assign_lhs (SSA_NAME_DEF_STMT (ssa_var));
if (TREE_CODE (rhs) != VAR_DECL && TREE_CODE (lhs) != VAR_DECL)
continue;
chain for this SSA_VAR and check for escapes or apply the
flattening. */
tmpmi.decl = rhs;
- if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
+ if ((mi = (struct matrix_info *) htab_find (matrices_to_reorg, &tmpmi)))
{
/* This variable will track the visited PHI nodes, so we can limit
its size to the maximum number of SSA names. */
sbitmap_free (visited_stmts_1);
}
+/* Used when we want to convert the expression: RESULT = something *
+   ORIG to RESULT = something * NEW_VAL.  If ORIG and NEW_VAL are powers
+   of 2, shift operations can be used; otherwise division and
+   multiplication are needed.  */
+
+static tree
+compute_offset (HOST_WIDE_INT orig, HOST_WIDE_INT new_val, tree result)
+{
+
+ int x, y;
+ tree result1, ratio, log, orig_tree, new_tree;
+
+ x = exact_log2 (orig);
+ y = exact_log2 (new_val);
+
+ if (x != -1 && y != -1)
+ {
+ if (x == y)
+ return result;
+ else if (x > y)
+ {
+ log = build_int_cst (TREE_TYPE (result), x - y);
+ result1 =
+ fold_build2 (LSHIFT_EXPR, TREE_TYPE (result), result, log);
+ return result1;
+ }
+ log = build_int_cst (TREE_TYPE (result), y - x);
+ result1 = fold_build2 (RSHIFT_EXPR, TREE_TYPE (result), result, log);
+
+ return result1;
+ }
+ orig_tree = build_int_cst (TREE_TYPE (result), orig);
+ new_tree = build_int_cst (TREE_TYPE (result), new_val);
+ ratio = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (result), result, orig_tree);
+ result1 = fold_build2 (MULT_EXPR, TREE_TYPE (result), ratio, new_tree);
+
+ return result1;
+}
+
+
/* We know that we are allowed to perform matrix flattening (according to the
escape analysis), so we traverse the use-def chains of the SSA vars
defined by the global variables pointing to the matrices of our interest.
according to the following equation:
a[I1][I2]...[Ik] , where D1..Dk is the length of each dimension and the
- escaping level is m <= k, and a' is the new allocated matrix,
+ escaping level is m <= k, and a' is the new allocated matrix,
will be translated to :
-
+
b[I(m+1)]...[Ik]
-
- where
+
+ where
b = a' + I1*D2...*Dm + I2*D3...Dm + ... + Im
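       For instance (a sketch): a fully flattened three-dimensional
       matrix (m == k == 3) turns an access a[I1][I2][I3] into a single
       access at a' + I1*D2*D3 + I2*D3 + I3, i.e. a'[(I1*D2 + I2)*D3 + I3].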
*/
static int
transform_access_sites (void **slot, void *data ATTRIBUTE_UNUSED)
{
- tree stmts;
- block_stmt_iterator bsi;
- struct matrix_info *mi = *slot;
+ gimple_stmt_iterator gsi;
+ struct matrix_info *mi = (struct matrix_info *) *slot;
int min_escape_l = mi->min_indirect_level_escape;
struct access_site_info *acc_info;
+ enum tree_code code;
int i;
if (min_escape_l < 2 || !mi->access_l)
for (i = 0; VEC_iterate (access_site_info_p, mi->access_l, i, acc_info);
i++)
{
- tree orig, type;
-
/* This is possible because we collect the access sites before
we determine the final minimum indirection level. */
if (acc_info->level >= min_escape_l)
}
if (acc_info->is_alloc)
{
- if (acc_info->level >= 0 && bb_for_stmt (acc_info->stmt))
+ if (acc_info->level >= 0 && gimple_bb (acc_info->stmt))
{
ssa_op_iter iter;
tree def;
- tree stmt = acc_info->stmt;
+ gimple stmt = acc_info->stmt;
+ tree lhs;
FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
mark_sym_for_renaming (SSA_NAME_VAR (def));
- bsi = bsi_for_stmt (stmt);
- gcc_assert (TREE_CODE (acc_info->stmt) == GIMPLE_MODIFY_STMT);
- if (TREE_CODE (GIMPLE_STMT_OPERAND (acc_info->stmt, 0)) ==
- SSA_NAME && acc_info->level < min_escape_l - 1)
+ gsi = gsi_for_stmt (stmt);
+ gcc_assert (is_gimple_assign (acc_info->stmt));
+ lhs = gimple_assign_lhs (acc_info->stmt);
+ if (TREE_CODE (lhs) == SSA_NAME
+ && acc_info->level < min_escape_l - 1)
{
imm_use_iterator imm_iter;
use_operand_p use_p;
- tree use_stmt;
+ gimple use_stmt;
- FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter,
- GIMPLE_STMT_OPERAND (acc_info->stmt,
- 0))
+ FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
{
- tree conv, tmp, stmts;
+ tree rhs, tmp;
+ gimple new_stmt;
+ gcc_assert (gimple_assign_rhs_code (acc_info->stmt)
+ == MEM_REF);
/* Emit convert statement to convert to type of use. */
- conv =
- fold_build1 (CONVERT_EXPR,
- TREE_TYPE (GIMPLE_STMT_OPERAND
- (acc_info->stmt, 0)),
- TREE_OPERAND (GIMPLE_STMT_OPERAND
- (acc_info->stmt, 1), 0));
- tmp =
- create_tmp_var (TREE_TYPE
- (GIMPLE_STMT_OPERAND
- (acc_info->stmt, 0)), "new");
+ tmp = create_tmp_var (TREE_TYPE (lhs), "new");
add_referenced_var (tmp);
- stmts =
- fold_build2 (GIMPLE_MODIFY_STMT,
- TREE_TYPE (GIMPLE_STMT_OPERAND
- (acc_info->stmt, 0)), tmp,
- conv);
- tmp = make_ssa_name (tmp, stmts);
- GIMPLE_STMT_OPERAND (stmts, 0) = tmp;
- bsi = bsi_for_stmt (acc_info->stmt);
- bsi_insert_after (&bsi, stmts, BSI_SAME_STMT);
+ rhs = gimple_assign_rhs1 (acc_info->stmt);
+ rhs = fold_convert (TREE_TYPE (tmp),
+ TREE_OPERAND (rhs, 0));
+ new_stmt = gimple_build_assign (tmp, rhs);
+ tmp = make_ssa_name (tmp, new_stmt);
+ gimple_assign_set_lhs (new_stmt, tmp);
+ gsi = gsi_for_stmt (acc_info->stmt);
+ gsi_insert_after (&gsi, new_stmt, GSI_SAME_STMT);
SET_USE (use_p, tmp);
}
}
if (acc_info->level < min_escape_l - 1)
- bsi_remove (&bsi, true);
+ gsi_remove (&gsi, true);
}
free (acc_info);
continue;
}
- orig = GIMPLE_STMT_OPERAND (acc_info->stmt, 1);
- type = TREE_TYPE (orig);
- if (TREE_CODE (orig) == INDIRECT_REF
+ code = gimple_assign_rhs_code (acc_info->stmt);
+ if (code == MEM_REF
&& acc_info->level < min_escape_l - 1)
{
- /* Replace the INDIRECT_REF with NOP (cast) usually we are casting
+	  /* Replace the MEM_REF with a NOP (cast); usually we are casting
from "pointer to type" to "type". */
- orig =
- build1 (NOP_EXPR, TREE_TYPE (orig),
- GIMPLE_STMT_OPERAND (orig, 0));
- GIMPLE_STMT_OPERAND (acc_info->stmt, 1) = orig;
+ tree t =
+ build1 (NOP_EXPR, TREE_TYPE (gimple_assign_rhs1 (acc_info->stmt)),
+ TREE_OPERAND (gimple_assign_rhs1 (acc_info->stmt), 0));
+ gimple_assign_set_rhs_code (acc_info->stmt, NOP_EXPR);
+ gimple_assign_set_rhs1 (acc_info->stmt, t);
}
- else if (TREE_CODE (orig) == PLUS_EXPR
+ else if (code == POINTER_PLUS_EXPR
&& acc_info->level < (min_escape_l))
{
imm_use_iterator imm_iter;
tmp1 = offset;
else
{
- int x, y;
- tree ratio;
tree new_offset;
- tree d_type_size, d_type_size_k;
- d_type_size =
- build_int_cst (type,
- mi->dimension_type_size[min_escape_l]);
- d_type_size_k =
- build_int_cst (type, mi->dimension_type_size[k + 1]);
- x = exact_log2 (mi->dimension_type_size[min_escape_l]);
- y = exact_log2 (mi->dimension_type_size[k + 1]);
+ new_offset =
+ compute_offset (mi->dimension_type_size[min_escape_l],
+ mi->dimension_type_size[k + 1], offset);
- if (x != -1 && y != -1)
- {
- if (x - y == 0)
- new_offset = offset;
- else
- {
- tree log = build_int_cst (type, x - y);
- new_offset =
- fold_build2 (LSHIFT_EXPR, TREE_TYPE (offset),
- offset, log);
- }
- }
- else
- {
- ratio =
- fold_build2 (TRUNC_DIV_EXPR, type, d_type_size,
- d_type_size_k);
- new_offset =
- fold_build2 (MULT_EXPR, type, offset, ratio);
- }
total_elements = new_offset;
if (new_offset != offset)
{
- tmp1 =
- force_gimple_operand (total_elements, &stmts, true,
- NULL);
- if (stmts)
- {
- tree_stmt_iterator tsi;
-
- for (tsi = tsi_start (stmts); !tsi_end_p (tsi);
- tsi_next (&tsi))
- mark_symbols_for_renaming (tsi_stmt (tsi));
- bsi = bsi_for_stmt (acc_info->stmt);
- bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
- }
+ gsi = gsi_for_stmt (acc_info->stmt);
+ tmp1 = force_gimple_operand_gsi (&gsi, total_elements,
+ true, NULL,
+ true, GSI_SAME_STMT);
}
else
tmp1 = offset;
{
d_size = mi->dimension_size[mi->dim_map[k] + 1];
num_elements =
- fold_build2 (MULT_EXPR, type, acc_info->index, d_size);
- tmp1 = force_gimple_operand (num_elements, &stmts, true, NULL);
+ fold_build2 (MULT_EXPR, sizetype, fold_convert (sizetype, acc_info->index),
+ fold_convert (sizetype, d_size));
add_referenced_var (d_size);
- if (stmts)
- {
- tree_stmt_iterator tsi;
-
- for (tsi = tsi_start (stmts); !tsi_end_p (tsi);
- tsi_next (&tsi))
- mark_symbols_for_renaming (tsi_stmt (tsi));
- bsi = bsi_for_stmt (acc_info->stmt);
- bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
- }
+ gsi = gsi_for_stmt (acc_info->stmt);
+ tmp1 = force_gimple_operand_gsi (&gsi, num_elements, true,
+ NULL, true, GSI_SAME_STMT);
}
/* Replace the offset if needed. */
if (tmp1 != offset)
{
if (TREE_CODE (offset) == SSA_NAME)
{
- tree use_stmt;
+ gimple use_stmt;
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, offset)
FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
- if (use_stmt == acc_info->stmt)
- SET_USE (use_p, tmp1);
+ if (use_stmt == acc_info->stmt)
+ SET_USE (use_p, tmp1);
}
else
{
gcc_assert (TREE_CODE (offset) == INTEGER_CST);
- TREE_OPERAND (orig, 1) = tmp1;
+ gimple_assign_set_rhs2 (acc_info->stmt, tmp1);
+ update_stmt (acc_info->stmt);
}
}
}
}
}
-
/* Replace multiple mallocs (one for each dimension) with one malloc
   with the size of DIM1*DIM2*...*DIMN*size_of_element.
   Make sure that we hold the size in the malloc site inside a
   new global variable; this way we ensure that the size doesn't
   change and it is accessible from all the other functions that
-   uses the matrix.  Also, the original calls to free are deleted,
+   use the matrix.  Also, the original calls to free are deleted,
and replaced by a new call to free the flattened matrix. */
static int
{
int i;
struct matrix_info *mi;
- tree type, call_stmt_0, malloc_stmt, oldfn, stmts, prev_dim_size, use_stmt;
+ tree type, oldfn, prev_dim_size;
+ gimple call_stmt_0, use_stmt;
struct cgraph_node *c_node;
struct cgraph_edge *e;
- block_stmt_iterator bsi;
- struct malloc_call_data mcd;
+ gimple_stmt_iterator gsi;
+ struct malloc_call_data mcd = {NULL, NULL_TREE, NULL_TREE};
HOST_WIDE_INT element_size;
imm_use_iterator imm_iter;
int min_escape_l;
int id;
- mi = *slot;
+ mi = (struct matrix_info *) *slot;
min_escape_l = mi->min_indirect_level_escape;
for (i = 1; i < mi->min_indirect_level_escape; i++)
{
tree t;
+ check_var_data data;
/* mi->dimension_size must contain the expression of the size calculated
in check_allocation_function. */
gcc_assert (mi->dimension_size[i]);
+ data.fn = mi->allocation_function_decl;
+ data.stmt = NULL;
t = walk_tree_without_duplicates (&(mi->dimension_size[i]),
check_var_notmodified_p,
- mi->allocation_function_decl);
+ &data);
if (t != NULL_TREE)
{
- mark_min_matrix_escape_level (mi, i, t);
+ mark_min_matrix_escape_level (mi, i, data.stmt);
break;
}
}
  /* We should make sure that the size expression is available
before the call to malloc of level 0. */
- bsi = bsi_for_stmt (call_stmt_0);
+ gsi = gsi_for_stmt (call_stmt_0);
/* Find out the size of each dimension by looking at the malloc
sites and create a global variable to hold it.
/* To be able to produce gimple temporaries. */
oldfn = current_function_decl;
current_function_decl = mi->allocation_function_decl;
- cfun = DECL_STRUCT_FUNCTION (mi->allocation_function_decl);
+ push_cfun (DECL_STRUCT_FUNCTION (mi->allocation_function_decl));
/* Set the dimension sizes as follows:
DIM_SIZE[i] = DIM_SIZE[n] * ... * DIM_SIZE[i]
for (i = mi->min_indirect_level_escape - 1; i >= 0; i--)
{
- tree dim_size, dim_var, tmp;
+ tree dim_size, dim_var;
+ gimple stmt;
tree d_type_size;
- tree_stmt_iterator tsi;
/* Now put the size expression in a global variable and initialize it to
the size expression before the malloc of level 0. */
add_new_static_var (TREE_TYPE
(mi->dimension_size_orig[mi->dim_map[i]]));
type = TREE_TYPE (mi->dimension_size_orig[mi->dim_map[i]]);
- d_type_size =
- build_int_cst (type, mi->dimension_type_size[mi->dim_map[i] + 1]);
/* DIM_SIZE = MALLOC_SIZE_PARAM / TYPE_SIZE. */
/* Find which dim ID becomes dim I. */
for (id = 0; id < mi->min_indirect_level_escape; id++)
if (mi->dim_map[id] == i)
break;
+ d_type_size =
+ build_int_cst (type, mi->dimension_type_size[id + 1]);
if (!prev_dim_size)
prev_dim_size = build_int_cst (type, element_size);
if (!check_transpose_p && i == mi->min_indirect_level_escape - 1)
dim_size = fold_build2 (MULT_EXPR, type, dim_size, prev_dim_size);
}
- dim_size = force_gimple_operand (dim_size, &stmts, true, NULL);
- if (stmts)
- {
- for (tsi = tsi_start (stmts); !tsi_end_p (tsi); tsi_next (&tsi))
- mark_symbols_for_renaming (tsi_stmt (tsi));
- bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
- bsi = bsi_for_stmt (call_stmt_0);
- }
+ dim_size = force_gimple_operand_gsi (&gsi, dim_size, true, NULL,
+ true, GSI_SAME_STMT);
/* GLOBAL_HOLDING_THE_SIZE = DIM_SIZE. */
- tmp = fold_build2 (GIMPLE_MODIFY_STMT, type, dim_var, dim_size);
- GIMPLE_STMT_OPERAND (tmp, 0) = dim_var;
- mark_symbols_for_renaming (tmp);
- bsi_insert_before (&bsi, tmp, BSI_NEW_STMT);
- bsi = bsi_for_stmt (call_stmt_0);
+ stmt = gimple_build_assign (dim_var, dim_size);
+ mark_symbols_for_renaming (stmt);
+ gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
prev_dim_size = mi->dimension_size[i] = dim_var;
}
update_ssa (TODO_update_ssa);
/* Replace the malloc size argument in the malloc of level 0 to be
the size of all the dimensions. */
- malloc_stmt = GIMPLE_STMT_OPERAND (call_stmt_0, 1);
- c_node = cgraph_node (mi->allocation_function_decl);
- old_size_0 = CALL_EXPR_ARG (malloc_stmt, 0);
- bsi = bsi_for_stmt (call_stmt_0);
- tmp = force_gimple_operand (mi->dimension_size[0], &stmts, true, NULL);
- if (stmts)
- {
- tree_stmt_iterator tsi;
-
- for (tsi = tsi_start (stmts); !tsi_end_p (tsi); tsi_next (&tsi))
- mark_symbols_for_renaming (tsi_stmt (tsi));
- bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
- bsi = bsi_for_stmt (call_stmt_0);
- }
+ c_node = cgraph_get_node (mi->allocation_function_decl);
+ gcc_checking_assert (c_node);
+ old_size_0 = gimple_call_arg (call_stmt_0, 0);
+ tmp = force_gimple_operand_gsi (&gsi, mi->dimension_size[0], true,
+ NULL, true, GSI_SAME_STMT);
if (TREE_CODE (old_size_0) == SSA_NAME)
{
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, old_size_0)
check this outside of "cgraph.c". */
for (i = 1; i < mi->min_indirect_level_escape; i++)
{
- block_stmt_iterator bsi;
- tree use_stmt1 = NULL;
- tree call;
+ gimple_stmt_iterator gsi;
- tree call_stmt = mi->malloc_for_level[i];
- call = GIMPLE_STMT_OPERAND (call_stmt, 1);
- gcc_assert (TREE_CODE (call) == CALL_EXPR);
+ gimple call_stmt = mi->malloc_for_level[i];
+ gcc_assert (is_gimple_call (call_stmt));
e = cgraph_edge (c_node, call_stmt);
gcc_assert (e);
cgraph_remove_edge (e);
- bsi = bsi_for_stmt (call_stmt);
+ gsi = gsi_for_stmt (call_stmt);
/* Remove the call stmt. */
- bsi_remove (&bsi, true);
- /* remove the type cast stmt. */
- FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter,
- GIMPLE_STMT_OPERAND (call_stmt, 0))
- {
- use_stmt1 = use_stmt;
- bsi = bsi_for_stmt (use_stmt);
- bsi_remove (&bsi, true);
- }
+ gsi_remove (&gsi, true);
/* Remove the assignment of the allocated area. */
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter,
- GIMPLE_STMT_OPERAND (use_stmt1, 0))
+ gimple_call_lhs (call_stmt))
{
- bsi = bsi_for_stmt (use_stmt);
- bsi_remove (&bsi, true);
+ gsi = gsi_for_stmt (use_stmt);
+ gsi_remove (&gsi, true);
}
}
update_ssa (TODO_update_ssa);
/* Delete the calls to free. */
for (i = 1; i < mi->min_indirect_level_escape; i++)
{
- block_stmt_iterator bsi;
- tree call;
+ gimple_stmt_iterator gsi;
/* ??? wonder why this case is possible but we failed on it once. */
if (!mi->free_stmts[i].stmt)
continue;
- call = TREE_OPERAND (mi->free_stmts[i].stmt, 1);
- c_node = cgraph_node (mi->free_stmts[i].func);
-
- gcc_assert (TREE_CODE (mi->free_stmts[i].stmt) == CALL_EXPR);
+ c_node = cgraph_get_node (mi->free_stmts[i].func);
+ gcc_checking_assert (c_node);
+ gcc_assert (is_gimple_call (mi->free_stmts[i].stmt));
e = cgraph_edge (c_node, mi->free_stmts[i].stmt);
gcc_assert (e);
cgraph_remove_edge (e);
current_function_decl = mi->free_stmts[i].func;
- cfun = DECL_STRUCT_FUNCTION (mi->free_stmts[i].func);
- bsi = bsi_for_stmt (mi->free_stmts[i].stmt);
- bsi_remove (&bsi, true);
+ set_cfun (DECL_STRUCT_FUNCTION (mi->free_stmts[i].func));
+ gsi = gsi_for_stmt (mi->free_stmts[i].stmt);
+ gsi_remove (&gsi, true);
}
/* Return to the previous situation. */
current_function_decl = oldfn;
- cfun = oldfn ? DECL_STRUCT_FUNCTION (oldfn) : NULL;
+ pop_cfun ();
return 1;
}
static int
dump_matrix_reorg_analysis (void **slot, void *data ATTRIBUTE_UNUSED)
{
- struct matrix_info *mi = *slot;
+ struct matrix_info *mi = (struct matrix_info *) *slot;
if (!dump_file)
return 1;
return 1;
}
-
/* Perform matrix flattening. */
static unsigned int
current_function_decl = node->decl;
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
bitmap_obstack_initialize (NULL);
- tree_register_cfg_hooks ();
+ gimple_register_cfg_hooks ();
if (!gimple_in_ssa_p (cfun))
{
free_dominance_info (CDI_POST_DOMINATORS);
pop_cfun ();
current_function_decl = temp_fn;
+ bitmap_obstack_release (NULL);
return 0;
}
free_dominance_info (CDI_POST_DOMINATORS);
pop_cfun ();
current_function_decl = temp_fn;
+ bitmap_obstack_release (NULL);
return 0;
}
free_dominance_info (CDI_POST_DOMINATORS);
pop_cfun ();
current_function_decl = temp_fn;
+ bitmap_obstack_release (NULL);
}
htab_traverse (matrices_to_reorg, transform_allocation_sites, NULL);
/* Now transform the accesses. */
current_function_decl = node->decl;
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
bitmap_obstack_initialize (NULL);
- tree_register_cfg_hooks ();
+ gimple_register_cfg_hooks ();
record_all_accesses_in_func ();
htab_traverse (matrices_to_reorg, transform_access_sites, NULL);
+ cgraph_rebuild_references ();
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
pop_cfun ();
current_function_decl = temp_fn;
+ bitmap_obstack_release (NULL);
}
htab_traverse (matrices_to_reorg, dump_matrix_reorg_analysis, NULL);
current_function_decl = NULL;
- cfun = NULL;
+ set_cfun (NULL);
matrices_to_reorg = NULL;
return 0;
}
static bool
gate_matrix_reorg (void)
{
- return flag_ipa_matrix_reorg /*&& flag_whole_program */ ;
+ return flag_ipa_matrix_reorg && flag_whole_program;
}
-struct tree_opt_pass pass_ipa_matrix_reorg = {
+struct simple_ipa_opt_pass pass_ipa_matrix_reorg =
+{
+ {
+ SIMPLE_IPA_PASS,
"matrix-reorg", /* name */
gate_matrix_reorg, /* gate */
matrix_reorg, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
+ TV_NONE, /* tv_id */
0, /* properties_required */
- PROP_trees, /* properties_provided */
+ 0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_cgraph | TODO_dump_func, /* todo_flags_finish */
- 0 /* letter */
+ TODO_dump_cgraph /* todo_flags_finish */
+ }
};