/* Matrix layout transformations.
   Copyright (C) 2006, 2007 Free Software Foundation, Inc.
   Contributed by Razya Ladelsky <razya@il.ibm.com>
   Originally written by Revital Eres and Mustafa Hagog.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/*
   Matrix flattening optimization tries to replace an N-dimensional
   matrix with an equivalent M-dimensional matrix, where M < N.
   This first implementation focuses on global matrices defined dynamically.

   When N==1, we actually flatten the whole matrix.
   For instance, consider a two-dimensional array a [dim1] [dim2].
   The code for allocating space for it usually looks like:

     a = (int **) malloc(dim1 * sizeof(int *));
     for (i=0; i<dim1; i++)
        a[i] = (int *) malloc (dim2 * sizeof(int));

   If the array "a" is found suitable for this optimization,
   its allocation is replaced by:

     a = (int *) malloc (dim1 * dim2 * sizeof(int));

   and all the references to a[i][j] are replaced by a[i * dim2 + j].
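   For instance (an illustrative fragment, not taken from the pass
   itself), a loop such as:

     for (i = 0; i < dim1; i++)
       for (j = 0; j < dim2; j++)
         sum += a[i][j];

   becomes, after flattening:

     for (i = 0; i < dim1; i++)
       for (j = 0; j < dim2; j++)
         sum += a[i * dim2 + j];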
   The two main phases of the optimization are the analysis
   and the transformation.
   The driver of the optimization is matrix_reorg ().
   Analysis phase:
   ===============
   We'll number the dimensions outside-in, meaning the most external
   is 0, then 1, and so on.
   The analysis part of the optimization determines K, the escape
   level of an N-dimensional matrix (K <= N), that allows flattening of
   the external dimensions 0, 1, ..., K-1.  Escape level 0 means that the
   whole matrix escapes and no flattening is possible.

   The analysis part is implemented in analyze_matrix_allocation_site()
   and analyze_matrix_accesses().

   Transformation phase:
   =====================
   In this phase we define the new flattened matrices that replace the
   original matrices in the code.
   Implemented in transform_allocation_sites(),
   transform_access_sites().
   Matrix transposing:
   ===================
   The idea of matrix transposing is organizing the matrix in a different
   layout such that the dimensions are reordered.
   This could produce better cache behavior in some cases.

   For example, let's look at the matrix accesses in the following loop:

     for (i = 0; i < N; i++)
      for (j = 0; j < M; j++)
       access to a[i][j]

   This loop can produce good cache behavior because the elements of
   the inner dimension are accessed sequentially.

   However, if the accesses of the matrix were of the following form:

     for (i = 0; i < N; i++)
      for (j = 0; j < M; j++)
       access to a[j][i]

   In this loop we iterate the columns and not the rows.
   Therefore, swapping the rows and columns
   would yield an organization with better (cache) locality.
   Swapping the dimensions of the matrix is called matrix transposing.
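   For instance (an illustrative sketch): for the column-wise accesses
   above, a transposed layout stores the element that used to live at
   a[j][i] at a'[i * M + j], so the innermost loop (over j) once again
   walks consecutive memory.  The actual ordering is chosen per
   dimension by the hotness heuristic described below.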
   This example, of course, could be extended to matrices of more
   dimensions as well.

   Since a program could include all kinds of accesses, there is a
   decision mechanism, implemented in analyze_transpose(), which applies
   a heuristic to determine whether to transpose the matrix or not,
   according to the form of the most dominant accesses.
   This decision is transferred to the flattening mechanism, and the
   matrix is flattened (if possible) whether or not it was transposed.

   The decision making is based on profiling information and loop
   information.  If profiling information is available, the decision
   making mechanism is run; otherwise the matrix is only flattened
   (if possible).
   Both optimizations are described in the paper "Matrix flattening and
   transposing in GCC", which was presented at the GCC Summit 2006.
   http://www.gccsummit.org/2006/2006-GCC-Summit-Proceedings.pdf  */
#include "coretypes.h"
#include "tree-inline.h"
#include "tree-flow.h"
#include "tree-flow-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "c-common.h"
#include "function.h"
#include "basic-block.h"
#include "tree-iterator.h"
#include "tree-pass.h"
#include "tree-data-ref.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
/* We need to collect a lot of data from the original malloc,
   particularly as the gimplifier has converted:

   orig_var = (struct_type *) malloc (x * sizeof (struct_type *));

   into

   T3 = <constant> ;  ** <constant> is amount to malloc; precomputed **
   T4 = malloc (T3);
   T5 = (struct_type *) T4;
   orig_var = T5;

   The following struct fields allow us to collect all the necessary data from
   the gimplified program.  The comments in the struct below are all based
   on the gimple example above.  */
struct malloc_call_data
{
  tree call_stmt;	/* Tree for "T4 = malloc (T3);".  */
  tree size_var;	/* Var decl for T3.  */
  tree malloc_size;	/* Tree for "<constant>", the rhs assigned to T3.  */
};
/* The front end of the compiler, when parsing statements of the form:

   var = (type_cast) malloc (sizeof (type));

   always converts this single statement into the following statements
   (GIMPLE form):

   T.1 = sizeof (type);
   T.2 = malloc (T.1);
   T.3 = (type_cast) T.2;
   var = T.3;

   Since we need to create new malloc statements and modify the original
   statements somewhat, we need to find all four of the above statements.
   Currently record_call_1 (called for building cgraph edges) finds and
   records the statements containing the actual call to malloc, but we
   need to find the rest of the variables/statements on our own.  That
   is what the following function does.  */
static void
collect_data_for_malloc_call (tree stmt, struct malloc_call_data *m_data)
{
  tree size_var = NULL;
  tree malloc_fn_decl;
  tree tmp;
  tree arg1;

  gcc_assert (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT);

  tmp = get_call_expr_in (stmt);
  malloc_fn_decl = CALL_EXPR_FN (tmp);
  if (TREE_CODE (malloc_fn_decl) != ADDR_EXPR
      || TREE_CODE (TREE_OPERAND (malloc_fn_decl, 0)) != FUNCTION_DECL
      || DECL_FUNCTION_CODE (TREE_OPERAND (malloc_fn_decl, 0)) !=
      BUILT_IN_MALLOC)
    return;

  arg1 = CALL_EXPR_ARG (tmp, 0);
  size_var = arg1;

  m_data->call_stmt = stmt;
  m_data->size_var = size_var;
  if (TREE_CODE (size_var) != VAR_DECL)
    m_data->malloc_size = size_var;
  else
    m_data->malloc_size = NULL_TREE;
}
/* Information about matrix access site.
   For example: if an access site of matrix arr is arr[i][j]
   the ACCESS_SITE_INFO structure will have the address
   of arr as its stmt.  The INDEX_INFO will hold information about the
   initial address and index of each dimension.  */
struct access_site_info
{
  /* The statement (INDIRECT_REF or PLUS_EXPR).  */
  tree stmt;

  /* In case of PLUS_EXPR, what is the offset.  */
  tree offset;

  /* The index which created the offset.  */
  tree index;

  /* The indirection level of this statement.  */
  int level;

  /* TRUE for allocation site, FALSE for access site.  */
  bool is_alloc;

  /* The function containing the access site.  */
  tree function_decl;

  /* This access is iterated in the inner most loop.  */
  bool iterated_by_inner_most_loop_p;
};

typedef struct access_site_info *access_site_info_p;
DEF_VEC_P (access_site_info_p);
DEF_VEC_ALLOC_P (access_site_info_p, heap);
/* Information about matrix to flatten.  */
struct matrix_info
{
  /* Decl tree of this matrix.  */
  tree decl;

  /* Number of dimensions; number
     of "*" in the type declaration.  */
  int num_dims;

  /* Minimum indirection level that escapes, 0 means that
     the whole matrix escapes, k means that dimensions
     0 to ACTUAL_DIM - k escapes.  */
  int min_indirect_level_escape;

  tree min_indirect_level_escape_stmt;

  /* Is the matrix transposed.  */
  bool is_transposed_p;

  /* Hold the allocation site for each level (dimension).
     We can use NUM_DIMS as the upper bound and allocate the array
     once with this number of elements and no need to use realloc and
     MAX_MALLOCED_LEVEL.  */
  tree *malloc_for_level;

  int max_malloced_level;

  /* The location of the allocation sites (they must be in one
     function).  */
  tree allocation_function_decl;

  /* The calls to free for each level of indirection.  */
  struct free_info
  {
    tree stmt;
    tree func;
  } *free_stmts;

  /* An array which holds for each dimension its size, where
     dimension 0 is the outermost (the one that contains all the
     others).  */
  tree *dimension_size;

  /* An array which holds for each dimension its original size
     (before transposing and flattening take place).  */
  tree *dimension_size_orig;

  /* An array which holds for each dimension the size of the type of
     the elements accessed in that level (in bytes).  */
  HOST_WIDE_INT *dimension_type_size;

  int dimension_type_size_len;

  /* An array collecting the count of accesses for each dimension.  */
  gcov_type *dim_hot_level;

  /* An array of the accesses to be flattened.
     Elements are of type "struct access_site_info *".  */
  VEC (access_site_info_p, heap) * access_l;

  /* A map of how the dimensions will be organized at the end of
     the analysis.  */
  int *dim_map;
};
/* In each phi node we want to record the indirection level we have when we
   get to the phi node.  Usually we will have phi nodes with more than two
   arguments, then we must assure that all of them get to the phi node with
   the same indirection level; otherwise it's not safe to do the flattening.
   So we record the information regarding the indirection level each time we
   get to the phi node in this hash table.  */
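/* An illustrative (hypothetical) situation where this matters:

     p_3 = PHI <q_1, r_2>

   If q_1 reaches the phi at indirection level 1 while r_2 reaches it at
   level 2, the levels disagree; we then fall back to the minimum level,
   since beyond it the behavior of the indirections is unpredictable.  */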
struct matrix_access_phi_node
{
  tree phi;
  int indirection_level;
};
/* We use this structure to find if the SSA variable is accessed inside the
   tree and record the tree containing it.  */
struct ssa_acc_in_tree
{
  /* The variable whose accesses in the tree we are looking for.  */
  tree ssa_var;

  /* The tree and code inside it the ssa_var is accessed, currently
     it could be an INDIRECT_REF or CALL_EXPR.  */
  enum tree_code t_code;
  tree t_tree;

  /* The place in the containing tree.  */
  tree *tp;

  tree second_op;
  bool var_found;
};
static void analyze_matrix_accesses (struct matrix_info *, tree, int, bool,
				     sbitmap, bool);
static int transform_allocation_sites (void **, void *);
static int transform_access_sites (void **, void *);
static int analyze_transpose (void **, void *);
static int dump_matrix_reorg_analysis (void **, void *);

static bool check_transpose_p;
/* Hash function used for the phi nodes.  */
static hashval_t
mat_acc_phi_hash (const void *p)
{
  const struct matrix_access_phi_node *ma_phi = p;

  return htab_hash_pointer (ma_phi->phi);
}

/* Equality means phi node pointers are the same.  */
static int
mat_acc_phi_eq (const void *p1, const void *p2)
{
  const struct matrix_access_phi_node *phi1 = p1;
  const struct matrix_access_phi_node *phi2 = p2;

  if (phi1->phi == phi2->phi)
    return 1;

  return 0;
}
/* Hold the PHI nodes we visit during the traversal for escaping
   analysis.  */
static htab_t htab_mat_acc_phi_nodes = NULL;

/* This hash-table holds the information about the matrices we are
   flattening.  */
static htab_t matrices_to_reorg = NULL;

/* Return a hash for MTT, which is really a "matrix_info *".  */
static hashval_t
mtt_info_hash (const void *mtt)
{
  return htab_hash_pointer (((struct matrix_info *) mtt)->decl);
}

/* Return true if MTT1 and MTT2 (which are really both of type
   "matrix_info *") refer to the same decl.  */
static int
mtt_info_eq (const void *mtt1, const void *mtt2)
{
  const struct matrix_info *i1 = mtt1;
  const struct matrix_info *i2 = mtt2;

  if (i1->decl == i2->decl)
    return 1;

  return 0;
}
/* Return the inner most tree that is not a cast.  */
static tree
get_inner_of_cast_expr (tree t)
{
  while (TREE_CODE (t) == CONVERT_EXPR || TREE_CODE (t) == NOP_EXPR
	 || TREE_CODE (t) == VIEW_CONVERT_EXPR)
    t = TREE_OPERAND (t, 0);

  return t;
}
/* Return false if STMT may contain a vector expression.
   In this situation, all matrices should not be flattened.  */
static bool
may_flatten_matrices_1 (tree stmt)
{
  tree t;

  switch (TREE_CODE (stmt))
    {
    case GIMPLE_MODIFY_STMT:
      t = GIMPLE_STMT_OPERAND (stmt, 1);
      while (TREE_CODE (t) == CONVERT_EXPR || TREE_CODE (t) == NOP_EXPR)
	{
	  if (TREE_TYPE (t) && POINTER_TYPE_P (TREE_TYPE (t)))
	    {
	      tree pointee;

	      pointee = TREE_TYPE (t);
	      while (POINTER_TYPE_P (pointee))
		pointee = TREE_TYPE (pointee);
	      if (TREE_CODE (pointee) == VECTOR_TYPE)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Found vector type, don't flatten matrix\n");
		  return false;
		}
	    }
	  t = TREE_OPERAND (t, 0);
	}
      break;
    case ASM_EXPR:
      /* Asm code could contain vector operations.  */
      return false;
    default:
      break;
    }
  return true;
}
/* Return false if there are hand-written vectors in the program.
   We disable the flattening in such a case.  */
static bool
may_flatten_matrices (struct cgraph_node *node)
{
  tree decl;
  struct function *func;
  basic_block bb;
  block_stmt_iterator bsi;

  decl = node->decl;
  if (node->analyzed)
    {
      func = DECL_STRUCT_FUNCTION (decl);
      FOR_EACH_BB_FN (bb, func)
	for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	  if (!may_flatten_matrices_1 (bsi_stmt (bsi)))
	    return false;
    }
  return true;
}
/* Given a VAR_DECL, check its type to determine whether it is
   a definition of a dynamic allocated matrix and therefore is
   a suitable candidate for the matrix flattening optimization.
   Return NULL if VAR_DECL is not such decl.  Otherwise, allocate
   a MATRIX_INFO structure, fill it with the relevant information
   and return a pointer to it.
   TODO: handle also statically defined arrays.  */
static struct matrix_info *
analyze_matrix_decl (tree var_decl)
{
  struct matrix_info *m_node, tmpmi, *mi;
  tree var_type;
  int dim_num = 0;

  gcc_assert (matrices_to_reorg);

  if (TREE_CODE (var_decl) == PARM_DECL)
    var_type = DECL_ARG_TYPE (var_decl);
  else if (TREE_CODE (var_decl) == VAR_DECL)
    var_type = TREE_TYPE (var_decl);
  else
    return NULL;

  if (!POINTER_TYPE_P (var_type))
    return NULL;

  while (POINTER_TYPE_P (var_type))
    {
      var_type = TREE_TYPE (var_type);
      dim_num++;
    }

  if (dim_num <= 1)
    return NULL;

  if (!COMPLETE_TYPE_P (var_type)
      || TREE_CODE (TYPE_SIZE_UNIT (var_type)) != INTEGER_CST)
    return NULL;

  /* Check to see if this pointer is already in there.  */
  tmpmi.decl = var_decl;
  mi = htab_find (matrices_to_reorg, &tmpmi);

  if (mi)
    return NULL;

  /* Record the matrix.  */

  m_node = (struct matrix_info *) xcalloc (1, sizeof (struct matrix_info));
  m_node->decl = var_decl;
  m_node->num_dims = dim_num;
  m_node->free_stmts
    = (struct free_info *) xcalloc (dim_num, sizeof (struct free_info));

  /* Init min_indirect_level_escape to -1 to indicate that no escape
     analysis has been done yet.  */
  m_node->min_indirect_level_escape = -1;
  m_node->is_transposed_p = false;

  return m_node;
}
/* Free a MATRIX_INFO structure pointed to by E.  */
static void
mat_free (void *e)
{
  struct matrix_info *mat = (struct matrix_info *) e;

  if (!mat)
    return;

  if (mat->free_stmts)
    free (mat->free_stmts);
  if (mat->dim_hot_level)
    free (mat->dim_hot_level);
  if (mat->malloc_for_level)
    free (mat->malloc_for_level);
}
/* Find all potential matrices.
   TODO: currently we handle only multidimensional
   dynamically allocated arrays.  */
static void
find_matrices_decl (void)
{
  struct matrix_info *tmp;
  void **slot;
  struct varpool_node *vnode;

  gcc_assert (matrices_to_reorg);

  /* For every global variable in the program:
     Check to see if it's of a candidate type and record it.  */
  for (vnode = varpool_nodes_queue; vnode; vnode = vnode->next_needed)
    {
      tree var_decl = vnode->decl;

      if (!var_decl || TREE_CODE (var_decl) != VAR_DECL)
	continue;

      if (matrices_to_reorg)
	if ((tmp = analyze_matrix_decl (var_decl)))
	  {
	    if (!TREE_ADDRESSABLE (var_decl))
	      {
		slot = htab_find_slot (matrices_to_reorg, tmp, INSERT);
		*slot = tmp;
	      }
	  }
    }
}
/* Mark that the matrix MI escapes at level L.  */
static void
mark_min_matrix_escape_level (struct matrix_info *mi, int l, tree s)
{
  if (mi->min_indirect_level_escape == -1
      || (mi->min_indirect_level_escape > l))
    {
      mi->min_indirect_level_escape = l;
      mi->min_indirect_level_escape_stmt = s;
    }
}
/* Find if the SSA variable is accessed inside the
   tree and record the tree containing it.
   The only relevant uses are the case of SSA_NAME, or SSA inside
   INDIRECT_REF, CALL_EXPR, PLUS_EXPR, MULT_EXPR.  */
static void
ssa_accessed_in_tree (tree t, struct ssa_acc_in_tree *a)
{
  tree call, decl;
  tree op1, op2;
  tree arg;
  call_expr_arg_iterator iter;

  a->t_code = TREE_CODE (t);
  switch (a->t_code)
    {
    case SSA_NAME:
      if (t == a->ssa_var)
	a->var_found = true;
      break;
    case INDIRECT_REF:
      if (SSA_VAR_P (TREE_OPERAND (t, 0))
	  && TREE_OPERAND (t, 0) == a->ssa_var)
	a->var_found = true;
      break;
    case CALL_EXPR:
      FOR_EACH_CALL_EXPR_ARG (arg, iter, t)
	{
	  if (arg == a->ssa_var)
	    {
	      a->var_found = true;
	      call = get_call_expr_in (t);
	      if (call && (decl = get_callee_fndecl (call)))
		a->t_tree = decl;
	      break;
	    }
	}
      break;
    case PLUS_EXPR:
    case MULT_EXPR:
      op1 = TREE_OPERAND (t, 0);
      op2 = TREE_OPERAND (t, 1);

      if (op1 == a->ssa_var)
	{
	  a->var_found = true;
	  a->second_op = op2;
	}
      else if (op2 == a->ssa_var)
	{
	  a->var_found = true;
	  a->second_op = op1;
	}
      break;
    default:
      break;
    }
}
/* Record the access/allocation site information for matrix MI so we can
   handle it later in transformation.  */
static void
record_access_alloc_site_info (struct matrix_info *mi, tree stmt, tree offset,
			       tree index, int level, bool is_alloc)
{
  struct access_site_info *acc_info;

  if (!mi->access_l)
    mi->access_l = VEC_alloc (access_site_info_p, heap, 100);

  acc_info
    = (struct access_site_info *)
    xcalloc (1, sizeof (struct access_site_info));
  acc_info->stmt = stmt;
  acc_info->offset = offset;
  acc_info->index = index;
  acc_info->function_decl = current_function_decl;
  acc_info->level = level;
  acc_info->is_alloc = is_alloc;

  VEC_safe_push (access_site_info_p, heap, mi->access_l, acc_info);
}
/* Record the malloc as the allocation site of the given LEVEL.  But
   first we make sure that all the size parameters passed to malloc in
   all the allocation sites could be pre-calculated before the call to
   the malloc of level 0 (the main malloc call).  */
static void
add_allocation_site (struct matrix_info *mi, tree stmt, int level)
{
  struct malloc_call_data mcd;

  /* Make sure that the allocation sites are in the same function.  */
  if (!mi->allocation_function_decl)
    mi->allocation_function_decl = current_function_decl;
  else if (mi->allocation_function_decl != current_function_decl)
    {
      int min_malloc_level;

      gcc_assert (mi->malloc_for_level);

      /* Find the minimum malloc level that has already been seen;
         we know its allocation function must be
         MI->allocation_function_decl.  Since it's different from
         CURRENT_FUNCTION_DECL, the escaping level should be
         MIN (LEVEL, MIN_MALLOC_LEVEL), and the allocation function
         must be set accordingly.  */
      for (min_malloc_level = 0;
	   min_malloc_level < mi->max_malloced_level
	   && mi->malloc_for_level[min_malloc_level]; min_malloc_level++);
      if (level < min_malloc_level)
	{
	  mi->allocation_function_decl = current_function_decl;
	  mark_min_matrix_escape_level (mi, min_malloc_level, stmt);
	}
      else
	{
	  mark_min_matrix_escape_level (mi, level, stmt);
	  /* It cannot be that (level == min_malloc_level);
	     we would have returned earlier.  */
	}
      return;
    }

  /* Find the correct malloc information.  */
  collect_data_for_malloc_call (stmt, &mcd);

  /* We accept only calls to malloc function; we do not accept
     calls like calloc and realloc.  */
  if (!mi->malloc_for_level)
    {
      mi->malloc_for_level = xcalloc (level + 1, sizeof (tree));
      mi->max_malloced_level = level + 1;
    }
  else if (mi->max_malloced_level <= level)
    {
      mi->malloc_for_level
	= xrealloc (mi->malloc_for_level, (level + 1) * sizeof (tree));

      /* Zero the newly allocated items.  */
      memset (&(mi->malloc_for_level[mi->max_malloced_level]),
	      0, (level + 1 - mi->max_malloced_level) * sizeof (tree));

      mi->max_malloced_level = level + 1;
    }
  mi->malloc_for_level[level] = stmt;
}
/* Given an assignment statement STMT whose left-hand-side we know
   to be the matrix MI variable, we traverse the immediate uses
   backwards until we get to a malloc site.  We make sure that
   there is one and only one malloc site that sets this variable.  When
   we are performing the flattening we generate a new variable that
   will hold the size for each dimension; each malloc that allocates a
   dimension has the size parameter; we use that parameter to
   initialize the dimension size variable so we can use it later in
   the address calculations.  LEVEL is the dimension we're inspecting.
   Return if STMT is related to an allocation site.  */

static void
analyze_matrix_allocation_site (struct matrix_info *mi, tree stmt,
				int level, sbitmap visited)
{
  if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
    {
      tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);

      rhs = get_inner_of_cast_expr (rhs);
      if (TREE_CODE (rhs) == SSA_NAME)
	{
	  tree def = SSA_NAME_DEF_STMT (rhs);

	  analyze_matrix_allocation_site (mi, def, level, visited);
	  return;
	}
      /* A result of call to malloc.  */
      else if (TREE_CODE (rhs) == CALL_EXPR)
	{
	  int call_flags = call_expr_flags (rhs);

	  if (!(call_flags & ECF_MALLOC))
	    {
	      mark_min_matrix_escape_level (mi, level, stmt);
	      return;
	    }
	  else
	    {
	      tree malloc_fn_decl;
	      const char *malloc_fname;

	      malloc_fn_decl = CALL_EXPR_FN (rhs);
	      if (TREE_CODE (malloc_fn_decl) != ADDR_EXPR
		  || TREE_CODE (TREE_OPERAND (malloc_fn_decl, 0)) !=
		  FUNCTION_DECL)
		{
		  mark_min_matrix_escape_level (mi, level, stmt);
		  return;
		}
	      malloc_fn_decl = TREE_OPERAND (malloc_fn_decl, 0);
	      malloc_fname = IDENTIFIER_POINTER (DECL_NAME (malloc_fn_decl));
	      if (DECL_FUNCTION_CODE (malloc_fn_decl) != BUILT_IN_MALLOC)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Matrix %s is an argument to function %s\n",
			     get_name (mi->decl), get_name (malloc_fn_decl));
		  mark_min_matrix_escape_level (mi, level, stmt);
		  return;
		}
	    }
	  /* This is a call to malloc.  Check to see if this is the first
	     call in this indirection level; if so, mark it; if not, mark
	     it as escaping.  */
	  if (mi->malloc_for_level
	      && mi->malloc_for_level[level]
	      && mi->malloc_for_level[level] != stmt)
	    {
	      mark_min_matrix_escape_level (mi, level, stmt);
	      return;
	    }
	  else
	    add_allocation_site (mi, stmt, level);
	  return;
	}
      /* If we are back to the original matrix variable then we
         are sure that this is analyzed as an access site.  */
      else if (rhs == mi->decl)
	return;
    }
  /* Looks like we don't know what is happening in this
     statement so be on the safe side and mark it as escaping.  */
  mark_min_matrix_escape_level (mi, level, stmt);
}
/* The transposing decision making.
   In order to calculate the profitability of transposing, we collect two
   types of information regarding the accesses:
   1. profiling information used to express the hotness of an access, that
      is how often the matrix is accessed by this access site (count of the
      access site).
   2. which dimension in the access site is iterated by the inner
      most loop containing this access.

   The matrix will have a calculated value of weighted hotness for each
   dimension.
   Intuitively the hotness level of a dimension is a function of how
   many times it was the most frequently accessed dimension in the
   highly executed access sites of this matrix.

   It is computed by the following equation:

     for i = 0 .. n-1, j = 0 .. m-1:
       dim_hot_level[i] += acc[j]->dim[i]->iter_by_inner_loop * count(j)

   Where n is the number of dims and m is the number of the matrix
   access sites.  acc[j]->dim[i]->iter_by_inner_loop is 1 if acc[j]
   iterates over dim[i] in the innermost loop, and is 0 otherwise.

   The organization of the new matrix should be according to the
   hotness of each dimension.  The hotness of the dimension implies
   the locality of the elements.  */
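/* For illustration only: a minimal sketch of the accumulation above,
   assuming hypothetical arrays ACC[] and COUNT[] that mirror the
   recorded access sites:

     for (j = 0; j < m; j++)
       for (i = 0; i < n; i++)
	 dim_hot_level[i] += acc[j]->dim[i]->iter_by_inner_loop * count[j];

   The dimensions are then reordered according to their weighted hotness
   (see sort_dim_hot_level below).  */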
static int
analyze_transpose (void **slot, void *data ATTRIBUTE_UNUSED)
{
  struct matrix_info *mi = *slot;
  int min_escape_l = mi->min_indirect_level_escape;
  struct loop *loop;
  affine_iv iv;
  struct access_site_info *acc_info;
  int i;

  if (min_escape_l < 2 || !mi->access_l)
    {
      if (mi->access_l)
	{
	  for (i = 0;
	       VEC_iterate (access_site_info_p, mi->access_l, i, acc_info);
	       i++)
	    free (acc_info);
	  VEC_free (access_site_info_p, heap, mi->access_l);
	}
      return 1;
    }
  if (!mi->dim_hot_level)
    mi->dim_hot_level =
      (gcov_type *) xcalloc (min_escape_l, sizeof (gcov_type));

  for (i = 0; VEC_iterate (access_site_info_p, mi->access_l, i, acc_info);
       i++)
    {
      if (TREE_CODE (GIMPLE_STMT_OPERAND (acc_info->stmt, 1)) == PLUS_EXPR
	  && acc_info->level < min_escape_l)
	{
	  loop = loop_containing_stmt (acc_info->stmt);
	  if (!loop || loop->inner)
	    {
	      free (acc_info);
	      continue;
	    }
	  if (simple_iv (loop, acc_info->stmt, acc_info->offset, &iv, true))
	    {
	      if (iv.step != NULL)
		{
		  HOST_WIDE_INT istep;

		  istep = int_cst_value (iv.step);
		  if (istep != 0)
		    {
		      acc_info->iterated_by_inner_most_loop_p = 1;
		      mi->dim_hot_level[acc_info->level] +=
			bb_for_stmt (acc_info->stmt)->count;
		    }
		}
	    }
	}
      free (acc_info);
    }
  VEC_free (access_site_info_p, heap, mi->access_l);
  mi->access_l = NULL;

  return 1;
}
/* Find the index which defines the OFFSET from base.
   We walk from use to def until we find how the offset was defined.  */
static tree
get_index_from_offset (tree offset, tree def_stmt)
{
  tree op1, op2, expr, index;

  if (TREE_CODE (def_stmt) == PHI_NODE)
    return NULL_TREE;
  expr = get_inner_of_cast_expr (GIMPLE_STMT_OPERAND (def_stmt, 1));
  if (TREE_CODE (expr) == SSA_NAME)
    return get_index_from_offset (offset, SSA_NAME_DEF_STMT (expr));
  else if (TREE_CODE (expr) == MULT_EXPR)
    {
      op1 = TREE_OPERAND (expr, 0);
      op2 = TREE_OPERAND (expr, 1);
      if (TREE_CODE (op1) != INTEGER_CST && TREE_CODE (op2) != INTEGER_CST)
	return NULL_TREE;
      index = (TREE_CODE (op1) == INTEGER_CST) ? op2 : op1;
      return index;
    }
  else
    return NULL_TREE;
}
/* Update MI->dimension_type_size[CURRENT_INDIRECT_LEVEL] with the size
   of the type related to the SSA_VAR, or the type related to the
   lhs of STMT, in the case that it is an INDIRECT_REF.  */
static void
update_type_size (struct matrix_info *mi, tree stmt, tree ssa_var,
		  int current_indirect_level)
{
  tree lhs;
  HOST_WIDE_INT type_size;

  /* Update type according to the type of the INDIRECT_REF expr.  */
  if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
      && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == INDIRECT_REF)
    {
      lhs = GIMPLE_STMT_OPERAND (stmt, 0);
      gcc_assert (POINTER_TYPE_P
		  (TREE_TYPE (SSA_NAME_VAR (TREE_OPERAND (lhs, 0)))));
      type_size =
	int_size_in_bytes (TREE_TYPE
			   (TREE_TYPE
			    (SSA_NAME_VAR (TREE_OPERAND (lhs, 0)))));
    }
  else
    type_size = int_size_in_bytes (TREE_TYPE (ssa_var));

  /* Record the size of elements accessed (as a whole)
     in the current indirection level (dimension).  If the size of
     elements is not known at compile time, mark it as escaping.  */
  if (type_size <= 0)
    mark_min_matrix_escape_level (mi, current_indirect_level, stmt);
  else
    {
      int l = current_indirect_level;

      if (!mi->dimension_type_size)
	{
	  mi->dimension_type_size
	    = (HOST_WIDE_INT *) xcalloc (l + 1, sizeof (HOST_WIDE_INT));
	  mi->dimension_type_size_len = l + 1;
	}
      else if (mi->dimension_type_size_len < l + 1)
	{
	  mi->dimension_type_size
	    = (HOST_WIDE_INT *) xrealloc (mi->dimension_type_size,
					  (l + 1) * sizeof (HOST_WIDE_INT));
	  memset (&mi->dimension_type_size[mi->dimension_type_size_len],
		  0, (l + 1 - mi->dimension_type_size_len)
		  * sizeof (HOST_WIDE_INT));
	  mi->dimension_type_size_len = l + 1;
	}
      /* Make sure all the accesses in the same level have the same size
         of the type.  */
      if (!mi->dimension_type_size[l])
	mi->dimension_type_size[l] = type_size;
      else if (mi->dimension_type_size[l] != type_size)
	mark_min_matrix_escape_level (mi, l, stmt);
    }
}
/* USE_STMT represents a call_expr, where one of the arguments is the
   ssa var that we want to check because it came from some use of matrix
   MI.  CURRENT_INDIRECT_LEVEL is the indirection level we reached so
   far.  */
static void
analyze_accesses_for_call_expr (struct matrix_info *mi, tree use_stmt,
				int current_indirect_level)
{
  tree call = get_call_expr_in (use_stmt);

  if (call && get_callee_fndecl (call))
    {
      if (DECL_FUNCTION_CODE (get_callee_fndecl (call)) != BUILT_IN_FREE)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Matrix %s: Function call %s, level %d escapes.\n",
		     get_name (mi->decl), get_name (get_callee_fndecl (call)),
		     current_indirect_level);
	  mark_min_matrix_escape_level (mi, current_indirect_level, use_stmt);
	}
      else if (mi->free_stmts[current_indirect_level].stmt != NULL
	       && mi->free_stmts[current_indirect_level].stmt != use_stmt)
	mark_min_matrix_escape_level (mi, current_indirect_level, use_stmt);
      else
	{
	  /* Record the free statements so we can delete them
	     later.  */
	  int l = current_indirect_level;

	  mi->free_stmts[l].stmt = use_stmt;
	  mi->free_stmts[l].func = current_function_decl;
	}
    }
}
/* USE_STMT represents a phi node of the ssa var that we want to
   check because it came from some use of matrix MI.
   We check all the escaping levels that get to the PHI node
   and make sure they are all the same escaping;
   if not (which is rare) we let the escaping level be the
   minimum level that gets into that PHI because starting from
   that level we cannot expect the behavior of the indirections.
   CURRENT_INDIRECT_LEVEL is the indirection level we reached so far.  */

static void
analyze_accesses_for_phi_node (struct matrix_info *mi, tree use_stmt,
			       int current_indirect_level, sbitmap visited,
			       bool record_accesses)
{
  struct matrix_access_phi_node tmp_maphi, *maphi, **pmaphi;

  tmp_maphi.phi = use_stmt;
  if ((maphi = htab_find (htab_mat_acc_phi_nodes, &tmp_maphi)))
    {
      if (maphi->indirection_level == current_indirect_level)
	return;
      else
	{
	  int level = MIN (maphi->indirection_level,
			   current_indirect_level);
	  int j;
	  tree t = NULL_TREE;

	  maphi->indirection_level = level;
	  for (j = 0; j < PHI_NUM_ARGS (use_stmt); j++)
	    {
	      tree def = PHI_ARG_DEF (use_stmt, j);

	      if (TREE_CODE (SSA_NAME_DEF_STMT (def)) != PHI_NODE)
		t = SSA_NAME_DEF_STMT (def);
	    }
	  mark_min_matrix_escape_level (mi, level, t);
	}
      return;
    }
  maphi = (struct matrix_access_phi_node *)
    xcalloc (1, sizeof (struct matrix_access_phi_node));
  maphi->phi = use_stmt;
  maphi->indirection_level = current_indirect_level;

  /* Insert to hash table.  */
  pmaphi = (struct matrix_access_phi_node **)
    htab_find_slot (htab_mat_acc_phi_nodes, maphi, INSERT);
  gcc_assert (pmaphi);
  *pmaphi = maphi;

  if (!TEST_BIT (visited, SSA_NAME_VERSION (PHI_RESULT (use_stmt))))
    {
      SET_BIT (visited, SSA_NAME_VERSION (PHI_RESULT (use_stmt)));
      analyze_matrix_accesses (mi, PHI_RESULT (use_stmt),
			       current_indirect_level, false, visited,
			       record_accesses);
      RESET_BIT (visited, SSA_NAME_VERSION (PHI_RESULT (use_stmt)));
    }
}
/* USE_STMT represents a modify statement (the rhs or lhs includes
   the ssa var that we want to check because it came from some use of
   matrix MI).
   CURRENT_INDIRECT_LEVEL is the indirection level we reached so far.  */

static int
analyze_accesses_for_modify_stmt (struct matrix_info *mi, tree ssa_var,
				  tree use_stmt, int current_indirect_level,
				  bool last_op, sbitmap visited,
				  bool record_accesses)
{
  tree lhs = GIMPLE_STMT_OPERAND (use_stmt, 0);
  tree rhs = GIMPLE_STMT_OPERAND (use_stmt, 1);
  struct ssa_acc_in_tree lhs_acc, rhs_acc;

  memset (&lhs_acc, 0, sizeof (lhs_acc));
  memset (&rhs_acc, 0, sizeof (rhs_acc));

  lhs_acc.ssa_var = ssa_var;
  lhs_acc.t_code = ERROR_MARK;
  ssa_accessed_in_tree (lhs, &lhs_acc);
  rhs_acc.ssa_var = ssa_var;
  rhs_acc.t_code = ERROR_MARK;
  ssa_accessed_in_tree (get_inner_of_cast_expr (rhs), &rhs_acc);

  /* The SSA must be either in the left side or in the right side,
     to understand what is happening.
     In case the SSA_NAME is found in both sides we should be escaping
     at this level because in this case we cannot calculate the
     address correctly.  */
  if ((lhs_acc.var_found && rhs_acc.var_found
       && lhs_acc.t_code == INDIRECT_REF)
      || (!rhs_acc.var_found && !lhs_acc.var_found))
    {
      mark_min_matrix_escape_level (mi, current_indirect_level, use_stmt);
      return current_indirect_level;
    }
  gcc_assert (!rhs_acc.var_found || !lhs_acc.var_found);

  /* If we are storing to the matrix at some level, then mark it as
     escaping at that level.  */
  if (lhs_acc.var_found)
    {
      tree def;
      int l = current_indirect_level + 1;

      gcc_assert (lhs_acc.t_code == INDIRECT_REF);
      def = get_inner_of_cast_expr (rhs);
      if (TREE_CODE (def) != SSA_NAME)
	mark_min_matrix_escape_level (mi, l, use_stmt);
      else
	{
	  def = SSA_NAME_DEF_STMT (def);
	  analyze_matrix_allocation_site (mi, def, l, visited);
	  if (record_accesses)
	    record_access_alloc_site_info (mi, use_stmt, NULL_TREE,
					   NULL_TREE, l, true);
	  update_type_size (mi, use_stmt, NULL, l);
	}
      return current_indirect_level;
    }
  /* Now, check the right-hand-side, to see how the SSA variable
     is used.  */
  if (rhs_acc.var_found)
    {
      /* If we are passing the ssa name to a function call and
         the pointer escapes when passed to the function
         (not the case of free), then we mark the matrix as
         escaping at this level.  */
      if (rhs_acc.t_code == CALL_EXPR)
	{
	  analyze_accesses_for_call_expr (mi, use_stmt,
					  current_indirect_level);

	  return current_indirect_level;
	}
      if (rhs_acc.t_code != INDIRECT_REF
	  && rhs_acc.t_code != PLUS_EXPR && rhs_acc.t_code != SSA_NAME)
	{
	  mark_min_matrix_escape_level (mi, current_indirect_level, use_stmt);
	  return current_indirect_level;
	}
      /* If the access in the RHS has an indirection increase the
         indirection level.  */
      if (rhs_acc.t_code == INDIRECT_REF)
	{
	  if (record_accesses)
	    record_access_alloc_site_info (mi, use_stmt, NULL_TREE,
					   NULL_TREE,
					   current_indirect_level, true);
	  current_indirect_level += 1;
	}
      else if (rhs_acc.t_code == PLUS_EXPR)
	{
	  /* ??? maybe we should check
	     the type of the PLUS_EXPR and make sure it's
	     an integral type.  */
	  gcc_assert (rhs_acc.second_op);
	  if (last_op)
	    /* Currently we support only one PLUS expression on the
	       SSA_NAME that holds the base address of the current
	       indirection level; to support more general case there
	       is a need to hold a stack of expressions and regenerate
	       the calculation later.  */
	    mark_min_matrix_escape_level (mi, current_indirect_level,
					  use_stmt);
	  else
	    {
	      tree index;
	      tree op1, op2;

	      op1 = TREE_OPERAND (rhs, 0);
	      op2 = TREE_OPERAND (rhs, 1);

	      op2 = (op1 == ssa_var) ? op2 : op1;
	      if (TREE_CODE (op2) == INTEGER_CST)
		index =
		  build_int_cst (TREE_TYPE (op1),
				 TREE_INT_CST_LOW (op2) /
				 int_size_in_bytes (TREE_TYPE (op1)));
	      else
		{
		  index =
		    get_index_from_offset (op2, SSA_NAME_DEF_STMT (op2));
		  if (index == NULL_TREE)
		    {
		      mark_min_matrix_escape_level (mi,
						    current_indirect_level,
						    use_stmt);
		      return current_indirect_level;
		    }
		}
	      if (record_accesses)
		record_access_alloc_site_info (mi, use_stmt, op2,
					       index,
					       current_indirect_level, false);
	    }
	}
      /* If we are storing this level of indirection mark it as
         escaping.  */
      if (lhs_acc.t_code == INDIRECT_REF || TREE_CODE (lhs) != SSA_NAME)
	{
	  int l = current_indirect_level;

	  /* One exception is when we are storing to the matrix
	     variable itself; this is the case of malloc, we must make
	     sure that it's the one and only one call to malloc so
	     we call analyze_matrix_allocation_site to check
	     this out.  */
	  if (TREE_CODE (lhs) != VAR_DECL || lhs != mi->decl)
	    mark_min_matrix_escape_level (mi, current_indirect_level,
					  use_stmt);
	  else
	    {
	      /* Also update the escaping level.  */
	      analyze_matrix_allocation_site (mi, use_stmt, l, visited);
	      if (record_accesses)
		record_access_alloc_site_info (mi, use_stmt, NULL_TREE,
					       NULL_TREE, l, true);
	    }
	}
      else
	{
	  /* We are placing it in an SSA, follow that SSA.  */
	  analyze_matrix_accesses (mi, lhs,
				   current_indirect_level,
				   rhs_acc.t_code == PLUS_EXPR,
				   visited, record_accesses);
	}
    }
  return current_indirect_level;
}
/* Given a SSA_VAR (coming from a use statement of the matrix MI),
   follow its uses and level of indirection and find out the minimum
   indirection level it escapes in (the highest dimension) and the maximum
   level it is accessed in (this will be the actual dimension of the
   matrix).  The information is accumulated in MI.
   We look at the immediate uses; if one escapes, we finish; if not,
   we make a recursive call for each one of the immediate uses of the
   resulting SSA name.  */
static void
analyze_matrix_accesses (struct matrix_info *mi, tree ssa_var,
			 int current_indirect_level, bool last_op,
			 sbitmap visited, bool record_accesses)
{
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  update_type_size (mi, SSA_NAME_DEF_STMT (ssa_var), ssa_var,
		    current_indirect_level);

  /* We don't go beyond the escaping level when we are performing the
     flattening.  NOTE: we keep the last indirection level that doesn't
     escape.  */
  if (mi->min_indirect_level_escape > -1
      && mi->min_indirect_level_escape <= current_indirect_level)
    return;

  /* Now go over the uses of the SSA_NAME and check how it is used in
     each one of them.  We are mainly looking for the pattern INDIRECT_REF,
     then a PLUS_EXPR, then INDIRECT_REF etc., while in between there could
     be any number of copies and casts.  */
  gcc_assert (TREE_CODE (ssa_var) == SSA_NAME);

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, ssa_var)
  {
    tree use_stmt = USE_STMT (use_p);
    if (TREE_CODE (use_stmt) == PHI_NODE)
      /* We check all the escaping levels that get to the PHI node
         and make sure they are all the same escaping;
         if not (which is rare) we let the escaping level be the
         minimum level that gets into that PHI because starting from
         that level we cannot expect the behavior of the indirections.  */
      analyze_accesses_for_phi_node (mi, use_stmt, current_indirect_level,
				     visited, record_accesses);
    else if (TREE_CODE (use_stmt) == CALL_EXPR)
      analyze_accesses_for_call_expr (mi, use_stmt, current_indirect_level);
    else if (TREE_CODE (use_stmt) == GIMPLE_MODIFY_STMT)
      current_indirect_level =
	analyze_accesses_for_modify_stmt (mi, ssa_var, use_stmt,
					  current_indirect_level, last_op,
					  visited, record_accesses);
  }
}
/* A walk_tree function to go over the VAR_DECL, PARM_DECL nodes of
   the malloc size expression and check that those aren't changed
   over the function.  */
static tree
check_var_notmodified_p (tree * tp, int *walk_subtrees, void *data)
{
  basic_block bb;
  tree t = *tp;
  tree fn = data;
  block_stmt_iterator bsi;
  tree stmt;

  if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL)
    return NULL_TREE;

  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (fn))
  {
    for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
      {
	stmt = bsi_stmt (bsi);
	if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
	  continue;
	if (GIMPLE_STMT_OPERAND (stmt, 0) == t)
	  return stmt;
      }
  }
  *walk_subtrees = 1;
  return NULL_TREE;
}
/* Go backwards in the use-def chains and find out the expression
   represented by the possible SSA name in EXPR, until it is composed
   of only VAR_DECL, PARM_DECL and INT_CST.  In case of phi nodes
   we make sure that all the arguments represent the same subexpression;
   otherwise we fail.  */
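/* For illustration only (a hypothetical gimplified fragment): a size
   expression such as

     T.1 = n_parm * 4;
     a[i] = malloc (T.1);

   reduces to the PARM_DECL n_parm and the constant 4, so it can be
   recomputed before the level-0 malloc.  If the expression instead
   depended on an SSA name defined in a loop, or on phi arguments that
   do not agree, the walk below returns NULL_TREE and the matrix is
   later marked as escaping at that level.  */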
static tree
can_calculate_expr_before_stmt (tree expr, sbitmap visited)
{
  int j;
  tree def_stmt, op1, op2, res;

  switch (TREE_CODE (expr))
    {
    case SSA_NAME:
      /* Case of loop, we don't know how to represent this expression.  */
      if (TEST_BIT (visited, SSA_NAME_VERSION (expr)))
	return NULL_TREE;

      SET_BIT (visited, SSA_NAME_VERSION (expr));
      def_stmt = SSA_NAME_DEF_STMT (expr);
      res = can_calculate_expr_before_stmt (def_stmt, visited);
      RESET_BIT (visited, SSA_NAME_VERSION (expr));
      return res;
    case VAR_DECL:
    case PARM_DECL:
    case INTEGER_CST:
      return expr;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      op1 = TREE_OPERAND (expr, 0);
      op2 = TREE_OPERAND (expr, 1);

      op1 = can_calculate_expr_before_stmt (op1, visited);
      if (!op1)
	return NULL_TREE;
      op2 = can_calculate_expr_before_stmt (op2, visited);
      if (op2)
	return fold_build2 (TREE_CODE (expr), TREE_TYPE (expr), op1, op2);
      return NULL_TREE;

    case GIMPLE_MODIFY_STMT:
      return can_calculate_expr_before_stmt (GIMPLE_STMT_OPERAND (expr, 1),
					     visited);
    case PHI_NODE:
      res = NULL_TREE;

      /* Make sure all the arguments represent the same value.  */
      for (j = 0; j < PHI_NUM_ARGS (expr); j++)
	{
	  tree new_res;
	  tree def = PHI_ARG_DEF (expr, j);

	  new_res = can_calculate_expr_before_stmt (def, visited);
	  if (res == NULL_TREE)
	    res = new_res;
	  else if (!new_res || !expressions_equal_p (res, new_res))
	    return NULL_TREE;
	}
      return res;

    case NOP_EXPR:
    case CONVERT_EXPR:
      res = can_calculate_expr_before_stmt (TREE_OPERAND (expr, 0), visited);
      if (res != NULL_TREE)
	return build1 (TREE_CODE (expr), TREE_TYPE (expr), res);
      else
	return NULL_TREE;

    default:
      return NULL_TREE;
    }
}
/* There should be only one allocation function for the dimensions
   that don't escape.  Here we check the allocation sites in this
   function.  We must make sure that all the dimensions are allocated
   using malloc and that the malloc size parameter expression could be
   pre-calculated before the call to the malloc of dimension 0.

   Given a candidate matrix for flattening -- MI -- check if it's
   appropriate for flattening -- we analyze the allocation
   sites that we recorded in the previous analysis.  The result of the
   analysis is a level of indirection (matrix dimension) in which the
   flattening is safe.  We check the following conditions:
   1. There is only one allocation site for each dimension.
   2. The allocation sites of all the dimensions are in the same
      function.
   (The above two are taken care of during the analysis when
   we check the allocation site.)
   3. All the dimensions that we flatten are allocated at once; thus
      the total size must be known before the allocation of the
      dimension 0 (top level) -- we must make sure we represent the
      size of the allocation as an expression of global parameters or
      constants and that those don't change over the function.  */
static int
check_allocation_function (void **slot, void *data ATTRIBUTE_UNUSED)
{
  int level;
  block_stmt_iterator bsi;
  basic_block bb_level_0;
  struct matrix_info *mi = *slot;
  sbitmap visited = sbitmap_alloc (num_ssa_names);

  if (!mi->malloc_for_level)
    return 1;
  /* Do nothing if the current function is not the allocation
     function of MI.  */
  if (mi->allocation_function_decl != current_function_decl
      /* We aren't in the main allocation function yet.  */
      || !mi->malloc_for_level[0])
    return 1;

  for (level = 1; level < mi->max_malloced_level; level++)
    if (!mi->malloc_for_level[level])
      break;

  mark_min_matrix_escape_level (mi, level, NULL_TREE);

  bsi = bsi_for_stmt (mi->malloc_for_level[0]);
  bb_level_0 = bsi.bb;

  /* Check if the expression of the size passed to malloc could be
     pre-calculated before the malloc of level 0.  */
  for (level = 1; level < mi->min_indirect_level_escape; level++)
    {
      tree call_stmt, size;
      struct malloc_call_data mcd;

      call_stmt = mi->malloc_for_level[level];

      /* Find the correct malloc information.  */
      collect_data_for_malloc_call (call_stmt, &mcd);

      /* No need to check anticipation for constants.  */
      if (TREE_CODE (mcd.size_var) == INTEGER_CST)
	{
	  if (!mi->dimension_size)
	    {
	      mi->dimension_size =
		(tree *) xcalloc (mi->min_indirect_level_escape,
				  sizeof (tree));
	      mi->dimension_size_orig =
		(tree *) xcalloc (mi->min_indirect_level_escape,
				  sizeof (tree));
	    }
	  mi->dimension_size[level] = mcd.size_var;
	  mi->dimension_size_orig[level] = mcd.size_var;
	  continue;
	}
      /* ??? Here we should also add the way to calculate the size
         expression, not only know that it is anticipated.  */
      sbitmap_zero (visited);
      size = can_calculate_expr_before_stmt (mcd.size_var, visited);
      if (size == NULL_TREE)
	{
	  mark_min_matrix_escape_level (mi, level, call_stmt);
	  if (dump_file)
	    fprintf (dump_file,
		     "Matrix %s: Cannot calculate the size of allocation; escaping at level %d\n",
		     get_name (mi->decl), level);
	  break;
	}
      if (!mi->dimension_size)
	{
	  mi->dimension_size =
	    (tree *) xcalloc (mi->min_indirect_level_escape, sizeof (tree));
	  mi->dimension_size_orig =
	    (tree *) xcalloc (mi->min_indirect_level_escape, sizeof (tree));
	}
      mi->dimension_size[level] = size;
      mi->dimension_size_orig[level] = size;
    }

  /* We don't need those anymore.  */
  for (level = mi->min_indirect_level_escape;
       level < mi->max_malloced_level; level++)
    mi->malloc_for_level[level] = NULL;

  sbitmap_free (visited);
  return 1;
}
/* Track all access and allocation sites.  */
static void
find_sites_in_func (bool record)
{
  sbitmap visited_stmts_1;

  block_stmt_iterator bsi;
  tree stmt;
  basic_block bb;
  struct matrix_info tmpmi, *mi;

  visited_stmts_1 = sbitmap_alloc (num_ssa_names);

  FOR_EACH_BB (bb)
  {
    for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
      {
	stmt = bsi_stmt (bsi);
	if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
	    && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == VAR_DECL)
	  {
	    tmpmi.decl = GIMPLE_STMT_OPERAND (stmt, 0);
	    if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
	      {
		sbitmap_zero (visited_stmts_1);
		analyze_matrix_allocation_site (mi, stmt, 0, visited_stmts_1);
	      }
	  }
	if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
	    && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == SSA_NAME
	    && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)) == VAR_DECL)
	  {
	    tmpmi.decl = GIMPLE_STMT_OPERAND (stmt, 1);
	    if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
	      {
		sbitmap_zero (visited_stmts_1);
		analyze_matrix_accesses (mi,
					 GIMPLE_STMT_OPERAND (stmt, 0), 0,
					 false, visited_stmts_1, record);
	      }
	  }
      }
  }
  sbitmap_free (visited_stmts_1);
}
/* Traverse the use-def chains to see if there are matrices that
   are passed through pointers and we cannot know how they are accessed.
   For each SSA-name defined by a global variable of our interest,
   we traverse the use-def chains of the SSA and follow the indirections,
   and record in what level of indirection the use of the variable
   escapes.  A use of a pointer escapes when it is passed to a function,
   stored into memory or assigned (except in malloc and free calls).  */

static void
record_all_accesses_in_func (void)
{
  unsigned i;
  sbitmap visited_stmts_1;

  visited_stmts_1 = sbitmap_alloc (num_ssa_names);

  for (i = 0; i < num_ssa_names; i++)
    {
      struct matrix_info tmpmi, *mi;
      tree ssa_var = ssa_name (i);
      tree rhs, lhs;

      if (!ssa_var
	  || TREE_CODE (SSA_NAME_DEF_STMT (ssa_var)) != GIMPLE_MODIFY_STMT)
	continue;
      rhs = GIMPLE_STMT_OPERAND (SSA_NAME_DEF_STMT (ssa_var), 1);
      lhs = GIMPLE_STMT_OPERAND (SSA_NAME_DEF_STMT (ssa_var), 0);
      if (TREE_CODE (rhs) != VAR_DECL && TREE_CODE (lhs) != VAR_DECL)
	continue;

      /* If the RHS is a matrix that we want to analyze, follow the def-use
         chain for this SSA_VAR and check for escapes or apply the
         transformation.  */
      tmpmi.decl = rhs;
      if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
	{
	  /* This variable will track the visited PHI nodes, so we can limit
	     its size to the maximum number of SSA names.  */
	  sbitmap_zero (visited_stmts_1);
	  analyze_matrix_accesses (mi, ssa_var,
				   0, false, visited_stmts_1, true);
	}
    }
  sbitmap_free (visited_stmts_1);
}
/* We know that we are allowed to perform matrix flattening (according to the
   escape analysis), so we traverse the use-def chains of the SSA vars
   defined by the global variables pointing to the matrices of our interest.
   In each use of the SSA we calculate the offset from the base address
   according to the following equation:

     a[I1][I2]...[Ik], where D1..Dk is the length of each dimension and the
     escaping level is m <= k, and a' is the new allocated matrix,
     will be translated to:

       b[I(m+1)]...[Ik]

     where

       b = a' + I1*D2*...*Dm + I2*D3*...*Dm + ... + Im  */
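/* For instance (an illustrative case): for a three-dimensional matrix
   that is flattened completely (m == k == 3), an access a[i][j][l]
   becomes

     a'[i * D2 * D3 + j * D3 + l]

   which is the equation above with I1 = i, I2 = j and I3 = l.  */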
static int
transform_access_sites (void **slot, void *data ATTRIBUTE_UNUSED)
{
  block_stmt_iterator bsi;
  struct matrix_info *mi = *slot;
  int min_escape_l = mi->min_indirect_level_escape;
  struct access_site_info *acc_info;
  int i;

  if (min_escape_l < 2 || !mi->access_l)
    return 1;
  for (i = 0; VEC_iterate (access_site_info_p, mi->access_l, i, acc_info);
       i++)
    {
      tree orig, type;

      /* This is possible because we collect the access sites before
         we determine the final minimum indirection level.  */
      if (acc_info->level >= min_escape_l)
	{
	  free (acc_info);
	  continue;
	}
      if (acc_info->is_alloc)
	{
	  if (acc_info->level >= 0 && bb_for_stmt (acc_info->stmt))
	    {
	      ssa_op_iter iter;
	      tree def;
	      tree stmt = acc_info->stmt;

	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
		mark_sym_for_renaming (SSA_NAME_VAR (def));
	      bsi = bsi_for_stmt (stmt);
	      gcc_assert (TREE_CODE (acc_info->stmt) == GIMPLE_MODIFY_STMT);
	      if (TREE_CODE (GIMPLE_STMT_OPERAND (acc_info->stmt, 0)) ==
		  SSA_NAME && acc_info->level < min_escape_l - 1)
		{
		  imm_use_iterator imm_iter;
		  use_operand_p use_p;
		  tree use_stmt;

		  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter,
					 GIMPLE_STMT_OPERAND (acc_info->stmt,
							      0))
		    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
		    {
		      tree conv, tmp, stmts;

		      /* Emit convert statement to convert to type of use.  */
		      conv =
			fold_build1 (CONVERT_EXPR,
				     TREE_TYPE (GIMPLE_STMT_OPERAND
						(acc_info->stmt, 0)),
				     TREE_OPERAND (GIMPLE_STMT_OPERAND
						   (acc_info->stmt, 1), 0));
		      tmp =
			create_tmp_var (TREE_TYPE
					(GIMPLE_STMT_OPERAND
					 (acc_info->stmt, 0)), "new");
		      add_referenced_var (tmp);
		      stmts =
			fold_build2 (GIMPLE_MODIFY_STMT,
				     TREE_TYPE (GIMPLE_STMT_OPERAND
						(acc_info->stmt, 0)), tmp,
				     conv);
		      tmp = make_ssa_name (tmp, stmts);
		      GIMPLE_STMT_OPERAND (stmts, 0) = tmp;
		      bsi = bsi_for_stmt (acc_info->stmt);
		      bsi_insert_after (&bsi, stmts, BSI_SAME_STMT);
		      SET_USE (use_p, tmp);
		    }
		}
	      if (acc_info->level < min_escape_l - 1)
		bsi_remove (&bsi, true);
	    }
	  free (acc_info);
	  continue;
	}
      orig = GIMPLE_STMT_OPERAND (acc_info->stmt, 1);
      type = TREE_TYPE (orig);
      if (TREE_CODE (orig) == INDIRECT_REF
	  && acc_info->level < min_escape_l - 1)
	{
	  /* Replace the INDIRECT_REF with NOP (cast); usually we are casting
	     from "pointer to type" to "type".  */
	  orig =
	    build1 (NOP_EXPR, TREE_TYPE (orig),
		    GIMPLE_STMT_OPERAND (orig, 0));
	  GIMPLE_STMT_OPERAND (acc_info->stmt, 1) = orig;
	}
      else if (TREE_CODE (orig) == PLUS_EXPR
	       && acc_info->level < (min_escape_l))
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  tree offset;
	  tree stmts = NULL_TREE;
	  int k = acc_info->level;
	  tree num_elements, total_elements;
	  tree tmp1 = NULL_TREE;
	  tree d_size = mi->dimension_size[k];

	  /* We already made sure in the analysis that the first operand
	     is the base and the second is the offset.  */
	  offset = acc_info->offset;
	  if (mi->dim_map[k] == min_escape_l - 1)
	    {
	      if (!check_transpose_p || mi->is_transposed_p == false)
		tmp1 = offset;
	      else
		{
		  tree new_offset;
		  tree d_type_size, d_type_size_k;
		  int x, y;

		  d_type_size =
		    build_int_cst (type,
				   mi->dimension_type_size[min_escape_l]);
		  d_type_size_k =
		    build_int_cst (type, mi->dimension_type_size[k + 1]);
		  x = exact_log2 (mi->dimension_type_size[min_escape_l]);
		  y = exact_log2 (mi->dimension_type_size[k + 1]);

		  if (x != -1 && y != -1)
		    {
		      if (x == y)
			new_offset = offset;
		      else
			{
			  tree log = build_int_cst (type, x - y);
			  new_offset =
			    fold_build2 (LSHIFT_EXPR, TREE_TYPE (offset),
					 offset, log);
			}
		    }
		  else
		    {
		      tree ratio;

		      ratio =
			fold_build2 (TRUNC_DIV_EXPR, type, d_type_size,
				     d_type_size_k);
		      new_offset =
			fold_build2 (MULT_EXPR, type, offset, ratio);
		    }

		  total_elements = new_offset;
		  if (new_offset != offset)
		    {
		      tmp1 =
			force_gimple_operand (total_elements, &stmts, true,
					      NULL);
		      if (stmts)
			{
			  tree_stmt_iterator tsi;

			  for (tsi = tsi_start (stmts); !tsi_end_p (tsi);
			       tsi_next (&tsi))
			    mark_symbols_for_renaming (tsi_stmt (tsi));
			  bsi = bsi_for_stmt (acc_info->stmt);
			  bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
			}
		    }
		  else
		    tmp1 = offset;
		}
	    }
	  else
	    {
	      d_size = mi->dimension_size[mi->dim_map[k] + 1];
	      num_elements =
		fold_build2 (MULT_EXPR, type, acc_info->index, d_size);
	      tmp1 = force_gimple_operand (num_elements, &stmts, true, NULL);
	      add_referenced_var (d_size);
	      if (stmts)
		{
		  tree_stmt_iterator tsi;

		  for (tsi = tsi_start (stmts); !tsi_end_p (tsi);
		       tsi_next (&tsi))
		    mark_symbols_for_renaming (tsi_stmt (tsi));
		  bsi = bsi_for_stmt (acc_info->stmt);
		  bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
		}
	    }

	  /* Replace the offset if needed.  */
	  if (offset != tmp1)
	    {
	      if (TREE_CODE (offset) == SSA_NAME)
		{
		  tree use_stmt;

		  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, offset)
		    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
		      if (use_stmt == acc_info->stmt)
			SET_USE (use_p, tmp1);
		}
	      else
		{
		  gcc_assert (TREE_CODE (offset) == INTEGER_CST);
		  TREE_OPERAND (orig, 1) = tmp1;
		}
	    }
	}
      /* ??? meanwhile this happens because we record the same access
         site more than once; we should be using a hash table to
         avoid this and insert the STMT of the access site only
         once.
         else
         gcc_unreachable (); */
      free (acc_info);
    }
  VEC_free (access_site_info_p, heap, mi->access_l);

  update_ssa (TODO_update_ssa);
#ifdef ENABLE_CHECKING
  verify_ssa (true);
#endif
  return 1;
}
/* Sort the array A of counts.  Arrange DIM_MAP to reflect the new order.  */

static void
sort_dim_hot_level (gcov_type * a, int *dim_map, int n)
{
  int i, j, tmp1;
  gcov_type tmp;

  for (i = 0; i < n - 1; i++)
    {
      for (j = 0; j < n - 1 - i; j++)
	{
	  if (a[j + 1] < a[j])
	    {
	      tmp = a[j];	/* Swap a[j] and a[j + 1].  */
	      a[j] = a[j + 1];
	      a[j + 1] = tmp;

	      tmp1 = dim_map[j];
	      dim_map[j] = dim_map[j + 1];
	      dim_map[j + 1] = tmp1;
	    }
	}
    }
}
/* Replace multiple mallocs (one for each dimension) with one malloc
   with the size of DIM1*DIM2*...*DIMN*size_of_element.
   Make sure that we hold the size in the malloc site inside a
   new global variable; this way we ensure that the size doesn't
   change and it is accessible from all the other functions that
   use the matrix.  Also, the original calls to free are deleted,
   and replaced by a new call to free the flattened matrix.  */
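/* For illustration only (hypothetical user code): an allocation such as

     a = (int **) malloc (dim1 * sizeof (int *));
     for (i = 0; i < dim1; i++)
       a[i] = (int *) malloc (dim2 * sizeof (int));

   is rewritten so that only the level-0 call survives, its size argument
   replaced by dim1 * dim2 * sizeof (int) (held in new global variables),
   the inner malloc calls and their casts/assignments are deleted, and the
   per-row calls to free are removed so that a single free of the
   flattened matrix remains.  */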
static int
transform_allocation_sites (void **slot, void *data ATTRIBUTE_UNUSED)
{
  int i;
  struct matrix_info *mi;
  tree type, call_stmt_0, malloc_stmt, oldfn, stmts, prev_dim_size, use_stmt;
  struct cgraph_node *c_node;
  struct cgraph_edge *e;
  block_stmt_iterator bsi;
  struct malloc_call_data mcd;
  HOST_WIDE_INT element_size;

  imm_use_iterator imm_iter;
  use_operand_p use_p;
  tree old_size_0, tmp;
  int min_escape_l;
  int id;

  mi = *slot;

  min_escape_l = mi->min_indirect_level_escape;

  if (!mi->malloc_for_level)
    mi->min_indirect_level_escape = 0;

  if (mi->min_indirect_level_escape < 2)
    return 1;

  mi->dim_map = (int *) xcalloc (mi->min_indirect_level_escape, sizeof (int));
  for (i = 0; i < mi->min_indirect_level_escape; i++)
    mi->dim_map[i] = i;
  if (check_transpose_p)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "Matrix %s:\n", get_name (mi->decl));
	  for (i = 0; i < min_escape_l; i++)
	    {
	      fprintf (dump_file, "dim %d before sort ", i);
	      if (mi->dim_hot_level)
		fprintf (dump_file,
			 "count is " HOST_WIDEST_INT_PRINT_DEC " \n",
			 mi->dim_hot_level[i]);
	    }
	}
      sort_dim_hot_level (mi->dim_hot_level, mi->dim_map,
			  mi->min_indirect_level_escape);
      if (dump_file)
	for (i = 0; i < min_escape_l; i++)
	  {
	    fprintf (dump_file, "dim %d after sort\n", i);
	    if (mi->dim_hot_level)
	      fprintf (dump_file, "count is " HOST_WIDE_INT_PRINT_DEC
		       " \n", (HOST_WIDE_INT) mi->dim_hot_level[i]);
	  }
      for (i = 0; i < mi->min_indirect_level_escape; i++)
	{
	  if (dump_file)
	    fprintf (dump_file, "dim_map[%d] after sort %d\n", i,
		     mi->dim_map[i]);
	  if (mi->dim_map[i] != i)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Transposed dimensions: dim %d is now dim %d\n",
			 mi->dim_map[i], i);
	      mi->is_transposed_p = true;
	    }
	}
    }
  else
    {
      for (i = 0; i < mi->min_indirect_level_escape; i++)
	mi->dim_map[i] = i;
    }
  /* Call statement of allocation site of level 0.  */
  call_stmt_0 = mi->malloc_for_level[0];

  /* Find the correct malloc information.  */
  collect_data_for_malloc_call (call_stmt_0, &mcd);

  mi->dimension_size[0] = mcd.size_var;
  mi->dimension_size_orig[0] = mcd.size_var;
  /* Make sure that the variables in the size expression for
     all the dimensions (above level 0) aren't modified in
     the allocation function.  */
  for (i = 1; i < mi->min_indirect_level_escape; i++)
    {
      tree t;

      /* mi->dimension_size must contain the expression of the size calculated
         in check_allocation_function.  */
      gcc_assert (mi->dimension_size[i]);

      t = walk_tree_without_duplicates (&(mi->dimension_size[i]),
					check_var_notmodified_p,
					mi->allocation_function_decl);
      if (t != NULL_TREE)
	{
	  mark_min_matrix_escape_level (mi, i, t);
	  break;
	}
    }
  if (mi->min_indirect_level_escape < 2)
    return 1;

  /* Since we should make sure that the size expression is available
     before the call to malloc of level 0.  */
  bsi = bsi_for_stmt (call_stmt_0);

  /* Find out the size of each dimension by looking at the malloc
     sites and create a global variable to hold it.
     We add the assignment to the global before the malloc of level 0.  */

  /* To be able to produce gimple temporaries.  */
  oldfn = current_function_decl;
  current_function_decl = mi->allocation_function_decl;
  cfun = DECL_STRUCT_FUNCTION (mi->allocation_function_decl);

  /* Set the dimension sizes as follows:
     DIM_SIZE[i] = DIM_SIZE[n] * ... * DIM_SIZE[i]
     where n is the maximum non escaping level.  */
  element_size = mi->dimension_type_size[mi->min_indirect_level_escape];
  prev_dim_size = NULL_TREE;

  for (i = mi->min_indirect_level_escape - 1; i >= 0; i--)
    {
      tree dim_size, dim_var, tmp;
      tree d_type_size;
      tree_stmt_iterator tsi;

      /* Now put the size expression in a global variable and initialize it to
         the size expression before the malloc of level 0.  */
      dim_var =
	add_new_static_var (TREE_TYPE
			    (mi->dimension_size_orig[mi->dim_map[i]]));
      type = TREE_TYPE (mi->dimension_size_orig[mi->dim_map[i]]);
      d_type_size =
	build_int_cst (type, mi->dimension_type_size[mi->dim_map[i] + 1]);

      /* DIM_SIZE = MALLOC_SIZE_PARAM / TYPE_SIZE.  */
      /* Find which dim ID becomes dim I.  */
      for (id = 0; id < mi->min_indirect_level_escape; id++)
	if (mi->dim_map[id] == i)
	  break;
      if (!prev_dim_size)
	prev_dim_size = build_int_cst (type, element_size);
      if (!check_transpose_p && i == mi->min_indirect_level_escape - 1)
	dim_size = mi->dimension_size_orig[id];
      else
	{
	  dim_size =
	    fold_build2 (TRUNC_DIV_EXPR, type, mi->dimension_size_orig[id],
			 d_type_size);

	  dim_size = fold_build2 (MULT_EXPR, type, dim_size, prev_dim_size);
	}
      dim_size = force_gimple_operand (dim_size, &stmts, true, NULL);
      if (stmts)
	{
	  for (tsi = tsi_start (stmts); !tsi_end_p (tsi); tsi_next (&tsi))
	    mark_symbols_for_renaming (tsi_stmt (tsi));
	  bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
	  bsi = bsi_for_stmt (call_stmt_0);
	}

      /* GLOBAL_HOLDING_THE_SIZE = DIM_SIZE.  */
      tmp = fold_build2 (GIMPLE_MODIFY_STMT, type, dim_var, dim_size);
      GIMPLE_STMT_OPERAND (tmp, 0) = dim_var;
      mark_symbols_for_renaming (tmp);
      bsi_insert_before (&bsi, tmp, BSI_NEW_STMT);
      bsi = bsi_for_stmt (call_stmt_0);

      prev_dim_size = mi->dimension_size[i] = dim_var;
    }
  update_ssa (TODO_update_ssa);
  /* Replace the malloc size argument in the malloc of level 0 to be
     the size of all the dimensions.  */
  malloc_stmt = GIMPLE_STMT_OPERAND (call_stmt_0, 1);
  c_node = cgraph_node (mi->allocation_function_decl);
  old_size_0 = CALL_EXPR_ARG (malloc_stmt, 0);
  bsi = bsi_for_stmt (call_stmt_0);
  tmp = force_gimple_operand (mi->dimension_size[0], &stmts, true, NULL);
  if (stmts)
    {
      tree_stmt_iterator tsi;

      for (tsi = tsi_start (stmts); !tsi_end_p (tsi); tsi_next (&tsi))
	mark_symbols_for_renaming (tsi_stmt (tsi));
      bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
      bsi = bsi_for_stmt (call_stmt_0);
    }
  if (TREE_CODE (old_size_0) == SSA_NAME)
    {
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, old_size_0)
	FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	  if (use_stmt == call_stmt_0)
	    SET_USE (use_p, tmp);
    }
  /* When deleting the calls to malloc we need also to remove the edge from
     the call graph to keep it consistent.  Notice that cgraph_edge may
     create a new node in the call graph if there is no node for the given
     declaration; this shouldn't be the case but currently there is no way to
     check this outside of "cgraph.c".  */
  for (i = 1; i < mi->min_indirect_level_escape; i++)
    {
      block_stmt_iterator bsi;
      tree use_stmt1 = NULL;
      tree call;

      tree call_stmt = mi->malloc_for_level[i];
      call = GIMPLE_STMT_OPERAND (call_stmt, 1);
      gcc_assert (TREE_CODE (call) == CALL_EXPR);
      e = cgraph_edge (c_node, call_stmt);
      gcc_assert (e);
      cgraph_remove_edge (e);
      bsi = bsi_for_stmt (call_stmt);
      /* Remove the call stmt.  */
      bsi_remove (&bsi, true);
      /* Remove the type cast stmt.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter,
			     GIMPLE_STMT_OPERAND (call_stmt, 0))
      {
	use_stmt1 = use_stmt;
	bsi = bsi_for_stmt (use_stmt);
	bsi_remove (&bsi, true);
      }
      /* Remove the assignment of the allocated area.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter,
			     GIMPLE_STMT_OPERAND (use_stmt1, 0))
      {
	bsi = bsi_for_stmt (use_stmt);
	bsi_remove (&bsi, true);
      }
    }
  update_ssa (TODO_update_ssa);
#ifdef ENABLE_CHECKING
  verify_ssa (true);
#endif
  /* Delete the calls to free.  */
  for (i = 1; i < mi->min_indirect_level_escape; i++)
    {
      block_stmt_iterator bsi;
      tree call;

      /* ??? wonder why this case is possible but we failed on it once.  */
      if (!mi->free_stmts[i].stmt)
	continue;

      call = TREE_OPERAND (mi->free_stmts[i].stmt, 1);
      c_node = cgraph_node (mi->free_stmts[i].func);

      gcc_assert (TREE_CODE (mi->free_stmts[i].stmt) == CALL_EXPR);
      e = cgraph_edge (c_node, mi->free_stmts[i].stmt);
      gcc_assert (e);
      cgraph_remove_edge (e);
      current_function_decl = mi->free_stmts[i].func;
      cfun = DECL_STRUCT_FUNCTION (mi->free_stmts[i].func);
      bsi = bsi_for_stmt (mi->free_stmts[i].stmt);
      bsi_remove (&bsi, true);
    }

  /* Return to the previous situation.  */
  current_function_decl = oldfn;
  cfun = oldfn ? DECL_STRUCT_FUNCTION (oldfn) : NULL;

  return 1;
}
/* Print out the results of the escape analysis.  */
static int
dump_matrix_reorg_analysis (void **slot, void *data ATTRIBUTE_UNUSED)
{
  struct matrix_info *mi = *slot;

  if (!dump_file)
    return 1;
  fprintf (dump_file, "Matrix \"%s\"; Escaping Level: %d, Num Dims: %d,",
	   get_name (mi->decl), mi->min_indirect_level_escape, mi->num_dims);
  fprintf (dump_file, " Malloc Dims: %d, ", mi->max_malloced_level);
  fprintf (dump_file, "\n");
  if (mi->min_indirect_level_escape >= 2)
    fprintf (dump_file, "Flattened %d dimensions \n",
	     mi->min_indirect_level_escape);
  return 1;
}
/* Perform matrix flattening.  */

static unsigned int
matrix_reorg (void)
{
  struct cgraph_node *node;

  if (profile_info)
    check_transpose_p = true;
  else
    check_transpose_p = false;
  /* If there are hand written vectors, we skip this optimization.  */
  for (node = cgraph_nodes; node; node = node->next)
    if (!may_flatten_matrices (node))
      return 0;
  matrices_to_reorg = htab_create (37, mtt_info_hash, mtt_info_eq, mat_free);
  /* Find and record all potential matrices in the program.  */
  find_matrices_decl ();
  /* Analyze the accesses of the matrices (escaping analysis).  */
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      {
	tree temp_fn;

	temp_fn = current_function_decl;
	current_function_decl = node->decl;
	push_cfun (DECL_STRUCT_FUNCTION (node->decl));
	bitmap_obstack_initialize (NULL);
	tree_register_cfg_hooks ();

	if (!gimple_in_ssa_p (cfun))
	  {
	    free_dominance_info (CDI_DOMINATORS);
	    free_dominance_info (CDI_POST_DOMINATORS);
	    pop_cfun ();
	    current_function_decl = temp_fn;
	    bitmap_obstack_release (NULL);

	    return 0;
	  }

#ifdef ENABLE_CHECKING
	verify_flow_info ();
#endif

	if (!matrices_to_reorg)
	  {
	    free_dominance_info (CDI_DOMINATORS);
	    free_dominance_info (CDI_POST_DOMINATORS);
	    pop_cfun ();
	    current_function_decl = temp_fn;
	    bitmap_obstack_release (NULL);

	    return 0;
	  }

	/* Create htab for phi nodes.  */
	htab_mat_acc_phi_nodes = htab_create (37, mat_acc_phi_hash,
					      mat_acc_phi_eq, free);
	if (!check_transpose_p)
	  find_sites_in_func (false);
	else
	  {
	    find_sites_in_func (true);
	    loop_optimizer_init (LOOPS_NORMAL);
	    if (current_loops)
	      scev_initialize ();
	    htab_traverse (matrices_to_reorg, analyze_transpose, NULL);
	    if (current_loops)
	      scev_finalize ();
	    loop_optimizer_finalize ();
	    current_loops = NULL;
	  }
	/* If the current function is the allocation function for any of
	   the matrices we check its allocation and the escaping level.  */
	htab_traverse (matrices_to_reorg, check_allocation_function, NULL);
	free_dominance_info (CDI_DOMINATORS);
	free_dominance_info (CDI_POST_DOMINATORS);
	pop_cfun ();
	current_function_decl = temp_fn;
	bitmap_obstack_release (NULL);
      }
  htab_traverse (matrices_to_reorg, transform_allocation_sites, NULL);
  /* Now transform the accesses.  */
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      {
	/* Remember that allocation sites have been handled.  */
	tree temp_fn;

	temp_fn = current_function_decl;
	current_function_decl = node->decl;
	push_cfun (DECL_STRUCT_FUNCTION (node->decl));
	bitmap_obstack_initialize (NULL);
	tree_register_cfg_hooks ();
	record_all_accesses_in_func ();
	htab_traverse (matrices_to_reorg, transform_access_sites, NULL);
	free_dominance_info (CDI_DOMINATORS);
	free_dominance_info (CDI_POST_DOMINATORS);
	pop_cfun ();
	current_function_decl = temp_fn;
	bitmap_obstack_release (NULL);
      }
  htab_traverse (matrices_to_reorg, dump_matrix_reorg_analysis, NULL);

  current_function_decl = NULL;
  cfun = NULL;
  matrices_to_reorg = NULL;
  return 0;
}
/* The condition for matrix flattening to be performed.  */
static bool
gate_matrix_reorg (void)
{
  return flag_ipa_matrix_reorg /*&& flag_whole_program */ ;
}

struct tree_opt_pass pass_ipa_matrix_reorg = {
  "matrix-reorg",		/* name */
  gate_matrix_reorg,		/* gate */
  matrix_reorg,			/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  0,				/* tv_id */
  0,				/* properties_required */
  PROP_trees,			/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func,	/* todo_flags_finish */
  0				/* letter */
};