X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Ftree-sra.c;h=db9b9bf4452338a524bdef01c02ac14fb7b03687;hb=78aa9eb3b402b3199945d3ab09f54895dc753498;hp=f0e4bd02ff5aab3597fc646c657ebe15bbacfa5b;hpb=dd277d48c6583b9ac3a360761cf4484f021c9f0b;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c index f0e4bd02ff5..db9b9bf4452 100644 --- a/gcc/tree-sra.c +++ b/gcc/tree-sra.c @@ -1,19 +1,18 @@ /* Scalar Replacement of Aggregates (SRA) converts some structure references into scalar references, exposing them to the scalar optimizers. - Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 - Free Software Foundation, Inc. - Contributed by Diego Novillo + Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc. + Contributed by Martin Jambor This file is part of GCC. -GCC is free software; you can redistribute it and/or modify it -under the terms of the GNU General Public License as published by the -Free Software Foundation; either version 3, or (at your option) any -later version. +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. -GCC is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. @@ -21,3680 +20,4843 @@ You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see . */ +/* This file implements Scalar Reduction of Aggregates (SRA). SRA is run + twice, once in the early stages of compilation (early SRA) and once in the + late stages (late SRA). The aim of both is to turn references to scalar + parts of aggregates into uses of independent scalar variables. + + The two passes are nearly identical, the only difference is that early SRA + does not scalarize unions which are used as the result in a GIMPLE_RETURN + statement because together with inlining this can lead to weird type + conversions. + + Both passes operate in four stages: + + 1. The declarations that have properties which make them candidates for + scalarization are identified in function find_var_candidates(). The + candidates are stored in candidate_bitmap. + + 2. The function body is scanned. In the process, declarations which are + used in a manner that prevent their scalarization are removed from the + candidate bitmap. More importantly, for every access into an aggregate, + an access structure (struct access) is created by create_access() and + stored in a vector associated with the aggregate. Among other + information, the aggregate declaration, the offset and size of the access + and its type are stored in the structure. + + On a related note, assign_link structures are created for every assign + statement between candidate aggregates and attached to the related + accesses. + + 3. The vectors of accesses are analyzed. They are first sorted according to + their offset and size and then scanned for partially overlapping accesses + (i.e. those which overlap but one is not entirely within another). Such + an access disqualifies the whole aggregate from being scalarized. 
+ + If there is no such inhibiting overlap, a representative access structure + is chosen for every unique combination of offset and size. Afterwards, + the pass builds a set of trees from these structures, in which children + of an access are within their parent (in terms of offset and size). + + Then accesses are propagated whenever possible (i.e. in cases when it + does not create a partially overlapping access) across assign_links from + the right hand side to the left hand side. + + Then the set of trees for each declaration is traversed again and those + accesses which should be replaced by a scalar are identified. + + 4. The function is traversed again, and for every reference into an + aggregate that has some component which is about to be scalarized, + statements are amended and new statements are created as necessary. + Finally, if a parameter got scalarized, the scalar replacements are + initialized with values from respective parameter aggregates. */ + #include "config.h" #include "system.h" #include "coretypes.h" +#include "alloc-pool.h" #include "tm.h" -#include "ggc.h" #include "tree.h" - -/* These RTL headers are needed for basic-block.h. */ -#include "rtl.h" -#include "tm_p.h" -#include "hard-reg-set.h" -#include "basic-block.h" -#include "diagnostic.h" -#include "langhooks.h" -#include "tree-inline.h" -#include "tree-flow.h" #include "gimple.h" +#include "cgraph.h" +#include "tree-flow.h" +#include "ipa-prop.h" +#include "tree-pretty-print.h" +#include "statistics.h" #include "tree-dump.h" -#include "tree-pass.h" #include "timevar.h" -#include "flags.h" -#include "bitmap.h" -#include "obstack.h" -#include "target.h" -/* expr.h is needed for MOVE_RATIO. */ -#include "expr.h" #include "params.h" +#include "target.h" +#include "flags.h" +#include "dbgcnt.h" +#include "tree-inline.h" +#include "gimple-pretty-print.h" +#include "ipa-inline.h" +/* Enumeration of all aggregate reductions we can do. */ +enum sra_mode { SRA_MODE_EARLY_IPA, /* early call regularization */ + SRA_MODE_EARLY_INTRA, /* early intraprocedural SRA */ + SRA_MODE_INTRA }; /* late intraprocedural SRA */ -/* This object of this pass is to replace a non-addressable aggregate with a - set of independent variables. Most of the time, all of these variables - will be scalars. But a secondary objective is to break up larger - aggregates into smaller aggregates. In the process we may find that some - bits of the larger aggregate can be deleted as unreferenced. - - This substitution is done globally. More localized substitutions would - be the purvey of a load-store motion pass. - - The optimization proceeds in phases: +/* Global variable describing which aggregate reduction we are performing at + the moment. */ +static enum sra_mode sra_mode; - (1) Identify variables that have types that are candidates for - decomposition. +struct assign_link; - (2) Scan the function looking for the ways these variables are used. - In particular we're interested in the number of times a variable - (or member) is needed as a complete unit, and the number of times - a variable (or member) is copied. +/* ACCESS represents each access to an aggregate variable (as a whole or a + part). It can also represent a group of accesses that refer to exactly the + same fragment of an aggregate (i.e. those that have exactly the same offset + and size). Such representatives for a single aggregate, once determined, + are linked in a linked list and have the group fields set. - (3) Based on the usage profile, instantiate substitution variables. 
+ Moreover, when doing intraprocedural SRA, a tree is built from those + representatives (by the means of first_child and next_sibling pointers), in + which all items in a subtree are "within" the root, i.e. their offset is + greater or equal to offset of the root and offset+size is smaller or equal + to offset+size of the root. Children of an access are sorted by offset. - (4) Scan the function making replacements. -*/ + Note that accesses to parts of vector and complex number types always + represented by an access to the whole complex number or a vector. It is a + duty of the modifying functions to replace them appropriately. */ +struct access +{ + /* Values returned by `get_ref_base_and_extent' for each component reference + If EXPR isn't a component reference just set `BASE = EXPR', `OFFSET = 0', + `SIZE = TREE_SIZE (TREE_TYPE (expr))'. */ + HOST_WIDE_INT offset; + HOST_WIDE_INT size; + tree base; + + /* Expression. It is context dependent so do not use it to create new + expressions to access the original aggregate. See PR 42154 for a + testcase. */ + tree expr; + /* Type. */ + tree type; -/* True if this is the "early" pass, before inlining. */ -static bool early_sra; + /* The statement this access belongs to. */ + gimple stmt; -/* The set of aggregate variables that are candidates for scalarization. */ -static bitmap sra_candidates; + /* Next group representative for this aggregate. */ + struct access *next_grp; -/* Set of scalarizable PARM_DECLs that need copy-in operations at the - beginning of the function. */ -static bitmap needs_copy_in; + /* Pointer to the group representative. Pointer to itself if the struct is + the representative. */ + struct access *group_representative; -/* Sets of bit pairs that cache type decomposition and instantiation. */ -static bitmap sra_type_decomp_cache; -static bitmap sra_type_inst_cache; + /* If this access has any children (in terms of the definition above), this + points to the first one. */ + struct access *first_child; -/* One of these structures is created for each candidate aggregate and - each (accessed) member or group of members of such an aggregate. */ -struct sra_elt -{ - /* A tree of the elements. Used when we want to traverse everything. */ - struct sra_elt *parent; - struct sra_elt *groups; - struct sra_elt *children; - struct sra_elt *sibling; + /* In intraprocedural SRA, pointer to the next sibling in the access tree as + described above. In IPA-SRA this is a pointer to the next access + belonging to the same group (having the same representative). */ + struct access *next_sibling; - /* If this element is a root, then this is the VAR_DECL. If this is - a sub-element, this is some token used to identify the reference. - In the case of COMPONENT_REF, this is the FIELD_DECL. In the case - of an ARRAY_REF, this is the (constant) index. In the case of an - ARRAY_RANGE_REF, this is the (constant) RANGE_EXPR. In the case - of a complex number, this is a zero or one. */ - tree element; + /* Pointers to the first and last element in the linked list of assign + links. */ + struct assign_link *first_link, *last_link; - /* The type of the element. */ - tree type; + /* Pointer to the next access in the work queue. */ + struct access *next_queued; - /* A VAR_DECL, for any sub-element we've decided to replace. */ - tree replacement; + /* Replacement variable for this access "region." Never to be accessed + directly, always only by the means of get_access_replacement() and only + when grp_to_be_replaced flag is set. 
*/ + tree replacement_decl; - /* The number of times the element is referenced as a whole. I.e. - given "a.b.c", this would be incremented for C, but not for A or B. */ - unsigned int n_uses; + /* Is this particular access write access? */ + unsigned write : 1; - /* The number of times the element is copied to or from another - scalarizable element. */ - unsigned int n_copies; + /* Is this access an access to a non-addressable field? */ + unsigned non_addressable : 1; - /* True if TYPE is scalar. */ - bool is_scalar; + /* Is this access currently in the work queue? */ + unsigned grp_queued : 1; - /* True if this element is a group of members of its parent. */ - bool is_group; + /* Does this group contain a write access? This flag is propagated down the + access tree. */ + unsigned grp_write : 1; - /* True if we saw something about this element that prevents scalarization, - such as non-constant indexing. */ - bool cannot_scalarize; + /* Does this group contain a read access? This flag is propagated down the + access tree. */ + unsigned grp_read : 1; - /* True if we've decided that structure-to-structure assignment - should happen via memcpy and not per-element. */ - bool use_block_copy; + /* Does this group contain a read access that comes from an assignment + statement? This flag is propagated down the access tree. */ + unsigned grp_assignment_read : 1; - /* True if everything under this element has been marked TREE_NO_WARNING. */ - bool all_no_warning; + /* Does this group contain a write access that comes from an assignment + statement? This flag is propagated down the access tree. */ + unsigned grp_assignment_write : 1; - /* A flag for use with/after random access traversals. */ - bool visited; + /* Does this group contain a read access through a scalar type? This flag is + not propagated in the access tree in any direction. */ + unsigned grp_scalar_read : 1; - /* True if there is BIT_FIELD_REF on the lhs with a vector. */ - bool is_vector_lhs; + /* Does this group contain a write access through a scalar type? This flag + is not propagated in the access tree in any direction. */ + unsigned grp_scalar_write : 1; - /* 1 if the element is a field that is part of a block, 2 if the field - is the block itself, 0 if it's neither. */ - char in_bitfld_block; -}; + /* Is this access an artificial one created to scalarize some record + entirely? */ + unsigned grp_total_scalarization : 1; -#define IS_ELEMENT_FOR_GROUP(ELEMENT) (TREE_CODE (ELEMENT) == RANGE_EXPR) + /* Other passes of the analysis use this bit to make function + analyze_access_subtree create scalar replacements for this group if + possible. */ + unsigned grp_hint : 1; -#define FOR_EACH_ACTUAL_CHILD(CHILD, ELT) \ - for ((CHILD) = (ELT)->is_group \ - ? next_child_for_group (NULL, (ELT)) \ - : (ELT)->children; \ - (CHILD); \ - (CHILD) = (ELT)->is_group \ - ? next_child_for_group ((CHILD), (ELT)) \ - : (CHILD)->sibling) + /* Is the subtree rooted in this access fully covered by scalar + replacements? */ + unsigned grp_covered : 1; -/* Helper function for above macro. Return next child in group. */ -static struct sra_elt * -next_child_for_group (struct sra_elt *child, struct sra_elt *group) -{ - gcc_assert (group->is_group); + /* If set to true, this access and all below it in an access tree must not be + scalarized. */ + unsigned grp_unscalarizable_region : 1; - /* Find the next child in the parent. 
*/ - if (child) - child = child->sibling; - else - child = group->parent->children; + /* Whether data have been written to parts of the aggregate covered by this + access which is not to be scalarized. This flag is propagated up in the + access tree. */ + unsigned grp_unscalarized_data : 1; - /* Skip siblings that do not belong to the group. */ - while (child) - { - tree g_elt = group->element; - if (TREE_CODE (g_elt) == RANGE_EXPR) - { - if (!tree_int_cst_lt (child->element, TREE_OPERAND (g_elt, 0)) - && !tree_int_cst_lt (TREE_OPERAND (g_elt, 1), child->element)) - break; - } - else - gcc_unreachable (); + /* Does this access and/or group contain a write access through a + BIT_FIELD_REF? */ + unsigned grp_partial_lhs : 1; - child = child->sibling; - } + /* Set when a scalar replacement should be created for this variable. We do + the decision and creation at different places because create_tmp_var + cannot be called from within FOR_EACH_REFERENCED_VAR. */ + unsigned grp_to_be_replaced : 1; - return child; -} + /* Should TREE_NO_WARNING of a replacement be set? */ + unsigned grp_no_warning : 1; -/* Random access to the child of a parent is performed by hashing. - This prevents quadratic behavior, and allows SRA to function - reasonably on larger records. */ -static htab_t sra_map; + /* Is it possible that the group refers to data which might be (directly or + otherwise) modified? */ + unsigned grp_maybe_modified : 1; -/* All structures are allocated out of the following obstack. */ -static struct obstack sra_obstack; + /* Set when this is a representative of a pointer to scalar (i.e. by + reference) parameter which we consider for turning into a plain scalar + (i.e. a by value parameter). */ + unsigned grp_scalar_ptr : 1; + + /* Set when we discover that this pointer is not safe to dereference in the + caller. */ + unsigned grp_not_necessarilly_dereferenced : 1; +}; -/* Debugging functions. */ -static void dump_sra_elt_name (FILE *, struct sra_elt *); -extern void debug_sra_elt_name (struct sra_elt *); +typedef struct access *access_p; -/* Forward declarations. */ -static tree generate_element_ref (struct sra_elt *); -static gimple_seq sra_build_assignment (tree dst, tree src); -static void mark_all_v_defs_seq (gimple_seq); +DEF_VEC_P (access_p); +DEF_VEC_ALLOC_P (access_p, heap); - -/* Return true if DECL is an SRA candidate. */ +/* Alloc pool for allocating access structures. */ +static alloc_pool access_pool; -static bool -is_sra_candidate_decl (tree decl) +/* A structure linking lhs and rhs accesses from an aggregate assignment. They + are used to propagate subaccesses from rhs to lhs as long as they don't + conflict with what is already there. */ +struct assign_link { - return DECL_P (decl) && bitmap_bit_p (sra_candidates, DECL_UID (decl)); -} - -/* Return true if TYPE is a scalar type. */ + struct access *lacc, *racc; + struct assign_link *next; +}; -static bool -is_sra_scalar_type (tree type) -{ - enum tree_code code = TREE_CODE (type); - return (code == INTEGER_TYPE || code == REAL_TYPE || code == VECTOR_TYPE - || code == FIXED_POINT_TYPE - || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE - || code == POINTER_TYPE || code == OFFSET_TYPE - || code == REFERENCE_TYPE); -} +/* Alloc pool for allocating assign link structures. */ +static alloc_pool link_pool; -/* Return true if TYPE can be decomposed into a set of independent variables. +/* Base (tree) -> Vector (VEC(access_p,heap) *) map. 
*/ +static struct pointer_map_t *base_access_vec; - Note that this doesn't imply that all elements of TYPE can be - instantiated, just that if we decide to break up the type into - separate pieces that it can be done. */ +/* Bitmap of candidates. */ +static bitmap candidate_bitmap; -bool -sra_type_can_be_decomposed_p (tree type) -{ - unsigned int cache = TYPE_UID (TYPE_MAIN_VARIANT (type)) * 2; - tree t; +/* Bitmap of candidates which we should try to entirely scalarize away and + those which cannot be (because they are and need be used as a whole). */ +static bitmap should_scalarize_away_bitmap, cannot_scalarize_away_bitmap; - /* Avoid searching the same type twice. */ - if (bitmap_bit_p (sra_type_decomp_cache, cache+0)) - return true; - if (bitmap_bit_p (sra_type_decomp_cache, cache+1)) - return false; +/* Obstack for creation of fancy names. */ +static struct obstack name_obstack; - /* The type must have a definite nonzero size. */ - if (TYPE_SIZE (type) == NULL || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST - || integer_zerop (TYPE_SIZE (type))) - goto fail; +/* Head of a linked list of accesses that need to have its subaccesses + propagated to their assignment counterparts. */ +static struct access *work_queue_head; - /* The type must be a non-union aggregate. */ - switch (TREE_CODE (type)) - { - case RECORD_TYPE: - { - bool saw_one_field = false; +/* Number of parameters of the analyzed function when doing early ipa SRA. */ +static int func_param_count; - for (t = TYPE_FIELDS (type); t ; t = TREE_CHAIN (t)) - if (TREE_CODE (t) == FIELD_DECL) - { - /* Reject incorrectly represented bit fields. */ - if (DECL_BIT_FIELD (t) - && INTEGRAL_TYPE_P (TREE_TYPE (t)) - && (tree_low_cst (DECL_SIZE (t), 1) - != TYPE_PRECISION (TREE_TYPE (t)))) - goto fail; - - saw_one_field = true; - } +/* scan_function sets the following to true if it encounters a call to + __builtin_apply_args. */ +static bool encountered_apply_args; - /* Record types must have at least one field. */ - if (!saw_one_field) - goto fail; - } - break; +/* Set by scan_function when it finds a recursive call. */ +static bool encountered_recursive_call; - case ARRAY_TYPE: - /* Array types must have a fixed lower and upper bound. */ - t = TYPE_DOMAIN (type); - if (t == NULL) - goto fail; - if (TYPE_MIN_VALUE (t) == NULL || !TREE_CONSTANT (TYPE_MIN_VALUE (t))) - goto fail; - if (TYPE_MAX_VALUE (t) == NULL || !TREE_CONSTANT (TYPE_MAX_VALUE (t))) - goto fail; - break; +/* Set by scan_function when it finds a recursive call with less actual + arguments than formal parameters.. */ +static bool encountered_unchangable_recursive_call; - case COMPLEX_TYPE: - break; +/* This is a table in which for each basic block and parameter there is a + distance (offset + size) in that parameter which is dereferenced and + accessed in that BB. */ +static HOST_WIDE_INT *bb_dereferences; +/* Bitmap of BBs that can cause the function to "stop" progressing by + returning, throwing externally, looping infinitely or calling a function + which might abort etc.. */ +static bitmap final_bbs; - default: - goto fail; - } +/* Representative of no accesses at all. */ +static struct access no_accesses_representant; - bitmap_set_bit (sra_type_decomp_cache, cache+0); - return true; +/* Predicate to test the special value. 
*/ - fail: - bitmap_set_bit (sra_type_decomp_cache, cache+1); - return false; +static inline bool +no_accesses_p (struct access *access) +{ + return access == &no_accesses_representant; } -/* Returns true if the TYPE is one of the available va_list types. - Otherwise it returns false. - Note, that for multiple calling conventions there can be more - than just one va_list type present. */ +/* Dump contents of ACCESS to file F in a human friendly way. If GRP is true, + representative fields are dumped, otherwise those which only describe the + individual access are. */ -static bool -is_va_list_type (tree type) +static struct { - tree h; + /* Number of processed aggregates is readily available in + analyze_all_variable_accesses and so is not stored here. */ - if (type == NULL_TREE) - return false; - h = targetm.canonical_va_list_type (type); - if (h == NULL_TREE) - return false; - if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (h)) - return true; - return false; -} + /* Number of created scalar replacements. */ + int replacements; -/* Return true if DECL can be decomposed into a set of independent - (though not necessarily scalar) variables. */ + /* Number of times sra_modify_expr or sra_modify_assign themselves changed an + expression. */ + int exprs; -static bool -decl_can_be_decomposed_p (tree var) -{ - /* Early out for scalars. */ - if (is_sra_scalar_type (TREE_TYPE (var))) - return false; + /* Number of statements created by generate_subtree_copies. */ + int subtree_copies; - /* The variable must not be aliased. */ - if (!is_gimple_non_addressable (var)) - { - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "Cannot scalarize variable "); - print_generic_expr (dump_file, var, dump_flags); - fprintf (dump_file, " because it must live in memory\n"); - } - return false; - } + /* Number of statements created by load_assign_lhs_subreplacements. */ + int subreplacements; - /* The variable must not be volatile. */ - if (TREE_THIS_VOLATILE (var)) - { - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "Cannot scalarize variable "); - print_generic_expr (dump_file, var, dump_flags); - fprintf (dump_file, " because it is declared volatile\n"); - } - return false; - } + /* Number of times sra_modify_assign has deleted a statement. */ + int deleted; - /* We must be able to decompose the variable's type. */ - if (!sra_type_can_be_decomposed_p (TREE_TYPE (var))) - { - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "Cannot scalarize variable "); - print_generic_expr (dump_file, var, dump_flags); - fprintf (dump_file, " because its type cannot be decomposed\n"); - } - return false; - } + /* Number of times sra_modify_assign has to deal with subaccesses of LHS and + RHS reparately due to type conversions or nonexistent matching + references. */ + int separate_lhs_rhs_handling; - /* HACK: if we decompose a va_list_type_node before inlining, then we'll - confuse tree-stdarg.c, and we won't be able to figure out which and - how many arguments are accessed. This really should be improved in - tree-stdarg.c, as the decomposition is truly a win. This could also - be fixed if the stdarg pass ran early, but this can't be done until - we've aliasing information early too. See PR 30791. */ - if (early_sra && is_va_list_type (TREE_TYPE (var))) - return false; + /* Number of parameters that were removed because they were unused. 
*/ + int deleted_unused_parameters; - return true; -} + /* Number of scalars passed as parameters by reference that have been + converted to be passed by value. */ + int scalar_by_ref_to_by_val; -/* Return true if TYPE can be *completely* decomposed into scalars. */ + /* Number of aggregate parameters that were replaced by one or more of their + components. */ + int aggregate_params_reduced; -static bool -type_can_instantiate_all_elements (tree type) -{ - if (is_sra_scalar_type (type)) - return true; - if (!sra_type_can_be_decomposed_p (type)) - return false; + /* Numbber of components created when splitting aggregate parameters. */ + int param_reductions_created; +} sra_stats; - switch (TREE_CODE (type)) - { - case RECORD_TYPE: - { - unsigned int cache = TYPE_UID (TYPE_MAIN_VARIANT (type)) * 2; - tree f; +static void +dump_access (FILE *f, struct access *access, bool grp) +{ + fprintf (f, "access { "); + fprintf (f, "base = (%d)'", DECL_UID (access->base)); + print_generic_expr (f, access->base, 0); + fprintf (f, "', offset = " HOST_WIDE_INT_PRINT_DEC, access->offset); + fprintf (f, ", size = " HOST_WIDE_INT_PRINT_DEC, access->size); + fprintf (f, ", expr = "); + print_generic_expr (f, access->expr, 0); + fprintf (f, ", type = "); + print_generic_expr (f, access->type, 0); + if (grp) + fprintf (f, ", grp_read = %d, grp_write = %d, grp_assignment_read = %d, " + "grp_assignment_write = %d, grp_scalar_read = %d, " + "grp_scalar_write = %d, grp_total_scalarization = %d, " + "grp_hint = %d, grp_covered = %d, " + "grp_unscalarizable_region = %d, grp_unscalarized_data = %d, " + "grp_partial_lhs = %d, grp_to_be_replaced = %d, " + "grp_maybe_modified = %d, " + "grp_not_necessarilly_dereferenced = %d\n", + access->grp_read, access->grp_write, access->grp_assignment_read, + access->grp_assignment_write, access->grp_scalar_read, + access->grp_scalar_write, access->grp_total_scalarization, + access->grp_hint, access->grp_covered, + access->grp_unscalarizable_region, access->grp_unscalarized_data, + access->grp_partial_lhs, access->grp_to_be_replaced, + access->grp_maybe_modified, + access->grp_not_necessarilly_dereferenced); + else + fprintf (f, ", write = %d, grp_total_scalarization = %d, " + "grp_partial_lhs = %d\n", + access->write, access->grp_total_scalarization, + access->grp_partial_lhs); +} - if (bitmap_bit_p (sra_type_inst_cache, cache+0)) - return true; - if (bitmap_bit_p (sra_type_inst_cache, cache+1)) - return false; +/* Dump a subtree rooted in ACCESS to file F, indent by LEVEL. */ - for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f)) - if (TREE_CODE (f) == FIELD_DECL) - { - if (!type_can_instantiate_all_elements (TREE_TYPE (f))) - { - bitmap_set_bit (sra_type_inst_cache, cache+1); - return false; - } - } +static void +dump_access_tree_1 (FILE *f, struct access *access, int level) +{ + do + { + int i; - bitmap_set_bit (sra_type_inst_cache, cache+0); - return true; - } + for (i = 0; i < level; i++) + fputs ("* ", dump_file); - case ARRAY_TYPE: - return type_can_instantiate_all_elements (TREE_TYPE (type)); + dump_access (f, access, true); - case COMPLEX_TYPE: - return true; + if (access->first_child) + dump_access_tree_1 (f, access->first_child, level + 1); - default: - gcc_unreachable (); + access = access->next_sibling; } + while (access); } -/* Test whether ELT or some sub-element cannot be scalarized. */ +/* Dump all access trees for a variable, given the pointer to the first root in + ACCESS. 
*/ -static bool -can_completely_scalarize_p (struct sra_elt *elt) +static void +dump_access_tree (FILE *f, struct access *access) { - struct sra_elt *c; + for (; access; access = access->next_grp) + dump_access_tree_1 (f, access, 0); +} - if (elt->cannot_scalarize) - return false; +/* Return true iff ACC is non-NULL and has subaccesses. */ - for (c = elt->children; c; c = c->sibling) - if (!can_completely_scalarize_p (c)) - return false; +static inline bool +access_has_children_p (struct access *acc) +{ + return acc && acc->first_child; +} - for (c = elt->groups; c; c = c->sibling) - if (!can_completely_scalarize_p (c)) - return false; +/* Return a vector of pointers to accesses for the variable given in BASE or + NULL if there is none. */ - return true; +static VEC (access_p, heap) * +get_base_access_vector (tree base) +{ + void **slot; + + slot = pointer_map_contains (base_access_vec, base); + if (!slot) + return NULL; + else + return *(VEC (access_p, heap) **) slot; } - -/* A simplified tree hashing algorithm that only handles the types of - trees we expect to find in sra_elt->element. */ +/* Find an access with required OFFSET and SIZE in a subtree of accesses rooted + in ACCESS. Return NULL if it cannot be found. */ -static hashval_t -sra_hash_tree (tree t) +static struct access * +find_access_in_subtree (struct access *access, HOST_WIDE_INT offset, + HOST_WIDE_INT size) { - hashval_t h; - - switch (TREE_CODE (t)) + while (access && (access->offset != offset || access->size != size)) { - case VAR_DECL: - case PARM_DECL: - case RESULT_DECL: - h = DECL_UID (t); - break; + struct access *child = access->first_child; - case INTEGER_CST: - h = TREE_INT_CST_LOW (t) ^ TREE_INT_CST_HIGH (t); - break; + while (child && (child->offset + child->size <= offset)) + child = child->next_sibling; + access = child; + } - case RANGE_EXPR: - h = iterative_hash_expr (TREE_OPERAND (t, 0), 0); - h = iterative_hash_expr (TREE_OPERAND (t, 1), h); - break; + return access; +} - case FIELD_DECL: - /* We can have types that are compatible, but have different member - lists, so we can't hash fields by ID. Use offsets instead. */ - h = iterative_hash_expr (DECL_FIELD_OFFSET (t), 0); - h = iterative_hash_expr (DECL_FIELD_BIT_OFFSET (t), h); - break; +/* Return the first group representative for DECL or NULL if none exists. */ - case BIT_FIELD_REF: - /* Don't take operand 0 into account, that's our parent. */ - h = iterative_hash_expr (TREE_OPERAND (t, 1), 0); - h = iterative_hash_expr (TREE_OPERAND (t, 2), h); - break; +static struct access * +get_first_repr_for_decl (tree base) +{ + VEC (access_p, heap) *access_vec; - default: - gcc_unreachable (); - } + access_vec = get_base_access_vector (base); + if (!access_vec) + return NULL; - return h; + return VEC_index (access_p, access_vec, 0); } -/* Hash function for type SRA_PAIR. */ +/* Find an access representative for the variable BASE and given OFFSET and + SIZE. Requires that access trees have already been built. Return NULL if + it cannot be found. */ -static hashval_t -sra_elt_hash (const void *x) +static struct access * +get_var_base_offset_size_access (tree base, HOST_WIDE_INT offset, + HOST_WIDE_INT size) { - const struct sra_elt *const e = (const struct sra_elt *) x; - const struct sra_elt *p; - hashval_t h; - - h = sra_hash_tree (e->element); + struct access *access; - /* Take into account everything except bitfield blocks back up the - chain. Given that chain lengths are rarely very long, this - should be acceptable. 
If we truly identify this as a performance - problem, it should work to hash the pointer value - "e->parent". */ - for (p = e->parent; p ; p = p->parent) - if (!p->in_bitfld_block) - h = (h * 65521) ^ sra_hash_tree (p->element); + access = get_first_repr_for_decl (base); + while (access && (access->offset + access->size <= offset)) + access = access->next_grp; + if (!access) + return NULL; - return h; + return find_access_in_subtree (access, offset, size); } -/* Equality function for type SRA_PAIR. */ - -static int -sra_elt_eq (const void *x, const void *y) -{ - const struct sra_elt *const a = (const struct sra_elt *) x; - const struct sra_elt *const b = (const struct sra_elt *) y; - tree ae, be; - const struct sra_elt *ap = a->parent; - const struct sra_elt *bp = b->parent; - - if (ap) - while (ap->in_bitfld_block) - ap = ap->parent; - if (bp) - while (bp->in_bitfld_block) - bp = bp->parent; - - if (ap != bp) - return false; +/* Add LINK to the linked list of assign links of RACC. */ +static void +add_link_to_rhs (struct access *racc, struct assign_link *link) +{ + gcc_assert (link->racc == racc); - ae = a->element; - be = b->element; + if (!racc->first_link) + { + gcc_assert (!racc->last_link); + racc->first_link = link; + } + else + racc->last_link->next = link; - if (ae == be) - return true; - if (TREE_CODE (ae) != TREE_CODE (be)) - return false; + racc->last_link = link; + link->next = NULL; +} - switch (TREE_CODE (ae)) +/* Move all link structures in their linked list in OLD_RACC to the linked list + in NEW_RACC. */ +static void +relink_to_new_repr (struct access *new_racc, struct access *old_racc) +{ + if (!old_racc->first_link) { - case VAR_DECL: - case PARM_DECL: - case RESULT_DECL: - /* These are all pointer unique. */ - return false; + gcc_assert (!old_racc->last_link); + return; + } - case INTEGER_CST: - /* Integers are not pointer unique, so compare their values. */ - return tree_int_cst_equal (ae, be); + if (new_racc->first_link) + { + gcc_assert (!new_racc->last_link->next); + gcc_assert (!old_racc->last_link || !old_racc->last_link->next); - case RANGE_EXPR: - return - tree_int_cst_equal (TREE_OPERAND (ae, 0), TREE_OPERAND (be, 0)) - && tree_int_cst_equal (TREE_OPERAND (ae, 1), TREE_OPERAND (be, 1)); + new_racc->last_link->next = old_racc->first_link; + new_racc->last_link = old_racc->last_link; + } + else + { + gcc_assert (!new_racc->last_link); - case FIELD_DECL: - /* Fields are unique within a record, but not between - compatible records. */ - if (DECL_FIELD_CONTEXT (ae) == DECL_FIELD_CONTEXT (be)) - return false; - return fields_compatible_p (ae, be); + new_racc->first_link = old_racc->first_link; + new_racc->last_link = old_racc->last_link; + } + old_racc->first_link = old_racc->last_link = NULL; +} - case BIT_FIELD_REF: - return - tree_int_cst_equal (TREE_OPERAND (ae, 1), TREE_OPERAND (be, 1)) - && tree_int_cst_equal (TREE_OPERAND (ae, 2), TREE_OPERAND (be, 2)); +/* Add ACCESS to the work queue (which is actually a stack). */ - default: - gcc_unreachable (); +static void +add_access_to_work_queue (struct access *access) +{ + if (!access->grp_queued) + { + gcc_assert (!access->next_queued); + access->next_queued = work_queue_head; + access->grp_queued = 1; + work_queue_head = access; } } -/* Create or return the SRA_ELT structure for CHILD in PARENT. PARENT - may be null, in which case CHILD must be a DECL. */ +/* Pop an access from the work queue, and return it, assuming there is one. 
*/ -static struct sra_elt * -lookup_element (struct sra_elt *parent, tree child, tree type, - enum insert_option insert) +static struct access * +pop_access_from_work_queue (void) { - struct sra_elt dummy; - struct sra_elt **slot; - struct sra_elt *elt; + struct access *access = work_queue_head; - if (parent) - dummy.parent = parent->is_group ? parent->parent : parent; - else - dummy.parent = NULL; - dummy.element = child; + work_queue_head = access->next_queued; + access->next_queued = NULL; + access->grp_queued = 0; + return access; +} - slot = (struct sra_elt **) htab_find_slot (sra_map, &dummy, insert); - if (!slot && insert == NO_INSERT) - return NULL; - elt = *slot; - if (!elt && insert == INSERT) - { - *slot = elt = XOBNEW (&sra_obstack, struct sra_elt); - memset (elt, 0, sizeof (*elt)); +/* Allocate necessary structures. */ - elt->parent = parent; - elt->element = child; - elt->type = type; - elt->is_scalar = is_sra_scalar_type (type); +static void +sra_initialize (void) +{ + candidate_bitmap = BITMAP_ALLOC (NULL); + should_scalarize_away_bitmap = BITMAP_ALLOC (NULL); + cannot_scalarize_away_bitmap = BITMAP_ALLOC (NULL); + gcc_obstack_init (&name_obstack); + access_pool = create_alloc_pool ("SRA accesses", sizeof (struct access), 16); + link_pool = create_alloc_pool ("SRA links", sizeof (struct assign_link), 16); + base_access_vec = pointer_map_create (); + memset (&sra_stats, 0, sizeof (sra_stats)); + encountered_apply_args = false; + encountered_recursive_call = false; + encountered_unchangable_recursive_call = false; +} - if (parent) - { - if (IS_ELEMENT_FOR_GROUP (elt->element)) - { - elt->is_group = true; - elt->sibling = parent->groups; - parent->groups = elt; - } - else - { - elt->sibling = parent->children; - parent->children = elt; - } - } +/* Hook fed to pointer_map_traverse, deallocate stored vectors. */ - /* If this is a parameter, then if we want to scalarize, we have - one copy from the true function parameter. Count it now. */ - if (TREE_CODE (child) == PARM_DECL) - { - elt->n_copies = 1; - bitmap_set_bit (needs_copy_in, DECL_UID (child)); - } - } +static bool +delete_base_accesses (const void *key ATTRIBUTE_UNUSED, void **value, + void *data ATTRIBUTE_UNUSED) +{ + VEC (access_p, heap) *access_vec; + access_vec = (VEC (access_p, heap) *) *value; + VEC_free (access_p, heap, access_vec); - return elt; + return true; } -/* Create or return the SRA_ELT structure for EXPR if the expression - refers to a scalarizable variable. */ +/* Deallocate all general structures. */ -static struct sra_elt * -maybe_lookup_element_for_expr (tree expr) +static void +sra_deinitialize (void) { - struct sra_elt *elt; - tree child; + BITMAP_FREE (candidate_bitmap); + BITMAP_FREE (should_scalarize_away_bitmap); + BITMAP_FREE (cannot_scalarize_away_bitmap); + free_alloc_pool (access_pool); + free_alloc_pool (link_pool); + obstack_free (&name_obstack, NULL); + + pointer_map_traverse (base_access_vec, delete_base_accesses, NULL); + pointer_map_destroy (base_access_vec); +} - switch (TREE_CODE (expr)) +/* Remove DECL from candidates for SRA and write REASON to the dump file if + there is one. */ +static void +disqualify_candidate (tree decl, const char *reason) +{ + bitmap_clear_bit (candidate_bitmap, DECL_UID (decl)); + + if (dump_file && (dump_flags & TDF_DETAILS)) { - case VAR_DECL: - case PARM_DECL: - case RESULT_DECL: - if (is_sra_candidate_decl (expr)) - return lookup_element (NULL, expr, TREE_TYPE (expr), INSERT); - return NULL; + fprintf (dump_file, "! 
Disqualifying "); + print_generic_expr (dump_file, decl, 0); + fprintf (dump_file, " - %s\n", reason); + } +} - case ARRAY_REF: - /* We can't scalarize variable array indices. */ - if (in_array_bounds_p (expr)) - child = TREE_OPERAND (expr, 1); - else - return NULL; - break; +/* Return true iff the type contains a field or an element which does not allow + scalarization. */ - case ARRAY_RANGE_REF: - /* We can't scalarize variable array indices. */ - if (range_in_array_bounds_p (expr)) +static bool +type_internals_preclude_sra_p (tree type, const char **msg) +{ + tree fld; + tree et; + + switch (TREE_CODE (type)) + { + case RECORD_TYPE: + case UNION_TYPE: + case QUAL_UNION_TYPE: + for (fld = TYPE_FIELDS (type); fld; fld = DECL_CHAIN (fld)) + if (TREE_CODE (fld) == FIELD_DECL) + { + tree ft = TREE_TYPE (fld); + + if (TREE_THIS_VOLATILE (fld)) + { + *msg = "volatile structure field"; + return true; + } + if (!DECL_FIELD_OFFSET (fld)) + { + *msg = "no structure field offset"; + return true; + } + if (!DECL_SIZE (fld)) + { + *msg = "zero structure field size"; + return true; + } + if (!host_integerp (DECL_FIELD_OFFSET (fld), 1)) + { + *msg = "structure field offset not fixed"; + return true; + } + if (!host_integerp (DECL_SIZE (fld), 1)) + { + *msg = "structure field size not fixed"; + return true; + } + if (AGGREGATE_TYPE_P (ft) + && int_bit_position (fld) % BITS_PER_UNIT != 0) + { + *msg = "structure field is bit field"; + return true; + } + + if (AGGREGATE_TYPE_P (ft) && type_internals_preclude_sra_p (ft, msg)) + return true; + } + + return false; + + case ARRAY_TYPE: + et = TREE_TYPE (type); + + if (TYPE_VOLATILE (et)) { - tree domain = TYPE_DOMAIN (TREE_TYPE (expr)); - child = build2 (RANGE_EXPR, integer_type_node, - TYPE_MIN_VALUE (domain), TYPE_MAX_VALUE (domain)); + *msg = "element type is volatile"; + return true; } - else - return NULL; - break; - case COMPONENT_REF: - { - tree type = TREE_TYPE (TREE_OPERAND (expr, 0)); - /* Don't look through unions. */ - if (TREE_CODE (type) != RECORD_TYPE) - return NULL; - /* Neither through variable-sized records. */ - if (TYPE_SIZE (type) == NULL_TREE - || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) - return NULL; - child = TREE_OPERAND (expr, 1); - } - break; + if (AGGREGATE_TYPE_P (et) && type_internals_preclude_sra_p (et, msg)) + return true; - case REALPART_EXPR: - child = integer_zero_node; - break; - case IMAGPART_EXPR: - child = integer_one_node; - break; + return false; default: - return NULL; + return false; } - - elt = maybe_lookup_element_for_expr (TREE_OPERAND (expr, 0)); - if (elt) - return lookup_element (elt, child, TREE_TYPE (expr), INSERT); - return NULL; } - -/* Functions to walk just enough of the tree to see all scalarizable - references, and categorize them. */ - -/* A set of callbacks for phases 2 and 4. They'll be invoked for the - various kinds of references seen. In all cases, *GSI is an iterator - pointing to the statement being processed. */ -struct sra_walk_fns -{ - /* Invoked when ELT is required as a unit. Note that ELT might refer to - a leaf node, in which case this is a simple scalar reference. *EXPR_P - points to the location of the expression. IS_OUTPUT is true if this - is a left-hand-side reference. USE_ALL is true if we saw something we - couldn't quite identify and had to force the use of the entire object. */ - void (*use) (struct sra_elt *elt, tree *expr_p, - gimple_stmt_iterator *gsi, bool is_output, bool use_all); - - /* Invoked when we have a copy between two scalarizable references. 
*/ - void (*copy) (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt, - gimple_stmt_iterator *gsi); - - /* Invoked when ELT is initialized from a constant. VALUE may be NULL, - in which case it should be treated as an empty CONSTRUCTOR. */ - void (*init) (struct sra_elt *elt, tree value, gimple_stmt_iterator *gsi); - - /* Invoked when we have a copy between one scalarizable reference ELT - and one non-scalarizable reference OTHER without side-effects. - IS_OUTPUT is true if ELT is on the left-hand side. */ - void (*ldst) (struct sra_elt *elt, tree other, - gimple_stmt_iterator *gsi, bool is_output); - - /* True during phase 2, false during phase 4. */ - /* ??? This is a hack. */ - bool initial_scan; -}; - -#ifdef ENABLE_CHECKING -/* Invoked via walk_tree, if *TP contains a candidate decl, return it. */ +/* If T is an SSA_NAME, return NULL if it is not a default def or return its + base variable if it is. Return T if it is not an SSA_NAME. */ static tree -sra_find_candidate_decl (tree *tp, int *walk_subtrees, - void *data ATTRIBUTE_UNUSED) +get_ssa_base_param (tree t) { - tree t = *tp; - enum tree_code code = TREE_CODE (t); - - if (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL) + if (TREE_CODE (t) == SSA_NAME) { - *walk_subtrees = 0; - if (is_sra_candidate_decl (t)) - return t; + if (SSA_NAME_IS_DEFAULT_DEF (t)) + return SSA_NAME_VAR (t); + else + return NULL_TREE; } - else if (TYPE_P (t)) - *walk_subtrees = 0; - - return NULL; + return t; } -#endif -/* Walk most expressions looking for a scalarizable aggregate. - If we find one, invoke FNS->USE. */ +/* Mark a dereference of BASE of distance DIST in a basic block tht STMT + belongs to, unless the BB has already been marked as a potentially + final. */ static void -sra_walk_expr (tree *expr_p, gimple_stmt_iterator *gsi, bool is_output, - const struct sra_walk_fns *fns) +mark_parm_dereference (tree base, HOST_WIDE_INT dist, gimple stmt) { - tree expr = *expr_p; - tree inner = expr; - bool disable_scalarization = false; - bool use_all_p = false; + basic_block bb = gimple_bb (stmt); + int idx, parm_index = 0; + tree parm; - /* We're looking to collect a reference expression between EXPR and INNER, - such that INNER is a scalarizable decl and all other nodes through EXPR - are references that we can scalarize. If we come across something that - we can't scalarize, we reset EXPR. This has the effect of making it - appear that we're referring to the larger expression as a whole. */ + if (bitmap_bit_p (final_bbs, bb->index)) + return; - while (1) - switch (TREE_CODE (inner)) - { - case VAR_DECL: - case PARM_DECL: - case RESULT_DECL: - /* If there is a scalarizable decl at the bottom, then process it. */ - if (is_sra_candidate_decl (inner)) - { - struct sra_elt *elt = maybe_lookup_element_for_expr (expr); - if (disable_scalarization) - elt->cannot_scalarize = true; - else - fns->use (elt, expr_p, gsi, is_output, use_all_p); - } - return; - - case ARRAY_REF: - /* Non-constant index means any member may be accessed. Prevent the - expression from being scalarized. If we were to treat this as a - reference to the whole array, we can wind up with a single dynamic - index reference inside a loop being overridden by several constant - index references during loop setup. It's possible that this could - be avoided by using dynamic usage counts based on BB trip counts - (based on loop analysis or profiling), but that hardly seems worth - the effort. */ - /* ??? Hack. 
Figure out how to push this into the scan routines - without duplicating too much code. */ - if (!in_array_bounds_p (inner)) - { - disable_scalarization = true; - goto use_all; - } - /* ??? Are we assured that non-constant bounds and stride will have - the same value everywhere? I don't think Fortran will... */ - if (TREE_OPERAND (inner, 2) || TREE_OPERAND (inner, 3)) - goto use_all; - inner = TREE_OPERAND (inner, 0); - break; - - case ARRAY_RANGE_REF: - if (!range_in_array_bounds_p (inner)) - { - disable_scalarization = true; - goto use_all; - } - /* ??? See above non-constant bounds and stride . */ - if (TREE_OPERAND (inner, 2) || TREE_OPERAND (inner, 3)) - goto use_all; - inner = TREE_OPERAND (inner, 0); - break; - - case COMPONENT_REF: - { - tree type = TREE_TYPE (TREE_OPERAND (inner, 0)); - /* Don't look through unions. */ - if (TREE_CODE (type) != RECORD_TYPE) - goto use_all; - /* Neither through variable-sized records. */ - if (TYPE_SIZE (type) == NULL_TREE - || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) - goto use_all; - inner = TREE_OPERAND (inner, 0); - } - break; - - case REALPART_EXPR: - case IMAGPART_EXPR: - inner = TREE_OPERAND (inner, 0); - break; - - case BIT_FIELD_REF: - /* A bit field reference to a specific vector is scalarized but for - ones for inputs need to be marked as used on the left hand size so - when we scalarize it, we can mark that variable as non renamable. */ - if (is_output - && TREE_CODE (TREE_TYPE (TREE_OPERAND (inner, 0))) == VECTOR_TYPE) - { - struct sra_elt *elt - = maybe_lookup_element_for_expr (TREE_OPERAND (inner, 0)); - if (elt) - elt->is_vector_lhs = true; - } + for (parm = DECL_ARGUMENTS (current_function_decl); + parm && parm != base; + parm = DECL_CHAIN (parm)) + parm_index++; - /* A bit field reference (access to *multiple* fields simultaneously) - is not currently scalarized. Consider this an access to the full - outer element, to which walk_tree will bring us next. */ - goto use_all; - - CASE_CONVERT: - /* Similarly, a nop explicitly wants to look at an object in a - type other than the one we've scalarized. */ - goto use_all; - - case VIEW_CONVERT_EXPR: - /* Likewise for a view conversion, but with an additional twist: - it can be on the LHS and, in this case, an access to the full - outer element would mean a killing def. So we need to punt - if we haven't already a full access to the current element, - because we cannot pretend to have a killing def if we only - have a partial access at some level. */ - if (is_output && !use_all_p && inner != expr) - disable_scalarization = true; - goto use_all; - - case WITH_SIZE_EXPR: - /* This is a transparent wrapper. The entire inner expression really - is being used. */ - goto use_all; - - use_all: - expr_p = &TREE_OPERAND (inner, 0); - inner = expr = *expr_p; - use_all_p = true; - break; + gcc_assert (parm_index < func_param_count); - default: -#ifdef ENABLE_CHECKING - /* Validate that we're not missing any references. */ - gcc_assert (!walk_tree (&inner, sra_find_candidate_decl, NULL, NULL)); -#endif - return; - } + idx = bb->index * func_param_count + parm_index; + if (bb_dereferences[idx] < dist) + bb_dereferences[idx] = dist; } -/* Walk the arguments of a GIMPLE_CALL looking for scalarizable aggregates. - If we find one, invoke FNS->USE. */ +/* Allocate an access structure for BASE, OFFSET and SIZE, clear it, fill in + the three fields. Also add it to the vector of accesses corresponding to + the base. Finally, return the new access. 
*/ -static void -sra_walk_gimple_call (gimple stmt, gimple_stmt_iterator *gsi, - const struct sra_walk_fns *fns) +static struct access * +create_access_1 (tree base, HOST_WIDE_INT offset, HOST_WIDE_INT size) { - int i; - int nargs = gimple_call_num_args (stmt); - - for (i = 0; i < nargs; i++) - sra_walk_expr (gimple_call_arg_ptr (stmt, i), gsi, false, fns); + VEC (access_p, heap) *vec; + struct access *access; + void **slot; + + access = (struct access *) pool_alloc (access_pool); + memset (access, 0, sizeof (struct access)); + access->base = base; + access->offset = offset; + access->size = size; + + slot = pointer_map_contains (base_access_vec, base); + if (slot) + vec = (VEC (access_p, heap) *) *slot; + else + vec = VEC_alloc (access_p, heap, 32); - if (gimple_call_lhs (stmt)) - sra_walk_expr (gimple_call_lhs_ptr (stmt), gsi, true, fns); -} + VEC_safe_push (access_p, heap, vec, access); -/* Walk the inputs and outputs of a GIMPLE_ASM looking for scalarizable - aggregates. If we find one, invoke FNS->USE. */ + *((struct VEC (access_p,heap) **) + pointer_map_insert (base_access_vec, base)) = vec; -static void -sra_walk_gimple_asm (gimple stmt, gimple_stmt_iterator *gsi, - const struct sra_walk_fns *fns) -{ - size_t i; - for (i = 0; i < gimple_asm_ninputs (stmt); i++) - sra_walk_expr (&TREE_VALUE (gimple_asm_input_op (stmt, i)), gsi, false, fns); - for (i = 0; i < gimple_asm_noutputs (stmt); i++) - sra_walk_expr (&TREE_VALUE (gimple_asm_output_op (stmt, i)), gsi, true, fns); + return access; } -/* Walk a GIMPLE_ASSIGN and categorize the assignment appropriately. */ +/* Create and insert access for EXPR. Return created access, or NULL if it is + not possible. */ -static void -sra_walk_gimple_assign (gimple stmt, gimple_stmt_iterator *gsi, - const struct sra_walk_fns *fns) +static struct access * +create_access (tree expr, gimple stmt, bool write) { - struct sra_elt *lhs_elt = NULL, *rhs_elt = NULL; - tree lhs, rhs; + struct access *access; + HOST_WIDE_INT offset, size, max_size; + tree base = expr; + bool ptr, unscalarizable_region = false; - /* If there is more than 1 element on the RHS, only walk the lhs. */ - if (!gimple_assign_single_p (stmt)) + base = get_ref_base_and_extent (expr, &offset, &size, &max_size); + + if (sra_mode == SRA_MODE_EARLY_IPA + && TREE_CODE (base) == MEM_REF) { - sra_walk_expr (gimple_assign_lhs_ptr (stmt), gsi, true, fns); - return; + base = get_ssa_base_param (TREE_OPERAND (base, 0)); + if (!base) + return NULL; + ptr = true; } + else + ptr = false; - lhs = gimple_assign_lhs (stmt); - rhs = gimple_assign_rhs1 (stmt); - lhs_elt = maybe_lookup_element_for_expr (lhs); - rhs_elt = maybe_lookup_element_for_expr (rhs); + if (!DECL_P (base) || !bitmap_bit_p (candidate_bitmap, DECL_UID (base))) + return NULL; - /* If both sides are scalarizable, this is a COPY operation. */ - if (lhs_elt && rhs_elt) + if (sra_mode == SRA_MODE_EARLY_IPA) { - fns->copy (lhs_elt, rhs_elt, gsi); - return; - } + if (size < 0 || size != max_size) + { + disqualify_candidate (base, "Encountered a variable sized access."); + return NULL; + } + if (TREE_CODE (expr) == COMPONENT_REF + && DECL_BIT_FIELD (TREE_OPERAND (expr, 1))) + { + disqualify_candidate (base, "Encountered a bit-field access."); + return NULL; + } + gcc_checking_assert ((offset % BITS_PER_UNIT) == 0); - /* If the RHS is scalarizable, handle it. There are only two cases. 
*/ - if (rhs_elt) - { - if (!rhs_elt->is_scalar && !TREE_SIDE_EFFECTS (lhs)) - fns->ldst (rhs_elt, lhs, gsi, false); - else - fns->use (rhs_elt, gimple_assign_rhs1_ptr (stmt), gsi, false, false); + if (ptr) + mark_parm_dereference (base, offset + size, stmt); } - - /* If it isn't scalarizable, there may be scalarizable variables within, so - check for a call or else walk the RHS to see if we need to do any - copy-in operations. We need to do it before the LHS is scalarized so - that the statements get inserted in the proper place, before any - copy-out operations. */ else - sra_walk_expr (gimple_assign_rhs1_ptr (stmt), gsi, false, fns); - - /* Likewise, handle the LHS being scalarizable. We have cases similar - to those above, but also want to handle RHS being constant. */ - if (lhs_elt) - { - /* If this is an assignment from a constant, or constructor, then - we have access to all of the elements individually. Invoke INIT. */ - if (TREE_CODE (rhs) == COMPLEX_EXPR - || TREE_CODE (rhs) == COMPLEX_CST - || TREE_CODE (rhs) == CONSTRUCTOR) - fns->init (lhs_elt, rhs, gsi); - - /* If this is an assignment from read-only memory, treat this as if - we'd been passed the constructor directly. Invoke INIT. */ - else if (TREE_CODE (rhs) == VAR_DECL - && TREE_STATIC (rhs) - && !DECL_EXTERNAL (rhs) - && TREE_READONLY (rhs) - && targetm.binds_local_p (rhs)) - fns->init (lhs_elt, DECL_INITIAL (rhs), gsi); - - /* If this is a copy from a non-scalarizable lvalue, invoke LDST. - The lvalue requirement prevents us from trying to directly scalarize - the result of a function call. Which would result in trying to call - the function multiple times, and other evil things. */ - else if (!lhs_elt->is_scalar - && !TREE_SIDE_EFFECTS (rhs) && is_gimple_addressable (rhs)) - fns->ldst (lhs_elt, rhs, gsi, true); - - /* Otherwise we're being used in some context that requires the - aggregate to be seen as a whole. Invoke USE. */ - else - fns->use (lhs_elt, gimple_assign_lhs_ptr (stmt), gsi, true, false); + { + if (size != max_size) + { + size = max_size; + unscalarizable_region = true; + } + if (size < 0) + { + disqualify_candidate (base, "Encountered an unconstrained access."); + return NULL; + } } - /* Similarly to above, LHS_ELT being null only means that the LHS as a - whole is not a scalarizable reference. There may be occurrences of - scalarizable variables within, which implies a USE. */ - else - sra_walk_expr (gimple_assign_lhs_ptr (stmt), gsi, true, fns); -} - -/* Entry point to the walk functions. Search the entire function, - invoking the callbacks in FNS on each of the references to - scalarizable variables. */ - -static void -sra_walk_function (const struct sra_walk_fns *fns) -{ - basic_block bb; - gimple_stmt_iterator si, ni; - - /* ??? Phase 4 could derive some benefit to walking the function in - dominator tree order. */ + access = create_access_1 (base, offset, size); + access->expr = expr; + access->type = TREE_TYPE (expr); + access->write = write; + access->grp_unscalarizable_region = unscalarizable_region; + access->stmt = stmt; - FOR_EACH_BB (bb) - for (si = gsi_start_bb (bb); !gsi_end_p (si); si = ni) - { - gimple stmt; - - stmt = gsi_stmt (si); - - ni = si; - gsi_next (&ni); + if (TREE_CODE (expr) == COMPONENT_REF + && DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1))) + access->non_addressable = 1; - /* If the statement does not reference memory, then it doesn't - make any structure references that we care about. 
*/ - if (!gimple_references_memory_p (stmt)) - continue; - - switch (gimple_code (stmt)) - { - case GIMPLE_RETURN: - /* If we have "return " then the return value is - already exposed for our pleasure. Walk it as a USE to - force all the components back in place for the return. - */ - if (gimple_return_retval (stmt) == NULL_TREE) - ; - else - sra_walk_expr (gimple_return_retval_ptr (stmt), &si, false, - fns); - break; + return access; +} - case GIMPLE_ASSIGN: - sra_walk_gimple_assign (stmt, &si, fns); - break; - case GIMPLE_CALL: - sra_walk_gimple_call (stmt, &si, fns); - break; - case GIMPLE_ASM: - sra_walk_gimple_asm (stmt, &si, fns); - break; - default: - break; - } - } -} - -/* Phase One: Scan all referenced variables in the program looking for - structures that could be decomposed. */ +/* Return true iff TYPE is a RECORD_TYPE with fields that are either of gimple + register types or (recursively) records with only these two kinds of fields. + It also returns false if any of these records contains a bit-field. */ static bool -find_candidates_for_sra (void) +type_consists_of_records_p (tree type) { - bool any_set = false; - tree var; - referenced_var_iterator rvi; + tree fld; - FOR_EACH_REFERENCED_VAR (var, rvi) - { - if (decl_can_be_decomposed_p (var)) - { - bitmap_set_bit (sra_candidates, DECL_UID (var)); - any_set = true; - } - } + if (TREE_CODE (type) != RECORD_TYPE) + return false; - return any_set; -} + for (fld = TYPE_FIELDS (type); fld; fld = DECL_CHAIN (fld)) + if (TREE_CODE (fld) == FIELD_DECL) + { + tree ft = TREE_TYPE (fld); - -/* Phase Two: Scan all references to scalarizable variables. Count the - number of times they are used or copied respectively. */ + if (DECL_BIT_FIELD (fld)) + return false; -/* Callbacks to fill in SRA_WALK_FNS. Everything but USE is - considered a copy, because we can decompose the reference such that - the sub-elements needn't be contiguous. */ + if (!is_gimple_reg_type (ft) + && !type_consists_of_records_p (ft)) + return false; + } -static void -scan_use (struct sra_elt *elt, tree *expr_p ATTRIBUTE_UNUSED, - gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, - bool is_output ATTRIBUTE_UNUSED, bool use_all ATTRIBUTE_UNUSED) -{ - elt->n_uses += 1; + return true; } -static void -scan_copy (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt, - gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED) -{ - lhs_elt->n_copies += 1; - rhs_elt->n_copies += 1; -} +/* Create total_scalarization accesses for all scalar type fields in DECL that + must be of a RECORD_TYPE conforming to type_consists_of_records_p. BASE + must be the top-most VAR_DECL representing the variable, OFFSET must be the + offset of DECL within BASE. REF must be the memory reference expression for + the given decl. 
*/ static void -scan_init (struct sra_elt *lhs_elt, tree rhs ATTRIBUTE_UNUSED, - gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED) +completely_scalarize_record (tree base, tree decl, HOST_WIDE_INT offset, + tree ref) { - lhs_elt->n_copies += 1; -} + tree fld, decl_type = TREE_TYPE (decl); -static void -scan_ldst (struct sra_elt *elt, tree other ATTRIBUTE_UNUSED, - gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, - bool is_output ATTRIBUTE_UNUSED) -{ - elt->n_copies += 1; + for (fld = TYPE_FIELDS (decl_type); fld; fld = DECL_CHAIN (fld)) + if (TREE_CODE (fld) == FIELD_DECL) + { + HOST_WIDE_INT pos = offset + int_bit_position (fld); + tree ft = TREE_TYPE (fld); + tree nref = build3 (COMPONENT_REF, TREE_TYPE (fld), ref, fld, + NULL_TREE); + + if (is_gimple_reg_type (ft)) + { + struct access *access; + HOST_WIDE_INT size; + + size = tree_low_cst (DECL_SIZE (fld), 1); + access = create_access_1 (base, pos, size); + access->expr = nref; + access->type = ft; + access->grp_total_scalarization = 1; + /* Accesses for intraprocedural SRA can have their stmt NULL. */ + } + else + completely_scalarize_record (base, fld, pos, nref); + } } -/* Dump the values we collected during the scanning phase. */ +/* Create total_scalarization accesses for all scalar type fields in VAR and + for VAR a a whole. VAR must be of a RECORD_TYPE conforming to + type_consists_of_records_p. */ static void -scan_dump (struct sra_elt *elt) +completely_scalarize_var (tree var) { - struct sra_elt *c; + HOST_WIDE_INT size = tree_low_cst (DECL_SIZE (var), 1); + struct access *access; - dump_sra_elt_name (dump_file, elt); - fprintf (dump_file, ": n_uses=%u n_copies=%u\n", elt->n_uses, elt->n_copies); + access = create_access_1 (var, 0, size); + access->expr = var; + access->type = TREE_TYPE (var); + access->grp_total_scalarization = 1; - for (c = elt->children; c ; c = c->sibling) - scan_dump (c); - - for (c = elt->groups; c ; c = c->sibling) - scan_dump (c); + completely_scalarize_record (var, var, 0, var); } -/* Entry point to phase 2. Scan the entire function, building up - scalarization data structures, recording copies and uses. */ +/* Search the given tree for a declaration by skipping handled components and + exclude it from the candidates. */ static void -scan_function (void) +disqualify_base_of_expr (tree t, const char *reason) { - static const struct sra_walk_fns fns = { - scan_use, scan_copy, scan_init, scan_ldst, true - }; - bitmap_iterator bi; - - sra_walk_function (&fns); + t = get_base_address (t); + if (sra_mode == SRA_MODE_EARLY_IPA + && TREE_CODE (t) == MEM_REF) + t = get_ssa_base_param (TREE_OPERAND (t, 0)); - if (dump_file && (dump_flags & TDF_DETAILS)) - { - unsigned i; - - fputs ("\nScan results:\n", dump_file); - EXECUTE_IF_SET_IN_BITMAP (sra_candidates, 0, i, bi) - { - tree var = referenced_var (i); - struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT); - if (elt) - scan_dump (elt); - } - fputc ('\n', dump_file); - } + if (t && DECL_P (t)) + disqualify_candidate (t, reason); } - -/* Phase Three: Make decisions about which variables to scalarize, if any. - All elements to be scalarized have replacement variables made for them. */ -/* A subroutine of build_element_name. Recursively build the element - name on the obstack. */ +/* Scan expression EXPR and create access structures for all accesses to + candidates for scalarization. Return the created access or NULL if none is + created. 
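+
+   For instance (a made-up example), for a read of x.f where x is a
+   candidate aggregate, an access with base x, the bit offset and size
+   of field f and the type of f is recorded.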
*/ -static void -build_element_name_1 (struct sra_elt *elt) +static struct access * +build_access_from_expr_1 (tree expr, gimple stmt, bool write) { - tree t; - char buffer[32]; + struct access *ret = NULL; + bool partial_ref; - if (elt->parent) + if (TREE_CODE (expr) == BIT_FIELD_REF + || TREE_CODE (expr) == IMAGPART_EXPR + || TREE_CODE (expr) == REALPART_EXPR) { - build_element_name_1 (elt->parent); - obstack_1grow (&sra_obstack, '$'); - - if (TREE_CODE (elt->parent->type) == COMPLEX_TYPE) - { - if (elt->element == integer_zero_node) - obstack_grow (&sra_obstack, "real", 4); - else - obstack_grow (&sra_obstack, "imag", 4); - return; - } + expr = TREE_OPERAND (expr, 0); + partial_ref = true; } + else + partial_ref = false; - t = elt->element; - if (TREE_CODE (t) == INTEGER_CST) - { - /* ??? Eh. Don't bother doing double-wide printing. */ - sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, TREE_INT_CST_LOW (t)); - obstack_grow (&sra_obstack, buffer, strlen (buffer)); - } - else if (TREE_CODE (t) == BIT_FIELD_REF) + /* We need to dive through V_C_Es in order to get the size of its parameter + and not the result type. Ada produces such statements. We are also + capable of handling the topmost V_C_E but not any of those buried in other + handled components. */ + if (TREE_CODE (expr) == VIEW_CONVERT_EXPR) + expr = TREE_OPERAND (expr, 0); + + if (contains_view_convert_expr_p (expr)) { - sprintf (buffer, "B" HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (TREE_OPERAND (t, 2), 1)); - obstack_grow (&sra_obstack, buffer, strlen (buffer)); - sprintf (buffer, "F" HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (TREE_OPERAND (t, 1), 1)); - obstack_grow (&sra_obstack, buffer, strlen (buffer)); + disqualify_base_of_expr (expr, "V_C_E under a different handled " + "component."); + return NULL; } - else + + switch (TREE_CODE (expr)) { - tree name = DECL_NAME (t); - if (name) - obstack_grow (&sra_obstack, IDENTIFIER_POINTER (name), - IDENTIFIER_LENGTH (name)); - else - { - sprintf (buffer, "D%u", DECL_UID (t)); - obstack_grow (&sra_obstack, buffer, strlen (buffer)); - } + case MEM_REF: + if (TREE_CODE (TREE_OPERAND (expr, 0)) != ADDR_EXPR + && sra_mode != SRA_MODE_EARLY_IPA) + return NULL; + /* fall through */ + case VAR_DECL: + case PARM_DECL: + case RESULT_DECL: + case COMPONENT_REF: + case ARRAY_REF: + case ARRAY_RANGE_REF: + ret = create_access (expr, stmt, write); + break; + + default: + break; } -} -/* Construct a pretty variable name for an element's replacement variable. - The name is built on the obstack. */ + if (write && partial_ref && ret) + ret->grp_partial_lhs = 1; -static char * -build_element_name (struct sra_elt *elt) -{ - build_element_name_1 (elt); - obstack_1grow (&sra_obstack, '\0'); - return XOBFINISH (&sra_obstack, char *); + return ret; } -/* Instantiate an element as an independent variable. */ +/* Scan expression EXPR and create access structures for all accesses to + candidates for scalarization. Return true if any access has been inserted. + STMT must be the statement from which the expression is taken, WRITE must be + true if the expression is a store and false otherwise. 
*/ -static void -instantiate_element (struct sra_elt *elt) +static bool +build_access_from_expr (tree expr, gimple stmt, bool write) { - struct sra_elt *base_elt; - tree var, base; - bool nowarn = TREE_NO_WARNING (elt->element); - - for (base_elt = elt; base_elt->parent; base_elt = base_elt->parent) - if (!nowarn) - nowarn = TREE_NO_WARNING (base_elt->parent->element); - base = base_elt->element; - - elt->replacement = var = make_rename_temp (elt->type, "SR"); + struct access *access; - if (DECL_P (elt->element) - && !tree_int_cst_equal (DECL_SIZE (var), DECL_SIZE (elt->element))) + access = build_access_from_expr_1 (expr, stmt, write); + if (access) { - DECL_SIZE (var) = DECL_SIZE (elt->element); - DECL_SIZE_UNIT (var) = DECL_SIZE_UNIT (elt->element); + /* This means the aggregate is accesses as a whole in a way other than an + assign statement and thus cannot be removed even if we had a scalar + replacement for everything. */ + if (cannot_scalarize_away_bitmap) + bitmap_set_bit (cannot_scalarize_away_bitmap, DECL_UID (access->base)); + return true; + } + return false; +} - elt->in_bitfld_block = 1; - elt->replacement = fold_build3 (BIT_FIELD_REF, elt->type, var, - DECL_SIZE (var), - BYTES_BIG_ENDIAN - ? size_binop (MINUS_EXPR, - TYPE_SIZE (elt->type), - DECL_SIZE (var)) - : bitsize_int (0)); +/* Disqualify LHS and RHS for scalarization if STMT must end its basic block in + modes in which it matters, return true iff they have been disqualified. RHS + may be NULL, in that case ignore it. If we scalarize an aggregate in + intra-SRA we may need to add statements after each statement. This is not + possible if a statement unconditionally has to end the basic block. */ +static bool +disqualify_ops_if_throwing_stmt (gimple stmt, tree lhs, tree rhs) +{ + if ((sra_mode == SRA_MODE_EARLY_INTRA || sra_mode == SRA_MODE_INTRA) + && (stmt_can_throw_internal (stmt) || stmt_ends_bb_p (stmt))) + { + disqualify_base_of_expr (lhs, "LHS of a throwing stmt."); + if (rhs) + disqualify_base_of_expr (rhs, "RHS of a throwing stmt."); + return true; } + return false; +} - /* For vectors, if used on the left hand side with BIT_FIELD_REF, - they are not a gimple register. */ - if (TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE && elt->is_vector_lhs) - DECL_GIMPLE_REG_P (var) = 0; +/* Return true if EXP is a memory reference less aligned than ALIGN. This is + invoked only on strict-alignment targets. */ - DECL_SOURCE_LOCATION (var) = DECL_SOURCE_LOCATION (base); - DECL_ARTIFICIAL (var) = 1; +static bool +tree_non_aligned_mem_p (tree exp, unsigned int align) +{ + unsigned int exp_align; - if (TREE_THIS_VOLATILE (elt->type)) - { - TREE_THIS_VOLATILE (var) = 1; - TREE_SIDE_EFFECTS (var) = 1; - } + if (TREE_CODE (exp) == VIEW_CONVERT_EXPR) + exp = TREE_OPERAND (exp, 0); - if (DECL_NAME (base) && !DECL_IGNORED_P (base)) - { - char *pretty_name = build_element_name (elt); - DECL_NAME (var) = get_identifier (pretty_name); - obstack_free (&sra_obstack, pretty_name); + if (TREE_CODE (exp) == SSA_NAME || is_gimple_min_invariant (exp)) + return false; - SET_DECL_DEBUG_EXPR (var, generate_element_ref (elt)); - DECL_DEBUG_EXPR_IS_FROM (var) = 1; - - DECL_IGNORED_P (var) = 0; - TREE_NO_WARNING (var) = nowarn; - } + /* get_object_alignment will fall back to BITS_PER_UNIT if it cannot + compute an explicit alignment. Pretend that dereferenced pointers + are always aligned on strict-alignment targets. 
*/ + if (TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == TARGET_MEM_REF) + exp_align = get_object_or_type_alignment (exp); else - { - DECL_IGNORED_P (var) = 1; - /* ??? We can't generate any warning that would be meaningful. */ - TREE_NO_WARNING (var) = 1; - } + exp_align = get_object_alignment (exp); - /* Zero-initialize bit-field scalarization variables, to avoid - triggering undefined behavior. */ - if (TREE_CODE (elt->element) == BIT_FIELD_REF - || (var != elt->replacement - && TREE_CODE (elt->replacement) == BIT_FIELD_REF)) - { - gimple_seq init = sra_build_assignment (var, - fold_convert (TREE_TYPE (var), - integer_zero_node) - ); - insert_edge_copies_seq (init, ENTRY_BLOCK_PTR); - mark_all_v_defs_seq (init); - } + if (exp_align < align) + return true; - if (dump_file) - { - fputs (" ", dump_file); - dump_sra_elt_name (dump_file, elt); - fputs (" -> ", dump_file); - print_generic_expr (dump_file, var, dump_flags); - fputc ('\n', dump_file); - } + return false; } -/* Make one pass across an element tree deciding whether or not it's - profitable to instantiate individual leaf scalars. +/* Scan expressions occuring in STMT, create access structures for all accesses + to candidates for scalarization and remove those candidates which occur in + statements or expressions that prevent them from being split apart. Return + true if any access has been inserted. */ - PARENT_USES and PARENT_COPIES are the sum of the N_USES and N_COPIES - fields all the way up the tree. */ - -static void -decide_instantiation_1 (struct sra_elt *elt, unsigned int parent_uses, - unsigned int parent_copies) +static bool +build_accesses_from_assign (gimple stmt) { - if (dump_file && !elt->parent) - { - fputs ("Initial instantiation for ", dump_file); - dump_sra_elt_name (dump_file, elt); - fputc ('\n', dump_file); - } + tree lhs, rhs; + struct access *lacc, *racc; - if (elt->cannot_scalarize) - return; + if (!gimple_assign_single_p (stmt) + /* Scope clobbers don't influence scalarization. */ + || gimple_clobber_p (stmt)) + return false; - if (elt->is_scalar) - { - /* The decision is simple: instantiate if we're used more frequently - than the parent needs to be seen as a complete unit. */ - if (elt->n_uses + elt->n_copies + parent_copies > parent_uses) - instantiate_element (elt); - } - else - { - struct sra_elt *c, *group; - unsigned int this_uses = elt->n_uses + parent_uses; - unsigned int this_copies = elt->n_copies + parent_copies; + lhs = gimple_assign_lhs (stmt); + rhs = gimple_assign_rhs1 (stmt); - /* Consider groups of sub-elements as weighing in favour of - instantiation whatever their size. */ - for (group = elt->groups; group ; group = group->sibling) - FOR_EACH_ACTUAL_CHILD (c, group) - { - c->n_uses += group->n_uses; - c->n_copies += group->n_copies; - } + if (disqualify_ops_if_throwing_stmt (stmt, lhs, rhs)) + return false; - for (c = elt->children; c ; c = c->sibling) - decide_instantiation_1 (c, this_uses, this_copies); - } -} + racc = build_access_from_expr_1 (rhs, stmt, false); + lacc = build_access_from_expr_1 (lhs, stmt, true); -/* Compute the size and number of all instantiated elements below ELT. - We will only care about this if the size of the complete structure - fits in a HOST_WIDE_INT, so we don't have to worry about overflow. 
*/ + if (lacc) + { + lacc->grp_assignment_write = 1; + if (STRICT_ALIGNMENT + && tree_non_aligned_mem_p (rhs, get_object_alignment (lhs))) + lacc->grp_unscalarizable_region = 1; + } -static unsigned int -sum_instantiated_sizes (struct sra_elt *elt, unsigned HOST_WIDE_INT *sizep) -{ - if (elt->replacement) + if (racc) { - *sizep += TREE_INT_CST_LOW (TYPE_SIZE_UNIT (elt->type)); - return 1; + racc->grp_assignment_read = 1; + if (should_scalarize_away_bitmap && !gimple_has_volatile_ops (stmt) + && !is_gimple_reg_type (racc->type)) + bitmap_set_bit (should_scalarize_away_bitmap, DECL_UID (racc->base)); + if (STRICT_ALIGNMENT + && tree_non_aligned_mem_p (lhs, get_object_alignment (rhs))) + racc->grp_unscalarizable_region = 1; } - else + + if (lacc && racc + && (sra_mode == SRA_MODE_EARLY_INTRA || sra_mode == SRA_MODE_INTRA) + && !lacc->grp_unscalarizable_region + && !racc->grp_unscalarizable_region + && AGGREGATE_TYPE_P (TREE_TYPE (lhs)) + /* FIXME: Turn the following line into an assert after PR 40058 is + fixed. */ + && lacc->size == racc->size + && useless_type_conversion_p (lacc->type, racc->type)) { - struct sra_elt *c; - unsigned int count = 0; + struct assign_link *link; + + link = (struct assign_link *) pool_alloc (link_pool); + memset (link, 0, sizeof (struct assign_link)); - for (c = elt->children; c ; c = c->sibling) - count += sum_instantiated_sizes (c, sizep); + link->lacc = lacc; + link->racc = racc; - return count; + add_link_to_rhs (racc, link); } -} -/* Instantiate fields in ELT->TYPE that are not currently present as - children of ELT. */ + return lacc || racc; +} -static void instantiate_missing_elements (struct sra_elt *elt); +/* Callback of walk_stmt_load_store_addr_ops visit_addr used to determine + GIMPLE_ASM operands with memory constrains which cannot be scalarized. */ -static struct sra_elt * -instantiate_missing_elements_1 (struct sra_elt *elt, tree child, tree type) +static bool +asm_visit_addr (gimple stmt ATTRIBUTE_UNUSED, tree op, + void *data ATTRIBUTE_UNUSED) { - struct sra_elt *sub = lookup_element (elt, child, type, INSERT); - if (sub->is_scalar) - { - if (sub->replacement == NULL) - instantiate_element (sub); - } - else - instantiate_missing_elements (sub); - return sub; + op = get_base_address (op); + if (op + && DECL_P (op)) + disqualify_candidate (op, "Non-scalarizable GIMPLE_ASM operand."); + + return false; } -/* Obtain the canonical type for field F of ELEMENT. */ +/* Return true iff callsite CALL has at least as many actual arguments as there + are formal parameters of the function currently processed by IPA-SRA. */ -static tree -canon_type_for_field (tree f, tree element) +static inline bool +callsite_has_enough_arguments_p (gimple call) { - tree field_type = TREE_TYPE (f); - - /* canonicalize_component_ref() unwidens some bit-field types (not - marked as DECL_BIT_FIELD in C++), so we must do the same, lest we - may introduce type mismatches. */ - if (INTEGRAL_TYPE_P (field_type) - && DECL_MODE (f) != TYPE_MODE (field_type)) - field_type = TREE_TYPE (get_unwidened (build3 (COMPONENT_REF, - field_type, - element, - f, NULL_TREE), - NULL_TREE)); - - return field_type; + return gimple_call_num_args (call) >= (unsigned) func_param_count; } -/* Look for adjacent fields of ELT starting at F that we'd like to - scalarize as a single variable. Return the last field of the - group. */ +/* Scan function and look for interesting expressions and create access + structures for them. Return true iff any access is created. 
*/ -static tree -try_instantiate_multiple_fields (struct sra_elt *elt, tree f) +static bool +scan_function (void) { - int count; - unsigned HOST_WIDE_INT align, bit, size, alchk; - enum machine_mode mode; - tree first = f, prev; - tree type, var; - struct sra_elt *block; + basic_block bb; + bool ret = false; - /* Point fields are typically best handled as standalone entities. */ - if (POINTER_TYPE_P (TREE_TYPE (f))) - return f; - - if (!is_sra_scalar_type (TREE_TYPE (f)) - || !host_integerp (DECL_FIELD_OFFSET (f), 1) - || !host_integerp (DECL_FIELD_BIT_OFFSET (f), 1) - || !host_integerp (DECL_SIZE (f), 1) - || lookup_element (elt, f, NULL, NO_INSERT)) - return f; + FOR_EACH_BB (bb) + { + gimple_stmt_iterator gsi; + for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) + { + gimple stmt = gsi_stmt (gsi); + tree t; + unsigned i; - block = elt; + if (final_bbs && stmt_can_throw_external (stmt)) + bitmap_set_bit (final_bbs, bb->index); + switch (gimple_code (stmt)) + { + case GIMPLE_RETURN: + t = gimple_return_retval (stmt); + if (t != NULL_TREE) + ret |= build_access_from_expr (t, stmt, false); + if (final_bbs) + bitmap_set_bit (final_bbs, bb->index); + break; - /* For complex and array objects, there are going to be integer - literals as child elements. In this case, we can't just take the - alignment and mode of the decl, so we instead rely on the element - type. + case GIMPLE_ASSIGN: + ret |= build_accesses_from_assign (stmt); + break; - ??? We could try to infer additional alignment from the full - object declaration and the location of the sub-elements we're - accessing. */ - for (count = 0; !DECL_P (block->element); count++) - block = block->parent; + case GIMPLE_CALL: + for (i = 0; i < gimple_call_num_args (stmt); i++) + ret |= build_access_from_expr (gimple_call_arg (stmt, i), + stmt, false); - align = DECL_ALIGN (block->element); - alchk = GET_MODE_BITSIZE (DECL_MODE (block->element)); + if (sra_mode == SRA_MODE_EARLY_IPA) + { + tree dest = gimple_call_fndecl (stmt); + int flags = gimple_call_flags (stmt); + + if (dest) + { + if (DECL_BUILT_IN_CLASS (dest) == BUILT_IN_NORMAL + && DECL_FUNCTION_CODE (dest) == BUILT_IN_APPLY_ARGS) + encountered_apply_args = true; + if (cgraph_get_node (dest) + == cgraph_get_node (current_function_decl)) + { + encountered_recursive_call = true; + if (!callsite_has_enough_arguments_p (stmt)) + encountered_unchangable_recursive_call = true; + } + } + + if (final_bbs + && (flags & (ECF_CONST | ECF_PURE)) == 0) + bitmap_set_bit (final_bbs, bb->index); + } - if (count) - { - type = TREE_TYPE (block->element); - while (count--) - type = TREE_TYPE (type); + t = gimple_call_lhs (stmt); + if (t && !disqualify_ops_if_throwing_stmt (stmt, t, NULL)) + ret |= build_access_from_expr (t, stmt, true); + break; - align = TYPE_ALIGN (type); - alchk = GET_MODE_BITSIZE (TYPE_MODE (type)); - } + case GIMPLE_ASM: + walk_stmt_load_store_addr_ops (stmt, NULL, NULL, NULL, + asm_visit_addr); + if (final_bbs) + bitmap_set_bit (final_bbs, bb->index); - if (align < alchk) - align = alchk; + for (i = 0; i < gimple_asm_ninputs (stmt); i++) + { + t = TREE_VALUE (gimple_asm_input_op (stmt, i)); + ret |= build_access_from_expr (t, stmt, false); + } + for (i = 0; i < gimple_asm_noutputs (stmt); i++) + { + t = TREE_VALUE (gimple_asm_output_op (stmt, i)); + ret |= build_access_from_expr (t, stmt, true); + } + break; - /* Coalescing wider fields is probably pointless and - inefficient. 
*/ - if (align > BITS_PER_WORD) - align = BITS_PER_WORD; + default: + break; + } + } + } - bit = tree_low_cst (DECL_FIELD_OFFSET (f), 1) * BITS_PER_UNIT - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f), 1); - size = tree_low_cst (DECL_SIZE (f), 1); + return ret; +} - alchk = align - 1; - alchk = ~alchk; +/* Helper of QSORT function. There are pointers to accesses in the array. An + access is considered smaller than another if it has smaller offset or if the + offsets are the same but is size is bigger. */ - if ((bit & alchk) != ((bit + size - 1) & alchk)) - return f; +static int +compare_access_positions (const void *a, const void *b) +{ + const access_p *fp1 = (const access_p *) a; + const access_p *fp2 = (const access_p *) b; + const access_p f1 = *fp1; + const access_p f2 = *fp2; - /* Find adjacent fields in the same alignment word. */ + if (f1->offset != f2->offset) + return f1->offset < f2->offset ? -1 : 1; - for (prev = f, f = TREE_CHAIN (f); - f && TREE_CODE (f) == FIELD_DECL - && is_sra_scalar_type (TREE_TYPE (f)) - && host_integerp (DECL_FIELD_OFFSET (f), 1) - && host_integerp (DECL_FIELD_BIT_OFFSET (f), 1) - && host_integerp (DECL_SIZE (f), 1) - && !lookup_element (elt, f, NULL, NO_INSERT); - prev = f, f = TREE_CHAIN (f)) + if (f1->size == f2->size) { - unsigned HOST_WIDE_INT nbit, nsize; + if (f1->type == f2->type) + return 0; + /* Put any non-aggregate type before any aggregate type. */ + else if (!is_gimple_reg_type (f1->type) + && is_gimple_reg_type (f2->type)) + return 1; + else if (is_gimple_reg_type (f1->type) + && !is_gimple_reg_type (f2->type)) + return -1; + /* Put any complex or vector type before any other scalar type. */ + else if (TREE_CODE (f1->type) != COMPLEX_TYPE + && TREE_CODE (f1->type) != VECTOR_TYPE + && (TREE_CODE (f2->type) == COMPLEX_TYPE + || TREE_CODE (f2->type) == VECTOR_TYPE)) + return 1; + else if ((TREE_CODE (f1->type) == COMPLEX_TYPE + || TREE_CODE (f1->type) == VECTOR_TYPE) + && TREE_CODE (f2->type) != COMPLEX_TYPE + && TREE_CODE (f2->type) != VECTOR_TYPE) + return -1; + /* Put the integral type with the bigger precision first. */ + else if (INTEGRAL_TYPE_P (f1->type) + && INTEGRAL_TYPE_P (f2->type)) + return TYPE_PRECISION (f2->type) - TYPE_PRECISION (f1->type); + /* Put any integral type with non-full precision last. */ + else if (INTEGRAL_TYPE_P (f1->type) + && (TREE_INT_CST_LOW (TYPE_SIZE (f1->type)) + != TYPE_PRECISION (f1->type))) + return 1; + else if (INTEGRAL_TYPE_P (f2->type) + && (TREE_INT_CST_LOW (TYPE_SIZE (f2->type)) + != TYPE_PRECISION (f2->type))) + return -1; + /* Stabilize the sort. */ + return TYPE_UID (f1->type) - TYPE_UID (f2->type); + } - nbit = tree_low_cst (DECL_FIELD_OFFSET (f), 1) * BITS_PER_UNIT - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f), 1); - nsize = tree_low_cst (DECL_SIZE (f), 1); + /* We want the bigger accesses first, thus the opposite operator in the next + line: */ + return f1->size > f2->size ? -1 : 1; +} - if (bit + size == nbit) - { - if ((bit & alchk) != ((nbit + nsize - 1) & alchk)) - { - /* If we're at an alignment boundary, don't bother - growing alignment such that we can include this next - field. */ - if ((nbit & alchk) - || GET_MODE_BITSIZE (DECL_MODE (f)) <= align) - break; - align = GET_MODE_BITSIZE (DECL_MODE (f)); - alchk = align - 1; - alchk = ~alchk; +/* Append a name of the declaration to the name obstack. A helper function for + make_fancy_name. 
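+
+   As an illustration (made-up expression), make_fancy_name below turns
+   an access expression such as
+
+     foo.bar[3]
+
+   into the string "foo$bar$3", which is later used as the DECL_NAME of
+   the scalar replacement.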
*/ - if ((bit & alchk) != ((nbit + nsize - 1) & alchk)) - break; - } - size += nsize; - } - else if (nbit + nsize == bit) - { - if ((nbit & alchk) != ((bit + size - 1) & alchk)) - { - if ((bit & alchk) - || GET_MODE_BITSIZE (DECL_MODE (f)) <= align) - break; - - align = GET_MODE_BITSIZE (DECL_MODE (f)); - alchk = align - 1; - alchk = ~alchk; +static void +make_fancy_decl_name (tree decl) +{ + char buffer[32]; - if ((nbit & alchk) != ((bit + size - 1) & alchk)) - break; - } - bit = nbit; - size += nsize; - } - else - break; + tree name = DECL_NAME (decl); + if (name) + obstack_grow (&name_obstack, IDENTIFIER_POINTER (name), + IDENTIFIER_LENGTH (name)); + else + { + sprintf (buffer, "D%u", DECL_UID (decl)); + obstack_grow (&name_obstack, buffer, strlen (buffer)); } +} - f = prev; - - if (f == first) - return f; +/* Helper for make_fancy_name. */ - gcc_assert ((bit & alchk) == ((bit + size - 1) & alchk)); +static void +make_fancy_name_1 (tree expr) +{ + char buffer[32]; + tree index; - /* Try to widen the bit range so as to cover padding bits as well. */ + if (DECL_P (expr)) + { + make_fancy_decl_name (expr); + return; + } - if ((bit & ~alchk) || size != align) + switch (TREE_CODE (expr)) { - unsigned HOST_WIDE_INT mbit = bit & alchk; - unsigned HOST_WIDE_INT msize = align; + case COMPONENT_REF: + make_fancy_name_1 (TREE_OPERAND (expr, 0)); + obstack_1grow (&name_obstack, '$'); + make_fancy_decl_name (TREE_OPERAND (expr, 1)); + break; - for (f = TYPE_FIELDS (elt->type); - f; f = TREE_CHAIN (f)) - { - unsigned HOST_WIDE_INT fbit, fsize; + case ARRAY_REF: + make_fancy_name_1 (TREE_OPERAND (expr, 0)); + obstack_1grow (&name_obstack, '$'); + /* Arrays with only one element may not have a constant as their + index. */ + index = TREE_OPERAND (expr, 1); + if (TREE_CODE (index) != INTEGER_CST) + break; + sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, TREE_INT_CST_LOW (index)); + obstack_grow (&name_obstack, buffer, strlen (buffer)); + break; - /* Skip the fields from first to prev. */ - if (f == first) - { - f = prev; - continue; - } + case ADDR_EXPR: + make_fancy_name_1 (TREE_OPERAND (expr, 0)); + break; - if (!(TREE_CODE (f) == FIELD_DECL - && host_integerp (DECL_FIELD_OFFSET (f), 1) - && host_integerp (DECL_FIELD_BIT_OFFSET (f), 1))) - continue; + case MEM_REF: + make_fancy_name_1 (TREE_OPERAND (expr, 0)); + if (!integer_zerop (TREE_OPERAND (expr, 1))) + { + obstack_1grow (&name_obstack, '$'); + sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, + TREE_INT_CST_LOW (TREE_OPERAND (expr, 1))); + obstack_grow (&name_obstack, buffer, strlen (buffer)); + } + break; - fbit = tree_low_cst (DECL_FIELD_OFFSET (f), 1) * BITS_PER_UNIT - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f), 1); + case BIT_FIELD_REF: + case REALPART_EXPR: + case IMAGPART_EXPR: + gcc_unreachable (); /* we treat these as scalars. */ + break; + default: + break; + } +} - /* If we're past the selected word, we're fine. */ - if ((bit & alchk) < (fbit & alchk)) - continue; +/* Create a human readable name for replacement variable of ACCESS. */ - if (host_integerp (DECL_SIZE (f), 1)) - fsize = tree_low_cst (DECL_SIZE (f), 1); - else - /* Assume a variable-sized field takes up all space till - the end of the word. ??? Endianness issues? */ - fsize = align - (fbit & alchk); +static char * +make_fancy_name (tree expr) +{ + make_fancy_name_1 (expr); + obstack_1grow (&name_obstack, '\0'); + return XOBFINISH (&name_obstack, char *); +} - if ((fbit & alchk) < (bit & alchk)) - { - /* A large field might start at a previous word and - extend into the selected word. 
Exclude those - bits. ??? Endianness issues? */ - HOST_WIDE_INT diff = fbit + fsize - mbit; +/* Construct a MEM_REF that would reference a part of aggregate BASE of type + EXP_TYPE at the given OFFSET. If BASE is something for which + get_addr_base_and_unit_offset returns NULL, gsi must be non-NULL and is used + to insert new statements either before or below the current one as specified + by INSERT_AFTER. This function is not capable of handling bitfields. */ - if (diff <= 0) - continue; +tree +build_ref_for_offset (location_t loc, tree base, HOST_WIDE_INT offset, + tree exp_type, gimple_stmt_iterator *gsi, + bool insert_after) +{ + tree prev_base = base; + tree off; + HOST_WIDE_INT base_offset; - mbit += diff; - msize -= diff; - } - else - { - /* Non-overlapping, great. */ - if (fbit + fsize <= mbit - || mbit + msize <= fbit) - continue; + gcc_checking_assert (offset % BITS_PER_UNIT == 0); - if (fbit <= mbit) - { - unsigned HOST_WIDE_INT diff = fbit + fsize - mbit; - mbit += diff; - msize -= diff; - } - else if (fbit > mbit) - msize -= (mbit + msize - fbit); - else - gcc_unreachable (); - } - } + base = get_addr_base_and_unit_offset (base, &base_offset); + + /* get_addr_base_and_unit_offset returns NULL for references with a variable + offset such as array[var_index]. */ + if (!base) + { + gimple stmt; + tree tmp, addr; + + gcc_checking_assert (gsi); + tmp = create_tmp_reg (build_pointer_type (TREE_TYPE (prev_base)), NULL); + add_referenced_var (tmp); + tmp = make_ssa_name (tmp, NULL); + addr = build_fold_addr_expr (unshare_expr (prev_base)); + STRIP_USELESS_TYPE_CONVERSION (addr); + stmt = gimple_build_assign (tmp, addr); + gimple_set_location (stmt, loc); + SSA_NAME_DEF_STMT (tmp) = stmt; + if (insert_after) + gsi_insert_after (gsi, stmt, GSI_NEW_STMT); + else + gsi_insert_before (gsi, stmt, GSI_SAME_STMT); + update_stmt (stmt); - bit = mbit; - size = msize; + off = build_int_cst (reference_alias_ptr_type (prev_base), + offset / BITS_PER_UNIT); + base = tmp; + } + else if (TREE_CODE (base) == MEM_REF) + { + off = build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), + base_offset + offset / BITS_PER_UNIT); + off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1), off); + base = unshare_expr (TREE_OPERAND (base, 0)); + } + else + { + off = build_int_cst (reference_alias_ptr_type (base), + base_offset + offset / BITS_PER_UNIT); + base = build_fold_addr_expr (unshare_expr (base)); } - /* Now we know the bit range we're interested in. Find the smallest - machine mode we can use to access it. */ + return fold_build2_loc (loc, MEM_REF, exp_type, base, off); +} - for (mode = smallest_mode_for_size (size, MODE_INT); - ; - mode = GET_MODE_WIDER_MODE (mode)) - { - gcc_assert (mode != VOIDmode); +DEF_VEC_ALLOC_P_STACK (tree); +#define VEC_tree_stack_alloc(alloc) VEC_stack_alloc (tree, alloc) - alchk = GET_MODE_PRECISION (mode) - 1; - alchk = ~alchk; +/* Construct a memory reference to a part of an aggregate BASE at the given + OFFSET and of the type of MODEL. In case this is a chain of references + to component, the function will replicate the chain of COMPONENT_REFs of + the expression of MODEL to access it. GSI and INSERT_AFTER have the same + meaning as in build_ref_for_offset. 
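+
+   A rough, illustrative sketch: if the expression of MODEL is s.a.b and
+   BASE is some other aggregate d, the result is approximately
+
+     MEM[&d + off'].a.b
+
+   where off' is OFFSET minus the field offsets of a and b, i.e. the
+   MEM_REF reaches the start of the enclosing record and the
+   COMPONENT_REFs of the model are replicated on top of it.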
*/ - if ((bit & alchk) == ((bit + size - 1) & alchk)) - break; - } +static tree +build_ref_for_model (location_t loc, tree base, HOST_WIDE_INT offset, + struct access *model, gimple_stmt_iterator *gsi, + bool insert_after) +{ + tree type = model->type, t; + VEC(tree,stack) *cr_stack = NULL; - gcc_assert (~alchk < align); + if (TREE_CODE (model->expr) == COMPONENT_REF) + { + tree expr = model->expr; + + /* Create a stack of the COMPONENT_REFs so later we can walk them in + order from inner to outer. */ + cr_stack = VEC_alloc (tree, stack, 6); - /* Create the field group as a single variable. */ + do { + tree field = TREE_OPERAND (expr, 1); + tree cr_offset = component_ref_field_offset (expr); + gcc_assert (cr_offset && host_integerp (cr_offset, 1)); - /* We used to create a type for the mode above, but size turns - to be out not of mode-size. As we need a matching type - to build a BIT_FIELD_REF, use a nonstandard integer type as - fallback. */ - type = lang_hooks.types.type_for_size (size, 1); - if (!type || TYPE_PRECISION (type) != size) - type = build_nonstandard_integer_type (size, 1); - gcc_assert (type); - var = build3 (BIT_FIELD_REF, type, NULL_TREE, - bitsize_int (size), bitsize_int (bit)); + offset -= TREE_INT_CST_LOW (cr_offset) * BITS_PER_UNIT; + offset -= TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field)); - block = instantiate_missing_elements_1 (elt, var, type); - gcc_assert (block && block->is_scalar); + VEC_safe_push (tree, stack, cr_stack, expr); - var = block->replacement; - block->in_bitfld_block = 2; + expr = TREE_OPERAND (expr, 0); + type = TREE_TYPE (expr); + } while (TREE_CODE (expr) == COMPONENT_REF); + } - /* Add the member fields to the group, such that they access - portions of the group variable. */ + t = build_ref_for_offset (loc, base, offset, type, gsi, insert_after); - for (f = first; f != TREE_CHAIN (prev); f = TREE_CHAIN (f)) + if (TREE_CODE (model->expr) == COMPONENT_REF) { - tree field_type = canon_type_for_field (f, elt->element); - struct sra_elt *fld = lookup_element (block, f, field_type, INSERT); + unsigned i; + tree expr; - gcc_assert (fld && fld->is_scalar && !fld->replacement); + /* Now replicate the chain of COMPONENT_REFs from inner to outer. */ + FOR_EACH_VEC_ELT_REVERSE (tree, cr_stack, i, expr) + { + tree field = TREE_OPERAND (expr, 1); + t = fold_build3_loc (loc, COMPONENT_REF, TREE_TYPE (field), t, field, + TREE_OPERAND (expr, 2)); + } - fld->replacement = fold_build3 (BIT_FIELD_REF, field_type, var, - bitsize_int (TYPE_PRECISION (field_type)), - bitsize_int - ((TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f)) - * BITS_PER_UNIT - + (TREE_INT_CST_LOW - (DECL_FIELD_BIT_OFFSET (f))) - - (TREE_INT_CST_LOW - (TREE_OPERAND (block->element, 2)))) - & ~alchk)); - fld->in_bitfld_block = 1; + VEC_free (tree, stack, cr_stack); } - return prev; + return t; } -static void -instantiate_missing_elements (struct sra_elt *elt) -{ - tree type = elt->type; +/* Construct a memory reference consisting of component_refs and array_refs to + a part of an aggregate *RES (which is of type TYPE). The requested part + should have type EXP_TYPE at be the given OFFSET. This function might not + succeed, it returns true when it does and only then *RES points to something + meaningful. This function should be used only to build expressions that we + might need to present to user (e.g. in warnings). In all other situations, + build_ref_for_model or build_ref_for_offset should be used instead. 
*/ - switch (TREE_CODE (type)) +static bool +build_user_friendly_ref_for_offset (tree *res, tree type, HOST_WIDE_INT offset, + tree exp_type) +{ + while (1) { - case RECORD_TYPE: - { - tree f; - for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f)) - if (TREE_CODE (f) == FIELD_DECL) + tree fld; + tree tr_size, index, minidx; + HOST_WIDE_INT el_size; + + if (offset == 0 && exp_type + && types_compatible_p (exp_type, type)) + return true; + + switch (TREE_CODE (type)) + { + case UNION_TYPE: + case QUAL_UNION_TYPE: + case RECORD_TYPE: + for (fld = TYPE_FIELDS (type); fld; fld = DECL_CHAIN (fld)) { - tree last = try_instantiate_multiple_fields (elt, f); + HOST_WIDE_INT pos, size; + tree expr, *expr_ptr; + + if (TREE_CODE (fld) != FIELD_DECL) + continue; - if (last != f) + pos = int_bit_position (fld); + gcc_assert (TREE_CODE (type) == RECORD_TYPE || pos == 0); + tr_size = DECL_SIZE (fld); + if (!tr_size || !host_integerp (tr_size, 1)) + continue; + size = tree_low_cst (tr_size, 1); + if (size == 0) { - f = last; - continue; + if (pos != offset) + continue; } + else if (pos > offset || (pos + size) <= offset) + continue; - instantiate_missing_elements_1 (elt, f, - canon_type_for_field - (f, elt->element)); + expr = build3 (COMPONENT_REF, TREE_TYPE (fld), *res, fld, + NULL_TREE); + expr_ptr = &expr; + if (build_user_friendly_ref_for_offset (expr_ptr, TREE_TYPE (fld), + offset - pos, exp_type)) + { + *res = expr; + return true; + } } - break; - } - - case ARRAY_TYPE: - { - tree i, max, subtype; - - i = TYPE_MIN_VALUE (TYPE_DOMAIN (type)); - max = TYPE_MAX_VALUE (TYPE_DOMAIN (type)); - subtype = TREE_TYPE (type); - - while (1) - { - instantiate_missing_elements_1 (elt, i, subtype); - if (tree_int_cst_equal (i, max)) - break; - i = int_const_binop (PLUS_EXPR, i, integer_one_node, true); - } - - break; - } - - case COMPLEX_TYPE: - type = TREE_TYPE (type); - instantiate_missing_elements_1 (elt, integer_zero_node, type); - instantiate_missing_elements_1 (elt, integer_one_node, type); - break; + return false; - default: - gcc_unreachable (); + case ARRAY_TYPE: + tr_size = TYPE_SIZE (TREE_TYPE (type)); + if (!tr_size || !host_integerp (tr_size, 1)) + return false; + el_size = tree_low_cst (tr_size, 1); + + minidx = TYPE_MIN_VALUE (TYPE_DOMAIN (type)); + if (TREE_CODE (minidx) != INTEGER_CST || el_size == 0) + return false; + index = build_int_cst (TYPE_DOMAIN (type), offset / el_size); + if (!integer_zerop (minidx)) + index = int_const_binop (PLUS_EXPR, index, minidx); + *res = build4 (ARRAY_REF, TREE_TYPE (type), *res, index, + NULL_TREE, NULL_TREE); + offset = offset % el_size; + type = TREE_TYPE (type); + break; + + default: + if (offset != 0) + return false; + + if (exp_type) + return false; + else + return true; + } } } -/* Return true if there is only one non aggregate field in the record, TYPE. - Return false otherwise. */ +/* Return true iff TYPE is stdarg va_list type. */ -static bool -single_scalar_field_in_record_p (tree type) +static inline bool +is_va_list_type (tree type) { - int num_fields = 0; - tree field; - if (TREE_CODE (type) != RECORD_TYPE) - return false; - - for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) - if (TREE_CODE (field) == FIELD_DECL) - { - num_fields++; + return TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (va_list_type_node); +} - if (num_fields == 2) - return false; - - if (AGGREGATE_TYPE_P (TREE_TYPE (field))) - return false; - } +/* Print message to dump file why a variable was rejected. 
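+
+   When dump_file is set and TDF_DETAILS is enabled, the output looks
+   roughly like this (UID and variable name made up):
+
+     Rejected (4211): is volatile: foo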
*/ - return true; +static void +reject (tree var, const char *msg) +{ + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "Rejected (%d): %s: ", DECL_UID (var), msg); + print_generic_expr (dump_file, var, 0); + fprintf (dump_file, "\n"); + } } -/* Make one pass across an element tree deciding whether to perform block - or element copies. If we decide on element copies, instantiate all - elements. Return true if there are any instantiated sub-elements. */ +/* The very first phase of intraprocedural SRA. It marks in candidate_bitmap + those with type which is suitable for scalarization. */ static bool -decide_block_copy (struct sra_elt *elt) +find_var_candidates (void) { - struct sra_elt *c; - bool any_inst; - - /* We shouldn't be invoked on groups of sub-elements as they must - behave like their parent as far as block copy is concerned. */ - gcc_assert (!elt->is_group); + tree var, type; + referenced_var_iterator rvi; + bool ret = false; + const char *msg; - /* If scalarization is disabled, respect it. */ - if (elt->cannot_scalarize) + FOR_EACH_REFERENCED_VAR (cfun, var, rvi) { - elt->use_block_copy = 1; + if (TREE_CODE (var) != VAR_DECL && TREE_CODE (var) != PARM_DECL) + continue; + type = TREE_TYPE (var); - if (dump_file) - { - fputs ("Scalarization disabled for ", dump_file); - dump_sra_elt_name (dump_file, elt); - fputc ('\n', dump_file); + if (!AGGREGATE_TYPE_P (type)) + { + reject (var, "not aggregate"); + continue; } - - /* Disable scalarization of sub-elements */ - for (c = elt->children; c; c = c->sibling) + if (needs_to_live_in_memory (var)) + { + reject (var, "needs to live in memory"); + continue; + } + if (TREE_THIS_VOLATILE (var)) + { + reject (var, "is volatile"); + continue; + } + if (!COMPLETE_TYPE_P (type)) + { + reject (var, "has incomplete type"); + continue; + } + if (!host_integerp (TYPE_SIZE (type), 1)) + { + reject (var, "type size not fixed"); + continue; + } + if (tree_low_cst (TYPE_SIZE (type), 1) == 0) + { + reject (var, "type size is zero"); + continue; + } + if (type_internals_preclude_sra_p (type, &msg)) { - c->cannot_scalarize = 1; - decide_block_copy (c); + reject (var, msg); + continue; } + if (/* Fix for PR 41089. tree-stdarg.c needs to have va_lists intact but + we also want to schedule it rather late. Thus we ignore it in + the early pass. */ + (sra_mode == SRA_MODE_EARLY_INTRA + && is_va_list_type (type))) + { + reject (var, "is va_list"); + continue; + } + + bitmap_set_bit (candidate_bitmap, DECL_UID (var)); - /* Groups behave like their parent. */ - for (c = elt->groups; c; c = c->sibling) + if (dump_file && (dump_flags & TDF_DETAILS)) { - c->cannot_scalarize = 1; - c->use_block_copy = 1; + fprintf (dump_file, "Candidate (%d): ", DECL_UID (var)); + print_generic_expr (dump_file, var, 0); + fprintf (dump_file, "\n"); } - - return false; + ret = true; } - /* Don't decide if we've no uses and no groups. */ - if (elt->n_uses == 0 && elt->n_copies == 0 && elt->groups == NULL) - ; - - else if (!elt->is_scalar) - { - tree size_tree = TYPE_SIZE_UNIT (elt->type); - bool use_block_copy = true; - - /* Tradeoffs for COMPLEX types pretty much always make it better - to go ahead and split the components. */ - if (TREE_CODE (elt->type) == COMPLEX_TYPE) - use_block_copy = false; + return ret; +} - /* Don't bother trying to figure out the rest if the structure is - so large we can't do easy arithmetic. This also forces block - copies for variable sized structures. 
*/ - else if (host_integerp (size_tree, 1)) - { - unsigned HOST_WIDE_INT full_size, inst_size = 0; - unsigned int max_size, max_count, inst_count, full_count; - - /* If the sra-max-structure-size parameter is 0, then the - user has not overridden the parameter and we can choose a - sensible default. */ - max_size = SRA_MAX_STRUCTURE_SIZE - ? SRA_MAX_STRUCTURE_SIZE - : MOVE_RATIO (optimize_function_for_speed_p (cfun)) * UNITS_PER_WORD; - max_count = SRA_MAX_STRUCTURE_COUNT - ? SRA_MAX_STRUCTURE_COUNT - : MOVE_RATIO (optimize_function_for_speed_p (cfun)); - - full_size = tree_low_cst (size_tree, 1); - full_count = count_type_elements (elt->type, false); - inst_count = sum_instantiated_sizes (elt, &inst_size); - - /* If there is only one scalar field in the record, don't block copy. */ - if (single_scalar_field_in_record_p (elt->type)) - use_block_copy = false; - - /* ??? What to do here. If there are two fields, and we've only - instantiated one, then instantiating the other is clearly a win. - If there are a large number of fields then the size of the copy - is much more of a factor. */ - - /* If the structure is small, and we've made copies, go ahead - and instantiate, hoping that the copies will go away. */ - if (full_size <= max_size - && (full_count - inst_count) <= max_count - && elt->n_copies > elt->n_uses) - use_block_copy = false; - else if (inst_count * 100 >= full_count * SRA_FIELD_STRUCTURE_RATIO - && inst_size * 100 >= full_size * SRA_FIELD_STRUCTURE_RATIO) - use_block_copy = false; - - /* In order to avoid block copy, we have to be able to instantiate - all elements of the type. See if this is possible. */ - if (!use_block_copy - && (!can_completely_scalarize_p (elt) - || !type_can_instantiate_all_elements (elt->type))) - use_block_copy = true; - } +/* Sort all accesses for the given variable, check for partial overlaps and + return NULL if there are any. If there are none, pick a representative for + each combination of offset and size and create a linked list out of them. + Return the pointer to the first representative and make sure it is the first + one in the vector of accesses. */ - elt->use_block_copy = use_block_copy; +static struct access * +sort_and_splice_var_accesses (tree var) +{ + int i, j, access_count; + struct access *res, **prev_acc_ptr = &res; + VEC (access_p, heap) *access_vec; + bool first = true; + HOST_WIDE_INT low = -1, high = 0; + + access_vec = get_base_access_vector (var); + if (!access_vec) + return NULL; + access_count = VEC_length (access_p, access_vec); - /* Groups behave like their parent. */ - for (c = elt->groups; c; c = c->sibling) - c->use_block_copy = use_block_copy; + /* Sort by . 
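+     The intended order is the one implemented by compare_access_positions
+     above: increasing offset and, for equal offsets, bigger accesses
+     first.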
*/ + VEC_qsort (access_p, access_vec, compare_access_positions); - if (dump_file) + i = 0; + while (i < access_count) + { + struct access *access = VEC_index (access_p, access_vec, i); + bool grp_write = access->write; + bool grp_read = !access->write; + bool grp_scalar_write = access->write + && is_gimple_reg_type (access->type); + bool grp_scalar_read = !access->write + && is_gimple_reg_type (access->type); + bool grp_assignment_read = access->grp_assignment_read; + bool grp_assignment_write = access->grp_assignment_write; + bool multiple_scalar_reads = false; + bool total_scalarization = access->grp_total_scalarization; + bool grp_partial_lhs = access->grp_partial_lhs; + bool first_scalar = is_gimple_reg_type (access->type); + bool unscalarizable_region = access->grp_unscalarizable_region; + + if (first || access->offset >= high) { - fprintf (dump_file, "Using %s for ", - use_block_copy ? "block-copy" : "element-copy"); - dump_sra_elt_name (dump_file, elt); - fputc ('\n', dump_file); + first = false; + low = access->offset; + high = access->offset + access->size; } + else if (access->offset > low && access->offset + access->size > high) + return NULL; + else + gcc_assert (access->offset >= low + && access->offset + access->size <= high); - if (!use_block_copy) + j = i + 1; + while (j < access_count) { - instantiate_missing_elements (elt); - return true; + struct access *ac2 = VEC_index (access_p, access_vec, j); + if (ac2->offset != access->offset || ac2->size != access->size) + break; + if (ac2->write) + { + grp_write = true; + grp_scalar_write = (grp_scalar_write + || is_gimple_reg_type (ac2->type)); + } + else + { + grp_read = true; + if (is_gimple_reg_type (ac2->type)) + { + if (grp_scalar_read) + multiple_scalar_reads = true; + else + grp_scalar_read = true; + } + } + grp_assignment_read |= ac2->grp_assignment_read; + grp_assignment_write |= ac2->grp_assignment_write; + grp_partial_lhs |= ac2->grp_partial_lhs; + unscalarizable_region |= ac2->grp_unscalarizable_region; + total_scalarization |= ac2->grp_total_scalarization; + relink_to_new_repr (access, ac2); + + /* If there are both aggregate-type and scalar-type accesses with + this combination of size and offset, the comparison function + should have put the scalars first. */ + gcc_assert (first_scalar || !is_gimple_reg_type (ac2->type)); + ac2->group_representative = access; + j++; } - } - any_inst = elt->replacement != NULL; - - for (c = elt->children; c ; c = c->sibling) - any_inst |= decide_block_copy (c); + i = j; + + access->group_representative = access; + access->grp_write = grp_write; + access->grp_read = grp_read; + access->grp_scalar_read = grp_scalar_read; + access->grp_scalar_write = grp_scalar_write; + access->grp_assignment_read = grp_assignment_read; + access->grp_assignment_write = grp_assignment_write; + access->grp_hint = multiple_scalar_reads || total_scalarization; + access->grp_total_scalarization = total_scalarization; + access->grp_partial_lhs = grp_partial_lhs; + access->grp_unscalarizable_region = unscalarizable_region; + if (access->first_link) + add_access_to_work_queue (access); + + *prev_acc_ptr = access; + prev_acc_ptr = &access->next_grp; + } - return any_inst; + gcc_assert (res == VEC_index (access_p, access_vec, 0)); + return res; } -/* Entry point to phase 3. Instantiate scalar replacement variables. */ +/* Create a variable for the given ACCESS which determines the type, name and a + few other properties. Return the variable declaration and store it also to + ACCESS->replacement. 
*/ -static void -decide_instantiations (void) +static tree +create_access_replacement (struct access *access, bool rename) { - unsigned int i; - bool cleared_any; - bitmap_head done_head; - bitmap_iterator bi; + tree repl; + + repl = create_tmp_var (access->type, "SR"); + add_referenced_var (repl); + if (rename) + mark_sym_for_renaming (repl); - /* We cannot clear bits from a bitmap we're iterating over, - so save up all the bits to clear until the end. */ - bitmap_initialize (&done_head, &bitmap_default_obstack); - cleared_any = false; + if (!access->grp_partial_lhs + && (TREE_CODE (access->type) == COMPLEX_TYPE + || TREE_CODE (access->type) == VECTOR_TYPE)) + DECL_GIMPLE_REG_P (repl) = 1; - EXECUTE_IF_SET_IN_BITMAP (sra_candidates, 0, i, bi) + DECL_SOURCE_LOCATION (repl) = DECL_SOURCE_LOCATION (access->base); + DECL_ARTIFICIAL (repl) = 1; + DECL_IGNORED_P (repl) = DECL_IGNORED_P (access->base); + + if (DECL_NAME (access->base) + && !DECL_IGNORED_P (access->base) + && !DECL_ARTIFICIAL (access->base)) { - tree var = referenced_var (i); - struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT); - if (elt) - { - decide_instantiation_1 (elt, 0, 0); - if (!decide_block_copy (elt)) - elt = NULL; - } - if (!elt) - { - bitmap_set_bit (&done_head, i); - cleared_any = true; - } + char *pretty_name = make_fancy_name (access->expr); + tree debug_expr = unshare_expr (access->expr), d; + + DECL_NAME (repl) = get_identifier (pretty_name); + obstack_free (&name_obstack, pretty_name); + + /* Get rid of any SSA_NAMEs embedded in debug_expr, + as DECL_DEBUG_EXPR isn't considered when looking for still + used SSA_NAMEs and thus they could be freed. All debug info + generation cares is whether something is constant or variable + and that get_ref_base_and_extent works properly on the + expression. */ + for (d = debug_expr; handled_component_p (d); d = TREE_OPERAND (d, 0)) + switch (TREE_CODE (d)) + { + case ARRAY_REF: + case ARRAY_RANGE_REF: + if (TREE_OPERAND (d, 1) + && TREE_CODE (TREE_OPERAND (d, 1)) == SSA_NAME) + TREE_OPERAND (d, 1) = SSA_NAME_VAR (TREE_OPERAND (d, 1)); + if (TREE_OPERAND (d, 3) + && TREE_CODE (TREE_OPERAND (d, 3)) == SSA_NAME) + TREE_OPERAND (d, 3) = SSA_NAME_VAR (TREE_OPERAND (d, 3)); + /* FALLTHRU */ + case COMPONENT_REF: + if (TREE_OPERAND (d, 2) + && TREE_CODE (TREE_OPERAND (d, 2)) == SSA_NAME) + TREE_OPERAND (d, 2) = SSA_NAME_VAR (TREE_OPERAND (d, 2)); + break; + default: + break; + } + SET_DECL_DEBUG_EXPR (repl, debug_expr); + DECL_DEBUG_EXPR_IS_FROM (repl) = 1; + if (access->grp_no_warning) + TREE_NO_WARNING (repl) = 1; + else + TREE_NO_WARNING (repl) = TREE_NO_WARNING (access->base); } + else + TREE_NO_WARNING (repl) = 1; - if (cleared_any) + if (dump_file) { - bitmap_and_compl_into (sra_candidates, &done_head); - bitmap_and_compl_into (needs_copy_in, &done_head); + fprintf (dump_file, "Created a replacement for "); + print_generic_expr (dump_file, access->base, 0); + fprintf (dump_file, " offset: %u, size: %u: ", + (unsigned) access->offset, (unsigned) access->size); + print_generic_expr (dump_file, repl, 0); + fprintf (dump_file, "\n"); } - bitmap_clear (&done_head); - - mark_set_for_renaming (sra_candidates); + sra_stats.replacements++; - if (dump_file) - fputc ('\n', dump_file); + return repl; } - -/* Phase Four: Update the function to match the replacements created. */ - -/* Mark all the variables in virtual operands in all the statements in - LIST for renaming. */ +/* Return ACCESS scalar replacement, create it if it does not exist yet. 
*/ -static void -mark_all_v_defs_seq (gimple_seq seq) +static inline tree +get_access_replacement (struct access *access) { - gimple_stmt_iterator gsi; + gcc_assert (access->grp_to_be_replaced); - for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi)) - update_stmt_if_modified (gsi_stmt (gsi)); + if (!access->replacement_decl) + access->replacement_decl = create_access_replacement (access, true); + return access->replacement_decl; } -/* Mark every replacement under ELT with TREE_NO_WARNING. */ +/* Return ACCESS scalar replacement, create it if it does not exist yet but do + not mark it for renaming. */ -static void -mark_no_warning (struct sra_elt *elt) +static inline tree +get_unrenamed_access_replacement (struct access *access) { - if (!elt->all_no_warning) - { - if (elt->replacement) - TREE_NO_WARNING (elt->replacement) = 1; - else - { - struct sra_elt *c; - FOR_EACH_ACTUAL_CHILD (c, elt) - mark_no_warning (c); - } - elt->all_no_warning = true; - } + gcc_assert (!access->grp_to_be_replaced); + + if (!access->replacement_decl) + access->replacement_decl = create_access_replacement (access, false); + return access->replacement_decl; } -/* Build a single level component reference to ELT rooted at BASE. */ -static tree -generate_one_element_ref (struct sra_elt *elt, tree base) +/* Build a subtree of accesses rooted in *ACCESS, and move the pointer in the + linked list along the way. Stop when *ACCESS is NULL or the access pointed + to it is not "within" the root. Return false iff some accesses partially + overlap. */ + +static bool +build_access_subtree (struct access **access) { - switch (TREE_CODE (TREE_TYPE (base))) + struct access *root = *access, *last_child = NULL; + HOST_WIDE_INT limit = root->offset + root->size; + + *access = (*access)->next_grp; + while (*access && (*access)->offset + (*access)->size <= limit) { - case RECORD_TYPE: - { - tree field = elt->element; + if (!last_child) + root->first_child = *access; + else + last_child->next_sibling = *access; + last_child = *access; - /* We can't test elt->in_bitfld_block here because, when this is - called from instantiate_element, we haven't set this field - yet. */ - if (TREE_CODE (field) == BIT_FIELD_REF) - { - tree ret = unshare_expr (field); - TREE_OPERAND (ret, 0) = base; - return ret; - } + if (!build_access_subtree (access)) + return false; + } - /* Watch out for compatible records with differing field lists. */ - if (DECL_FIELD_CONTEXT (field) != TYPE_MAIN_VARIANT (TREE_TYPE (base))) - field = find_compatible_field (TREE_TYPE (base), field); + if (*access && (*access)->offset < limit) + return false; - return build3 (COMPONENT_REF, elt->type, base, field, NULL); - } + return true; +} - case ARRAY_TYPE: - if (TREE_CODE (elt->element) == RANGE_EXPR) - return build4 (ARRAY_RANGE_REF, elt->type, base, - TREE_OPERAND (elt->element, 0), NULL, NULL); - else - return build4 (ARRAY_REF, elt->type, base, elt->element, NULL, NULL); +/* Build a tree of access representatives, ACCESS is the pointer to the first + one, others are linked in a list by the next_grp field. Return false iff + some accesses partially overlap. 
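+
+   A made-up illustration (offsets and sizes in bits): given the sorted
+   representatives
+
+     (offset 0, size 64), (offset 0, size 32), (offset 32, size 32)
+
+   the result is a single tree whose root is the (0, 64) access and
+   whose children are the two 32-bit accesses.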
*/ - case COMPLEX_TYPE: - if (elt->element == integer_zero_node) - return build1 (REALPART_EXPR, elt->type, base); - else - return build1 (IMAGPART_EXPR, elt->type, base); +static bool +build_access_trees (struct access *access) +{ + while (access) + { + struct access *root = access; - default: - gcc_unreachable (); + if (!build_access_subtree (&access)) + return false; + root->next_grp = access; } + return true; } -/* Build a full component reference to ELT rooted at its native variable. */ +/* Return true if expr contains some ARRAY_REFs into a variable bounded + array. */ -static tree -generate_element_ref (struct sra_elt *elt) +static bool +expr_with_var_bounded_array_refs_p (tree expr) { - if (elt->parent) - return generate_one_element_ref (elt, generate_element_ref (elt->parent)); - else - return elt->element; + while (handled_component_p (expr)) + { + if (TREE_CODE (expr) == ARRAY_REF + && !host_integerp (array_ref_low_bound (expr), 0)) + return true; + expr = TREE_OPERAND (expr, 0); + } + return false; } -/* Return true if BF is a bit-field that we can handle like a scalar. */ +/* Analyze the subtree of accesses rooted in ROOT, scheduling replacements when + both seeming beneficial and when ALLOW_REPLACEMENTS allows it. Also set all + sorts of access flags appropriately along the way, notably always set + grp_read and grp_assign_read according to MARK_READ and grp_write when + MARK_WRITE is true. + + Creating a replacement for a scalar access is considered beneficial if its + grp_hint is set (this means we are either attempting total scalarization or + there is more than one direct read access) or according to the following + table: + + Access written to through a scalar type (once or more times) + | + | Written to in an assignment statement + | | + | | Access read as scalar _once_ + | | | + | | | Read in an assignment statement + | | | | + | | | | Scalarize Comment +----------------------------------------------------------------------------- + 0 0 0 0 No access for the scalar + 0 0 0 1 No access for the scalar + 0 0 1 0 No Single read - won't help + 0 0 1 1 No The same case + 0 1 0 0 No access for the scalar + 0 1 0 1 No access for the scalar + 0 1 1 0 Yes s = *g; return s.i; + 0 1 1 1 Yes The same case as above + 1 0 0 0 No Won't help + 1 0 0 1 Yes s.i = 1; *g = s; + 1 0 1 0 Yes s.i = 5; g = s.i; + 1 0 1 1 Yes The same case as above + 1 1 0 0 No Won't help. + 1 1 0 1 Yes s.i = 1; *g = s; + 1 1 1 0 Yes s = *g; return s.i; + 1 1 1 1 Yes Any of the above yeses */ static bool -scalar_bitfield_p (tree bf) +analyze_access_subtree (struct access *root, struct access *parent, + bool allow_replacements) { - return (TREE_CODE (bf) == BIT_FIELD_REF - && (is_gimple_reg (TREE_OPERAND (bf, 0)) - || (TYPE_MODE (TREE_TYPE (TREE_OPERAND (bf, 0))) != BLKmode - && (!TREE_SIDE_EFFECTS (TREE_OPERAND (bf, 0)) - || (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE - (TREE_OPERAND (bf, 0)))) - <= BITS_PER_WORD))))); -} + struct access *child; + HOST_WIDE_INT limit = root->offset + root->size; + HOST_WIDE_INT covered_to = root->offset; + bool scalar = is_gimple_reg_type (root->type); + bool hole = false, sth_created = false; -/* Create an assignment statement from SRC to DST. 
*/ + if (parent) + { + if (parent->grp_read) + root->grp_read = 1; + if (parent->grp_assignment_read) + root->grp_assignment_read = 1; + if (parent->grp_write) + root->grp_write = 1; + if (parent->grp_assignment_write) + root->grp_assignment_write = 1; + if (parent->grp_total_scalarization) + root->grp_total_scalarization = 1; + } -static gimple_seq -sra_build_assignment (tree dst, tree src) -{ - gimple stmt; - gimple_seq seq = NULL, seq2 = NULL; - /* Turning BIT_FIELD_REFs into bit operations enables other passes - to do a much better job at optimizing the code. - From dst = BIT_FIELD_REF we produce - - SR.1 = (scalar type) var; - SR.2 = SR.1 >> off; - SR.3 = SR.2 & ((1 << sz) - 1); - ... possible sign extension of SR.3 ... - dst = (destination type) SR.3; - */ - if (scalar_bitfield_p (src)) - { - tree var, shift, width; - tree utype, stype; - bool unsignedp = (INTEGRAL_TYPE_P (TREE_TYPE (src)) - ? TYPE_UNSIGNED (TREE_TYPE (src)) : true); - struct gimplify_ctx gctx; - - var = TREE_OPERAND (src, 0); - width = TREE_OPERAND (src, 1); - /* The offset needs to be adjusted to a right shift quantity - depending on the endianness. */ - if (BYTES_BIG_ENDIAN) - { - tree tmp = size_binop (PLUS_EXPR, width, TREE_OPERAND (src, 2)); - shift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), tmp); - } - else - shift = TREE_OPERAND (src, 2); - - /* In weird cases we have non-integral types for the source or - destination object. - ??? For unknown reasons we also want an unsigned scalar type. */ - stype = TREE_TYPE (var); - if (!INTEGRAL_TYPE_P (stype)) - stype = lang_hooks.types.type_for_size (TREE_INT_CST_LOW - (TYPE_SIZE (stype)), 1); - else if (!TYPE_UNSIGNED (stype)) - stype = unsigned_type_for (stype); - - utype = TREE_TYPE (dst); - if (!INTEGRAL_TYPE_P (utype)) - utype = lang_hooks.types.type_for_size (TREE_INT_CST_LOW - (TYPE_SIZE (utype)), 1); - else if (!TYPE_UNSIGNED (utype)) - utype = unsigned_type_for (utype); - - /* Convert the base var of the BIT_FIELD_REF to the scalar type - we use for computation if we cannot use it directly. */ - if (INTEGRAL_TYPE_P (TREE_TYPE (var))) - var = fold_convert (stype, var); - else - var = fold_build1 (VIEW_CONVERT_EXPR, stype, var); + if (root->grp_unscalarizable_region) + allow_replacements = false; - if (!integer_zerop (shift)) - var = fold_build2 (RSHIFT_EXPR, stype, var, shift); + if (allow_replacements && expr_with_var_bounded_array_refs_p (root->expr)) + allow_replacements = false; - /* If we need a masking operation, produce one. 
*/ - if (TREE_INT_CST_LOW (width) == TYPE_PRECISION (stype)) - unsignedp = true; + for (child = root->first_child; child; child = child->next_sibling) + { + hole |= covered_to < child->offset; + sth_created |= analyze_access_subtree (child, root, + allow_replacements && !scalar); + + root->grp_unscalarized_data |= child->grp_unscalarized_data; + root->grp_total_scalarization &= child->grp_total_scalarization; + if (child->grp_covered) + covered_to += child->size; else + hole = true; + } + + if (allow_replacements && scalar && !root->first_child + && (root->grp_hint + || ((root->grp_scalar_read || root->grp_assignment_read) + && (root->grp_scalar_write || root->grp_assignment_write)))) + { + bool new_integer_type; + if (TREE_CODE (root->type) == ENUMERAL_TYPE) { - tree one = build_int_cst_wide (stype, 1, 0); - tree mask = int_const_binop (LSHIFT_EXPR, one, width, 0); - mask = int_const_binop (MINUS_EXPR, mask, one, 0); - var = fold_build2 (BIT_AND_EXPR, stype, var, mask); + tree rt = root->type; + root->type = build_nonstandard_integer_type (TYPE_PRECISION (rt), + TYPE_UNSIGNED (rt)); + new_integer_type = true; } + else + new_integer_type = false; - /* After shifting and masking, convert to the target type. */ - var = fold_convert (utype, var); - - /* Perform sign extension, if required. - ??? This should never be necessary. */ - if (!unsignedp) + if (dump_file && (dump_flags & TDF_DETAILS)) { - tree signbit = int_const_binop (LSHIFT_EXPR, - build_int_cst_wide (utype, 1, 0), - size_binop (MINUS_EXPR, width, - bitsize_int (1)), 0); + fprintf (dump_file, "Marking "); + print_generic_expr (dump_file, root->base, 0); + fprintf (dump_file, " offset: %u, size: %u ", + (unsigned) root->offset, (unsigned) root->size); + fprintf (dump_file, " to be replaced%s.\n", + new_integer_type ? " with an integer": ""); + } + + root->grp_to_be_replaced = 1; + sth_created = true; + hole = false; + } + else + { + if (covered_to < limit) + hole = true; + if (scalar) + root->grp_total_scalarization = 0; + } + + if (sth_created + && (!hole || root->grp_total_scalarization)) + { + root->grp_covered = 1; + return true; + } + if (root->grp_write || TREE_CODE (root->base) == PARM_DECL) + root->grp_unscalarized_data = 1; /* not covered and written to */ + if (sth_created) + return true; + return false; +} + +/* Analyze all access trees linked by next_grp by the means of + analyze_access_subtree. */ +static bool +analyze_access_trees (struct access *access) +{ + bool ret = false; + + while (access) + { + if (analyze_access_subtree (access, NULL, true)) + ret = true; + access = access->next_grp; + } - var = fold_build2 (BIT_XOR_EXPR, utype, var, signbit); - var = fold_build2 (MINUS_EXPR, utype, var, signbit); + return ret; +} + +/* Return true iff a potential new child of LACC at offset OFFSET and with size + SIZE would conflict with an already existing one. If exactly such a child + already exists in LACC, store a pointer to it in EXACT_MATCH. */ + +static bool +child_would_conflict_in_lacc (struct access *lacc, HOST_WIDE_INT norm_offset, + HOST_WIDE_INT size, struct access **exact_match) +{ + struct access *child; + + for (child = lacc->first_child; child; child = child->next_sibling) + { + if (child->offset == norm_offset && child->size == size) + { + *exact_match = child; + return true; } - /* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. 
*/ - STRIP_NOPS (dst); + if (child->offset < norm_offset + size + && child->offset + child->size > norm_offset) + return true; + } - /* Finally, move and convert to the destination. */ - if (INTEGRAL_TYPE_P (TREE_TYPE (dst))) - var = fold_convert (TREE_TYPE (dst), var); - else - var = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (dst), var); + return false; +} - push_gimplify_context (&gctx); - gctx.into_ssa = true; - gctx.allow_rhs_cond_expr = true; +/* Create a new child access of PARENT, with all properties just like MODEL + except for its offset and with its grp_write false and grp_read true. + Return the new access or NULL if it cannot be created. Note that this access + is created long after all splicing and sorting, it's not located in any + access vector and is automatically a representative of its group. */ - gimplify_assign (dst, var, &seq); +static struct access * +create_artificial_child_access (struct access *parent, struct access *model, + HOST_WIDE_INT new_offset) +{ + struct access *access; + struct access **child; + tree expr = parent->base; - if (gimple_referenced_vars (cfun)) - for (var = gctx.temps; var; var = TREE_CHAIN (var)) - add_referenced_var (var); - pop_gimplify_context (NULL); + gcc_assert (!model->grp_unscalarizable_region); - return seq; + access = (struct access *) pool_alloc (access_pool); + memset (access, 0, sizeof (struct access)); + if (!build_user_friendly_ref_for_offset (&expr, TREE_TYPE (expr), new_offset, + model->type)) + { + access->grp_no_warning = true; + expr = build_ref_for_model (EXPR_LOCATION (parent->base), parent->base, + new_offset, model, NULL, false); } - /* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. */ - if (CONVERT_EXPR_P (dst)) + access->base = parent->base; + access->expr = expr; + access->offset = new_offset; + access->size = model->size; + access->type = model->type; + access->grp_write = true; + access->grp_read = false; + + child = &parent->first_child; + while (*child && (*child)->offset < new_offset) + child = &(*child)->next_sibling; + + access->next_sibling = *child; + *child = access; + + return access; +} + + +/* Propagate all subaccesses of RACC across an assignment link to LACC. Return + true if any new subaccess was created. Additionally, if RACC is a scalar + access but LACC is not, change the type of the latter, if possible. */ + +static bool +propagate_subaccesses_across_link (struct access *lacc, struct access *racc) +{ + struct access *rchild; + HOST_WIDE_INT norm_delta = lacc->offset - racc->offset; + bool ret = false; + + if (is_gimple_reg_type (lacc->type) + || lacc->grp_unscalarizable_region + || racc->grp_unscalarizable_region) + return false; + + if (is_gimple_reg_type (racc->type)) { - STRIP_NOPS (dst); - src = fold_convert (TREE_TYPE (dst), src); + if (!lacc->first_child && !racc->first_child) + { + tree t = lacc->base; + + lacc->type = racc->type; + if (build_user_friendly_ref_for_offset (&t, TREE_TYPE (t), + lacc->offset, racc->type)) + lacc->expr = t; + else + { + lacc->expr = build_ref_for_model (EXPR_LOCATION (lacc->base), + lacc->base, lacc->offset, + racc, NULL, false); + lacc->grp_no_warning = true; + } + } + return false; } - /* It was hoped that we could perform some type sanity checking - here, but since front-ends can emit accesses of fields in types - different from their nominal types and copy structures containing - them as a whole, we'd have to handle such differences here. 
- Since such accesses under different types require compatibility - anyway, there's little point in making tests and/or adding - conversions to ensure the types of src and dst are the same. - So we just assume type differences at this point are ok. - The only exception we make here are pointer types, which can be different - in e.g. structurally equal, but non-identical RECORD_TYPEs. */ - else if (POINTER_TYPE_P (TREE_TYPE (dst)) - && !useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (src))) - src = fold_convert (TREE_TYPE (dst), src); - /* ??? Only call the gimplifier if we need to. Otherwise we may - end up substituting with DECL_VALUE_EXPR - see PR37380. */ - if (!handled_component_p (src) - && !SSA_VAR_P (src)) + for (rchild = racc->first_child; rchild; rchild = rchild->next_sibling) { - src = force_gimple_operand (src, &seq2, false, NULL_TREE); - gimple_seq_add_seq (&seq, seq2); + struct access *new_acc = NULL; + HOST_WIDE_INT norm_offset = rchild->offset + norm_delta; + + if (rchild->grp_unscalarizable_region) + continue; + + if (child_would_conflict_in_lacc (lacc, norm_offset, rchild->size, + &new_acc)) + { + if (new_acc) + { + rchild->grp_hint = 1; + new_acc->grp_hint |= new_acc->grp_read; + if (rchild->first_child) + ret |= propagate_subaccesses_across_link (new_acc, rchild); + } + continue; + } + + rchild->grp_hint = 1; + new_acc = create_artificial_child_access (lacc, rchild, norm_offset); + if (new_acc) + { + ret = true; + if (racc->first_child) + propagate_subaccesses_across_link (new_acc, rchild); + } } - stmt = gimple_build_assign (dst, src); - gimple_seq_add_stmt (&seq, stmt); - return seq; + + return ret; } -/* BIT_FIELD_REFs must not be shared. sra_build_elt_assignment() - takes care of assignments, but we must create copies for uses. */ -#define REPLDUP(t) (TREE_CODE (t) != BIT_FIELD_REF ? (t) : unshare_expr (t)) +/* Propagate all subaccesses across assignment links. */ + +static void +propagate_all_subaccesses (void) +{ + while (work_queue_head) + { + struct access *racc = pop_access_from_work_queue (); + struct assign_link *link; + + gcc_assert (racc->first_link); + + for (link = racc->first_link; link; link = link->next) + { + struct access *lacc = link->lacc; + + if (!bitmap_bit_p (candidate_bitmap, DECL_UID (lacc->base))) + continue; + lacc = lacc->group_representative; + if (propagate_subaccesses_across_link (lacc, racc) + && lacc->first_link) + add_access_to_work_queue (lacc); + } + } +} -/* Emit an assignment from SRC to DST, but if DST is a scalarizable - BIT_FIELD_REF, turn it into bit operations. */ +/* Go through all accesses collected throughout the (intraprocedural) analysis + stage, exclude overlapping ones, identify representatives and build trees + out of them, making decisions about scalarization on the way. Return true + iff there are any to-be-scalarized variables after this stage. 
*/ -static gimple_seq -sra_build_bf_assignment (tree dst, tree src) +static bool +analyze_all_variable_accesses (void) { - tree var, type, utype, tmp, tmp2, tmp3; - gimple_seq seq; - gimple stmt; - tree cst, cst2, mask; - tree minshift, maxshift; + int res = 0; + bitmap tmp = BITMAP_ALLOC (NULL); + bitmap_iterator bi; + unsigned i, max_total_scalarization_size; - if (TREE_CODE (dst) != BIT_FIELD_REF) - return sra_build_assignment (dst, src); + max_total_scalarization_size = UNITS_PER_WORD * BITS_PER_UNIT + * MOVE_RATIO (optimize_function_for_speed_p (cfun)); - var = TREE_OPERAND (dst, 0); + EXECUTE_IF_SET_IN_BITMAP (candidate_bitmap, 0, i, bi) + if (bitmap_bit_p (should_scalarize_away_bitmap, i) + && !bitmap_bit_p (cannot_scalarize_away_bitmap, i)) + { + tree var = referenced_var (i); - if (!scalar_bitfield_p (dst)) - return sra_build_assignment (REPLDUP (dst), src); + if (TREE_CODE (var) == VAR_DECL + && type_consists_of_records_p (TREE_TYPE (var))) + { + if ((unsigned) tree_low_cst (TYPE_SIZE (TREE_TYPE (var)), 1) + <= max_total_scalarization_size) + { + completely_scalarize_var (var); + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "Will attempt to totally scalarize "); + print_generic_expr (dump_file, var, 0); + fprintf (dump_file, " (UID: %u): \n", DECL_UID (var)); + } + } + else if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "Too big to totally scalarize: "); + print_generic_expr (dump_file, var, 0); + fprintf (dump_file, " (UID: %u)\n", DECL_UID (var)); + } + } + } - seq = NULL; + bitmap_copy (tmp, candidate_bitmap); + EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi) + { + tree var = referenced_var (i); + struct access *access; + + access = sort_and_splice_var_accesses (var); + if (!access || !build_access_trees (access)) + disqualify_candidate (var, + "No or inhibitingly overlapping accesses."); + } - cst = fold_convert (bitsizetype, TREE_OPERAND (dst, 2)); - cst2 = size_binop (PLUS_EXPR, - fold_convert (bitsizetype, TREE_OPERAND (dst, 1)), - cst); + propagate_all_subaccesses (); - if (BYTES_BIG_ENDIAN) + bitmap_copy (tmp, candidate_bitmap); + EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi) { - maxshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst); - minshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst2); + tree var = referenced_var (i); + struct access *access = get_first_repr_for_decl (var); + + if (analyze_access_trees (access)) + { + res++; + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "\nAccess trees for "); + print_generic_expr (dump_file, var, 0); + fprintf (dump_file, " (UID: %u): \n", DECL_UID (var)); + dump_access_tree (dump_file, access); + fprintf (dump_file, "\n"); + } + } + else + disqualify_candidate (var, "No scalar replacements to be created."); + } + + BITMAP_FREE (tmp); + + if (res) + { + statistics_counter_event (cfun, "Scalarized aggregates", res); + return true; } else + return false; +} + +/* Generate statements copying scalar replacements of accesses within a subtree + into or out of AGG. ACCESS, all its children, siblings and their children + are to be processed. AGG is an aggregate type expression (can be a + declaration but does not have to be, it can for example also be a mem_ref or + a series of handled components). TOP_OFFSET is the offset of the processed + subtree which has to be subtracted from offsets of individual accesses to + get corresponding offsets for AGG. If CHUNK_SIZE is non-null, copy only + replacements in the interval , + otherwise copy all. 
GSI is a statement iterator used to place the new + statements. WRITE should be true when the statements should write from AGG + to the replacement and false if vice versa. if INSERT_AFTER is true, new + statements will be added after the current statement in GSI, they will be + added before the statement otherwise. */ + +static void +generate_subtree_copies (struct access *access, tree agg, + HOST_WIDE_INT top_offset, + HOST_WIDE_INT start_offset, HOST_WIDE_INT chunk_size, + gimple_stmt_iterator *gsi, bool write, + bool insert_after, location_t loc) +{ + do + { + if (chunk_size && access->offset >= start_offset + chunk_size) + return; + + if (access->grp_to_be_replaced + && (chunk_size == 0 + || access->offset + access->size > start_offset)) + { + tree expr, repl = get_access_replacement (access); + gimple stmt; + + expr = build_ref_for_model (loc, agg, access->offset - top_offset, + access, gsi, insert_after); + + if (write) + { + if (access->grp_partial_lhs) + expr = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, + !insert_after, + insert_after ? GSI_NEW_STMT + : GSI_SAME_STMT); + stmt = gimple_build_assign (repl, expr); + } + else + { + TREE_NO_WARNING (repl) = 1; + if (access->grp_partial_lhs) + repl = force_gimple_operand_gsi (gsi, repl, true, NULL_TREE, + !insert_after, + insert_after ? GSI_NEW_STMT + : GSI_SAME_STMT); + stmt = gimple_build_assign (expr, repl); + } + gimple_set_location (stmt, loc); + + if (insert_after) + gsi_insert_after (gsi, stmt, GSI_NEW_STMT); + else + gsi_insert_before (gsi, stmt, GSI_SAME_STMT); + update_stmt (stmt); + sra_stats.subtree_copies++; + } + + if (access->first_child) + generate_subtree_copies (access->first_child, agg, top_offset, + start_offset, chunk_size, gsi, + write, insert_after, loc); + + access = access->next_sibling; + } + while (access); +} + +/* Assign zero to all scalar replacements in an access subtree. ACCESS is the + the root of the subtree to be processed. GSI is the statement iterator used + for inserting statements which are added after the current statement if + INSERT_AFTER is true or before it otherwise. */ + +static void +init_subtree_with_zero (struct access *access, gimple_stmt_iterator *gsi, + bool insert_after, location_t loc) + +{ + struct access *child; + + if (access->grp_to_be_replaced) { - maxshift = cst2; - minshift = cst; + gimple stmt; + + stmt = gimple_build_assign (get_access_replacement (access), + build_zero_cst (access->type)); + if (insert_after) + gsi_insert_after (gsi, stmt, GSI_NEW_STMT); + else + gsi_insert_before (gsi, stmt, GSI_SAME_STMT); + update_stmt (stmt); + gimple_set_location (stmt, loc); } - type = TREE_TYPE (var); - if (!INTEGRAL_TYPE_P (type)) - type = lang_hooks.types.type_for_size - (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (var))), 1); - if (TYPE_UNSIGNED (type)) - utype = type; + for (child = access->first_child; child; child = child->next_sibling) + init_subtree_with_zero (child, gsi, insert_after, loc); +} + +/* Search for an access representative for the given expression EXPR and + return it or NULL if it cannot be found. */ + +static struct access * +get_access_for_expr (tree expr) +{ + HOST_WIDE_INT offset, size, max_size; + tree base; + + /* FIXME: This should not be necessary but Ada produces V_C_Es with a type of + a different size than the size of its argument and we need the latter + one. 
*/ + if (TREE_CODE (expr) == VIEW_CONVERT_EXPR) + expr = TREE_OPERAND (expr, 0); + + base = get_ref_base_and_extent (expr, &offset, &size, &max_size); + if (max_size == -1 || !DECL_P (base)) + return NULL; + + if (!bitmap_bit_p (candidate_bitmap, DECL_UID (base))) + return NULL; + + return get_var_base_offset_size_access (base, offset, max_size); +} + +/* Replace the expression EXPR with a scalar replacement if there is one and + generate other statements to do type conversion or subtree copying if + necessary. GSI is used to place newly created statements, WRITE is true if + the expression is being written to (it is on a LHS of a statement or output + in an assembly statement). */ + +static bool +sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write) +{ + location_t loc; + struct access *access; + tree type, bfr; + + if (TREE_CODE (*expr) == BIT_FIELD_REF) + { + bfr = *expr; + expr = &TREE_OPERAND (*expr, 0); + } else - utype = unsigned_type_for (type); + bfr = NULL_TREE; + + if (TREE_CODE (*expr) == REALPART_EXPR || TREE_CODE (*expr) == IMAGPART_EXPR) + expr = &TREE_OPERAND (*expr, 0); + access = get_access_for_expr (*expr); + if (!access) + return false; + type = TREE_TYPE (*expr); + + loc = gimple_location (gsi_stmt (*gsi)); + if (access->grp_to_be_replaced) + { + tree repl = get_access_replacement (access); + /* If we replace a non-register typed access simply use the original + access expression to extract the scalar component afterwards. + This happens if scalarizing a function return value or parameter + like in gcc.c-torture/execute/20041124-1.c, 20050316-1.c and + gcc.c-torture/compile/20011217-1.c. + + We also want to use this when accessing a complex or vector which can + be accessed as a different type too, potentially creating a need for + type conversion (see PR42196) and when scalarized unions are involved + in assembler statements (see PR42398). */ + if (!useless_type_conversion_p (type, access->type)) + { + tree ref; + + ref = build_ref_for_model (loc, access->base, access->offset, access, + NULL, false); + + if (write) + { + gimple stmt; + + if (access->grp_partial_lhs) + ref = force_gimple_operand_gsi (gsi, ref, true, NULL_TREE, + false, GSI_NEW_STMT); + stmt = gimple_build_assign (repl, ref); + gimple_set_location (stmt, loc); + gsi_insert_after (gsi, stmt, GSI_NEW_STMT); + } + else + { + gimple stmt; + + if (access->grp_partial_lhs) + repl = force_gimple_operand_gsi (gsi, repl, true, NULL_TREE, + true, GSI_SAME_STMT); + stmt = gimple_build_assign (ref, repl); + gimple_set_location (stmt, loc); + gsi_insert_before (gsi, stmt, GSI_SAME_STMT); + } + } + else + *expr = repl; + sra_stats.exprs++; + } + + if (access->first_child) + { + HOST_WIDE_INT start_offset, chunk_size; + if (bfr + && host_integerp (TREE_OPERAND (bfr, 1), 1) + && host_integerp (TREE_OPERAND (bfr, 2), 1)) + { + chunk_size = tree_low_cst (TREE_OPERAND (bfr, 1), 1); + start_offset = access->offset + + tree_low_cst (TREE_OPERAND (bfr, 2), 1); + } + else + start_offset = chunk_size = 0; + + generate_subtree_copies (access->first_child, access->base, 0, + start_offset, chunk_size, gsi, write, write, + loc); + } + return true; +} + +/* Where scalar replacements of the RHS have been written to when a replacement + of a LHS of an assigments cannot be direclty loaded from a replacement of + the RHS. */ +enum unscalarized_data_handling { SRA_UDH_NONE, /* Nothing done so far. */ + SRA_UDH_RIGHT, /* Data flushed to the RHS. */ + SRA_UDH_LEFT }; /* Data flushed to the LHS. 
*/ - mask = build_int_cst_wide (utype, 1, 0); - if (TREE_INT_CST_LOW (maxshift) == TYPE_PRECISION (utype)) - cst = build_int_cst_wide (utype, 0, 0); +/* Store all replacements in the access tree rooted in TOP_RACC either to their + base aggregate if there are unscalarized data or directly to LHS of the + statement that is pointed to by GSI otherwise. */ + +static enum unscalarized_data_handling +handle_unscalarized_data_in_subtree (struct access *top_racc, + gimple_stmt_iterator *gsi) +{ + if (top_racc->grp_unscalarized_data) + { + generate_subtree_copies (top_racc->first_child, top_racc->base, 0, 0, 0, + gsi, false, false, + gimple_location (gsi_stmt (*gsi))); + return SRA_UDH_RIGHT; + } else - cst = int_const_binop (LSHIFT_EXPR, mask, maxshift, true); - if (integer_zerop (minshift)) - cst2 = mask; + { + tree lhs = gimple_assign_lhs (gsi_stmt (*gsi)); + generate_subtree_copies (top_racc->first_child, lhs, top_racc->offset, + 0, 0, gsi, false, false, + gimple_location (gsi_stmt (*gsi))); + return SRA_UDH_LEFT; + } +} + + +/* Try to generate statements to load all sub-replacements in an access subtree + formed by children of LACC from scalar replacements in the TOP_RACC subtree. + If that is not possible, refresh the TOP_RACC base aggregate and load the + accesses from it. LEFT_OFFSET is the offset of the left whole subtree being + copied. NEW_GSI is stmt iterator used for statement insertions after the + original assignment, OLD_GSI is used to insert statements before the + assignment. *REFRESHED keeps the information whether we have needed to + refresh replacements of the LHS and from which side of the assignments this + takes place. */ + +static void +load_assign_lhs_subreplacements (struct access *lacc, struct access *top_racc, + HOST_WIDE_INT left_offset, + gimple_stmt_iterator *old_gsi, + gimple_stmt_iterator *new_gsi, + enum unscalarized_data_handling *refreshed) +{ + location_t loc = gimple_location (gsi_stmt (*old_gsi)); + for (lacc = lacc->first_child; lacc; lacc = lacc->next_sibling) + { + if (lacc->grp_to_be_replaced) + { + struct access *racc; + HOST_WIDE_INT offset = lacc->offset - left_offset + top_racc->offset; + gimple stmt; + tree rhs; + + racc = find_access_in_subtree (top_racc, offset, lacc->size); + if (racc && racc->grp_to_be_replaced) + { + rhs = get_access_replacement (racc); + if (!useless_type_conversion_p (lacc->type, racc->type)) + rhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, lacc->type, rhs); + + if (racc->grp_partial_lhs && lacc->grp_partial_lhs) + rhs = force_gimple_operand_gsi (old_gsi, rhs, true, NULL_TREE, + true, GSI_SAME_STMT); + } + else + { + /* No suitable access on the right hand side, need to load from + the aggregate. See if we have to update it first... 
*/ + if (*refreshed == SRA_UDH_NONE) + *refreshed = handle_unscalarized_data_in_subtree (top_racc, + old_gsi); + + if (*refreshed == SRA_UDH_LEFT) + rhs = build_ref_for_model (loc, lacc->base, lacc->offset, lacc, + new_gsi, true); + else + rhs = build_ref_for_model (loc, top_racc->base, offset, lacc, + new_gsi, true); + if (lacc->grp_partial_lhs) + rhs = force_gimple_operand_gsi (new_gsi, rhs, true, NULL_TREE, + false, GSI_NEW_STMT); + } + + stmt = gimple_build_assign (get_access_replacement (lacc), rhs); + gsi_insert_after (new_gsi, stmt, GSI_NEW_STMT); + gimple_set_location (stmt, loc); + update_stmt (stmt); + sra_stats.subreplacements++; + } + else if (*refreshed == SRA_UDH_NONE + && lacc->grp_read && !lacc->grp_covered) + *refreshed = handle_unscalarized_data_in_subtree (top_racc, + old_gsi); + + if (lacc->first_child) + load_assign_lhs_subreplacements (lacc, top_racc, left_offset, + old_gsi, new_gsi, refreshed); + } +} + +/* Result code for SRA assignment modification. */ +enum assignment_mod_result { SRA_AM_NONE, /* nothing done for the stmt */ + SRA_AM_MODIFIED, /* stmt changed but not + removed */ + SRA_AM_REMOVED }; /* stmt eliminated */ + +/* Modify assignments with a CONSTRUCTOR on their RHS. STMT contains a pointer + to the assignment and GSI is the statement iterator pointing at it. Returns + the same values as sra_modify_assign. */ + +static enum assignment_mod_result +sra_modify_constructor_assign (gimple *stmt, gimple_stmt_iterator *gsi) +{ + tree lhs = gimple_assign_lhs (*stmt); + struct access *acc; + location_t loc; + + acc = get_access_for_expr (lhs); + if (!acc) + return SRA_AM_NONE; + + loc = gimple_location (*stmt); + if (VEC_length (constructor_elt, + CONSTRUCTOR_ELTS (gimple_assign_rhs1 (*stmt))) > 0) + { + /* I have never seen this code path trigger but if it can happen the + following should handle it gracefully. */ + if (access_has_children_p (acc)) + generate_subtree_copies (acc->first_child, acc->base, 0, 0, 0, gsi, + true, true, loc); + return SRA_AM_MODIFIED; + } + + if (acc->grp_covered) + { + init_subtree_with_zero (acc, gsi, false, loc); + unlink_stmt_vdef (*stmt); + gsi_remove (gsi, true); + return SRA_AM_REMOVED; + } else - cst2 = int_const_binop (LSHIFT_EXPR, mask, minshift, true); - mask = int_const_binop (MINUS_EXPR, cst, cst2, true); - mask = fold_build1 (BIT_NOT_EXPR, utype, mask); + { + init_subtree_with_zero (acc, gsi, true, loc); + return SRA_AM_MODIFIED; + } +} + +/* Create and return a new suitable default definition SSA_NAME for RACC which + is an access describing an uninitialized part of an aggregate that is being + loaded. */ - if (TYPE_MAIN_VARIANT (utype) != TYPE_MAIN_VARIANT (TREE_TYPE (var)) - && !integer_zerop (mask)) +static tree +get_repl_default_def_ssa_name (struct access *racc) +{ + tree repl, decl; + + decl = get_unrenamed_access_replacement (racc); + + repl = gimple_default_def (cfun, decl); + if (!repl) { - tmp = var; - if (!is_gimple_variable (tmp)) - tmp = unshare_expr (var); - else - TREE_NO_WARNING (var) = true; + repl = make_ssa_name (decl, gimple_build_nop ()); + set_default_def (decl, repl); + } - tmp2 = make_rename_temp (utype, "SR"); + return repl; +} - if (INTEGRAL_TYPE_P (TREE_TYPE (var))) - tmp = fold_convert (utype, tmp); - else - tmp = fold_build1 (VIEW_CONVERT_EXPR, utype, tmp); +/* Return true if REF has a COMPONENT_REF with a bit-field field declaration + somewhere in it. 
*/ - stmt = gimple_build_assign (tmp2, tmp); - gimple_seq_add_stmt (&seq, stmt); +static inline bool +contains_bitfld_comp_ref_p (const_tree ref) +{ + while (handled_component_p (ref)) + { + if (TREE_CODE (ref) == COMPONENT_REF + && DECL_BIT_FIELD (TREE_OPERAND (ref, 1))) + return true; + ref = TREE_OPERAND (ref, 0); + } + + return false; +} + +/* Return true if REF has an VIEW_CONVERT_EXPR or a COMPONENT_REF with a + bit-field field declaration somewhere in it. */ + +static inline bool +contains_vce_or_bfcref_p (const_tree ref) +{ + while (handled_component_p (ref)) + { + if (TREE_CODE (ref) == VIEW_CONVERT_EXPR + || (TREE_CODE (ref) == COMPONENT_REF + && DECL_BIT_FIELD (TREE_OPERAND (ref, 1)))) + return true; + ref = TREE_OPERAND (ref, 0); + } + + return false; +} + +/* Examine both sides of the assignment statement pointed to by STMT, replace + them with a scalare replacement if there is one and generate copying of + replacements if scalarized aggregates have been used in the assignment. GSI + is used to hold generated statements for type conversions and subtree + copying. */ + +static enum assignment_mod_result +sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi) +{ + struct access *lacc, *racc; + tree lhs, rhs; + bool modify_this_stmt = false; + bool force_gimple_rhs = false; + location_t loc; + gimple_stmt_iterator orig_gsi = *gsi; + + if (!gimple_assign_single_p (*stmt)) + return SRA_AM_NONE; + lhs = gimple_assign_lhs (*stmt); + rhs = gimple_assign_rhs1 (*stmt); + + if (TREE_CODE (rhs) == CONSTRUCTOR) + return sra_modify_constructor_assign (stmt, gsi); + + if (TREE_CODE (rhs) == REALPART_EXPR || TREE_CODE (lhs) == REALPART_EXPR + || TREE_CODE (rhs) == IMAGPART_EXPR || TREE_CODE (lhs) == IMAGPART_EXPR + || TREE_CODE (rhs) == BIT_FIELD_REF || TREE_CODE (lhs) == BIT_FIELD_REF) + { + modify_this_stmt = sra_modify_expr (gimple_assign_rhs1_ptr (*stmt), + gsi, false); + modify_this_stmt |= sra_modify_expr (gimple_assign_lhs_ptr (*stmt), + gsi, true); + return modify_this_stmt ? SRA_AM_MODIFIED : SRA_AM_NONE; + } + + lacc = get_access_for_expr (lhs); + racc = get_access_for_expr (rhs); + if (!lacc && !racc) + return SRA_AM_NONE; + + loc = gimple_location (*stmt); + if (lacc && lacc->grp_to_be_replaced) + { + lhs = get_access_replacement (lacc); + gimple_assign_set_lhs (*stmt, lhs); + modify_this_stmt = true; + if (lacc->grp_partial_lhs) + force_gimple_rhs = true; + sra_stats.exprs++; + } + + if (racc && racc->grp_to_be_replaced) + { + rhs = get_access_replacement (racc); + modify_this_stmt = true; + if (racc->grp_partial_lhs) + force_gimple_rhs = true; + sra_stats.exprs++; + } + + if (modify_this_stmt) + { + if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs))) + { + /* If we can avoid creating a VIEW_CONVERT_EXPR do so. + ??? This should move to fold_stmt which we simply should + call after building a VIEW_CONVERT_EXPR here. 
 */
+ if (AGGREGATE_TYPE_P (TREE_TYPE (lhs))
+ && !contains_bitfld_comp_ref_p (lhs)
+ && !access_has_children_p (lacc))
+ {
+ lhs = build_ref_for_model (loc, lhs, 0, racc, gsi, false);
+ gimple_assign_set_lhs (*stmt, lhs);
+ }
+ else if (AGGREGATE_TYPE_P (TREE_TYPE (rhs))
+ && !contains_vce_or_bfcref_p (rhs)
+ && !access_has_children_p (racc))
+ rhs = build_ref_for_model (loc, rhs, 0, lacc, gsi, false);
+
+ if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
+ {
+ rhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
+ rhs);
+ if (is_gimple_reg_type (TREE_TYPE (lhs))
+ && TREE_CODE (lhs) != SSA_NAME)
+ force_gimple_rhs = true;
+ }
+ }
+ }
+
+ /* From this point on, the function deals with assignments between
+ aggregates when at least one has scalar reductions of some of its
+ components. There are three possible scenarios: 1) both the LHS and RHS
+ have to-be-scalarized components, 2) only the RHS has, or 3) only the LHS
+ has.
+
+ In the first case, we would like to load the LHS components from RHS
+ components whenever possible. If that is not possible, we would like to
+ read it directly from the RHS (after updating it by storing in it its own
+ components). If there are some necessary unscalarized data in the LHS,
+ those will be loaded by the original assignment too. If neither of these
+ cases happens, the original statement can be removed. Most of this is done
+ by load_assign_lhs_subreplacements.
+
+ In the second case, we would like to store all RHS scalarized components
+ directly into the LHS and, if they cover the aggregate completely, remove
+ the statement too. In the third case, we want the LHS components to be
+ loaded directly from the RHS (DSE will remove the original statement if it
+ becomes redundant).
+
+ This is a bit complex but manageable when types match and when unions do
+ not cause confusion in a way that we cannot really load a component of the
+ LHS from the RHS or vice versa (the access representing this level can have
+ subaccesses that are accessible only through a different union field at a
+ higher level - different from the one used in the examined expression).
+ Unions are fun.
+
+ Therefore, I specially handle a fourth case, happening when there is a
+ specific type cast or it is impossible to locate a scalarized subaccess on
+ the other side of the expression. If that happens, I simply "refresh" the
+ RHS by storing in it its scalarized components, leave the original statement
+ there to do the copying, and then load the scalar replacements of the LHS.
+ This is what the first branch does.
*/ + + if (modify_this_stmt + || gimple_has_volatile_ops (*stmt) + || contains_vce_or_bfcref_p (rhs) + || contains_vce_or_bfcref_p (lhs)) + { + if (access_has_children_p (racc)) + generate_subtree_copies (racc->first_child, racc->base, 0, 0, 0, + gsi, false, false, loc); + if (access_has_children_p (lacc)) + generate_subtree_copies (lacc->first_child, lacc->base, 0, 0, 0, + gsi, true, true, loc); + sra_stats.separate_lhs_rhs_handling++; } else - tmp2 = var; + { + if (access_has_children_p (lacc) && access_has_children_p (racc)) + { + gimple_stmt_iterator orig_gsi = *gsi; + enum unscalarized_data_handling refreshed; + + if (lacc->grp_read && !lacc->grp_covered) + refreshed = handle_unscalarized_data_in_subtree (racc, gsi); + else + refreshed = SRA_UDH_NONE; + + load_assign_lhs_subreplacements (lacc, racc, lacc->offset, + &orig_gsi, gsi, &refreshed); + if (refreshed != SRA_UDH_RIGHT) + { + gsi_next (gsi); + unlink_stmt_vdef (*stmt); + gsi_remove (&orig_gsi, true); + sra_stats.deleted++; + return SRA_AM_REMOVED; + } + } + else + { + if (racc) + { + if (!racc->grp_to_be_replaced && !racc->grp_unscalarized_data) + { + if (dump_file) + { + fprintf (dump_file, "Removing load: "); + print_gimple_stmt (dump_file, *stmt, 0, 0); + } + + if (TREE_CODE (lhs) == SSA_NAME) + { + rhs = get_repl_default_def_ssa_name (racc); + if (!useless_type_conversion_p (TREE_TYPE (lhs), + TREE_TYPE (rhs))) + rhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, + TREE_TYPE (lhs), rhs); + } + else + { + if (racc->first_child) + generate_subtree_copies (racc->first_child, lhs, + racc->offset, 0, 0, gsi, + false, false, loc); + + gcc_assert (*stmt == gsi_stmt (*gsi)); + unlink_stmt_vdef (*stmt); + gsi_remove (gsi, true); + sra_stats.deleted++; + return SRA_AM_REMOVED; + } + } + else if (racc->first_child) + generate_subtree_copies (racc->first_child, lhs, racc->offset, + 0, 0, gsi, false, true, loc); + } + if (access_has_children_p (lacc)) + generate_subtree_copies (lacc->first_child, rhs, lacc->offset, + 0, 0, gsi, true, true, loc); + } + } - if (!integer_zerop (mask)) + /* This gimplification must be done after generate_subtree_copies, lest we + insert the subtree copies in the middle of the gimplified sequence. */ + if (force_gimple_rhs) + rhs = force_gimple_operand_gsi (&orig_gsi, rhs, true, NULL_TREE, + true, GSI_SAME_STMT); + if (gimple_assign_rhs1 (*stmt) != rhs) { - tmp = make_rename_temp (utype, "SR"); - stmt = gimple_build_assign (tmp, fold_build2 (BIT_AND_EXPR, utype, - tmp2, mask)); - gimple_seq_add_stmt (&seq, stmt); + modify_this_stmt = true; + gimple_assign_set_rhs_from_tree (&orig_gsi, rhs); + gcc_assert (*stmt == gsi_stmt (orig_gsi)); } + + return modify_this_stmt ? SRA_AM_MODIFIED : SRA_AM_NONE; +} + +/* Traverse the function body and all modifications as decided in + analyze_all_variable_accesses. Return true iff the CFG has been + changed. 
*/ + +static bool +sra_modify_function_body (void) +{ + bool cfg_changed = false; + basic_block bb; + + FOR_EACH_BB (bb) + { + gimple_stmt_iterator gsi = gsi_start_bb (bb); + while (!gsi_end_p (gsi)) + { + gimple stmt = gsi_stmt (gsi); + enum assignment_mod_result assign_result; + bool modified = false, deleted = false; + tree *t; + unsigned i; + + switch (gimple_code (stmt)) + { + case GIMPLE_RETURN: + t = gimple_return_retval_ptr (stmt); + if (*t != NULL_TREE) + modified |= sra_modify_expr (t, &gsi, false); + break; + + case GIMPLE_ASSIGN: + assign_result = sra_modify_assign (&stmt, &gsi); + modified |= assign_result == SRA_AM_MODIFIED; + deleted = assign_result == SRA_AM_REMOVED; + break; + + case GIMPLE_CALL: + /* Operands must be processed before the lhs. */ + for (i = 0; i < gimple_call_num_args (stmt); i++) + { + t = gimple_call_arg_ptr (stmt, i); + modified |= sra_modify_expr (t, &gsi, false); + } + + if (gimple_call_lhs (stmt)) + { + t = gimple_call_lhs_ptr (stmt); + modified |= sra_modify_expr (t, &gsi, true); + } + break; + + case GIMPLE_ASM: + for (i = 0; i < gimple_asm_ninputs (stmt); i++) + { + t = &TREE_VALUE (gimple_asm_input_op (stmt, i)); + modified |= sra_modify_expr (t, &gsi, false); + } + for (i = 0; i < gimple_asm_noutputs (stmt); i++) + { + t = &TREE_VALUE (gimple_asm_output_op (stmt, i)); + modified |= sra_modify_expr (t, &gsi, true); + } + break; + + default: + break; + } + + if (modified) + { + update_stmt (stmt); + if (maybe_clean_eh_stmt (stmt) + && gimple_purge_dead_eh_edges (gimple_bb (stmt))) + cfg_changed = true; + } + if (!deleted) + gsi_next (&gsi); + } + } + + return cfg_changed; +} + +/* Generate statements initializing scalar replacements of parts of function + parameters. */ + +static void +initialize_parameter_reductions (void) +{ + gimple_stmt_iterator gsi; + gimple_seq seq = NULL; + tree parm; + + for (parm = DECL_ARGUMENTS (current_function_decl); + parm; + parm = DECL_CHAIN (parm)) + { + VEC (access_p, heap) *access_vec; + struct access *access; + + if (!bitmap_bit_p (candidate_bitmap, DECL_UID (parm))) + continue; + access_vec = get_base_access_vector (parm); + if (!access_vec) + continue; + + if (!seq) + { + seq = gimple_seq_alloc (); + gsi = gsi_start (seq); + } + + for (access = VEC_index (access_p, access_vec, 0); + access; + access = access->next_grp) + generate_subtree_copies (access, parm, 0, 0, 0, &gsi, true, true, + EXPR_LOCATION (parm)); + } + + if (seq) + gsi_insert_seq_on_edge_immediate (single_succ_edge (ENTRY_BLOCK_PTR), seq); +} + +/* The "main" function of intraprocedural SRA passes. Runs the analysis and if + it reveals there are components of some aggregates to be scalarized, it runs + the required transformations. 
*/ +static unsigned int +perform_intra_sra (void) +{ + int ret = 0; + sra_initialize (); + + if (!find_var_candidates ()) + goto out; + + if (!scan_function ()) + goto out; + + if (!analyze_all_variable_accesses ()) + goto out; + + if (sra_modify_function_body ()) + ret = TODO_update_ssa | TODO_cleanup_cfg; else - tmp = mask; + ret = TODO_update_ssa; + initialize_parameter_reductions (); + + statistics_counter_event (cfun, "Scalar replacements created", + sra_stats.replacements); + statistics_counter_event (cfun, "Modified expressions", sra_stats.exprs); + statistics_counter_event (cfun, "Subtree copy stmts", + sra_stats.subtree_copies); + statistics_counter_event (cfun, "Subreplacement stmts", + sra_stats.subreplacements); + statistics_counter_event (cfun, "Deleted stmts", sra_stats.deleted); + statistics_counter_event (cfun, "Separate LHS and RHS handling", + sra_stats.separate_lhs_rhs_handling); + + out: + sra_deinitialize (); + return ret; +} + +/* Perform early intraprocedural SRA. */ +static unsigned int +early_intra_sra (void) +{ + sra_mode = SRA_MODE_EARLY_INTRA; + return perform_intra_sra (); +} + +/* Perform "late" intraprocedural SRA. */ +static unsigned int +late_intra_sra (void) +{ + sra_mode = SRA_MODE_INTRA; + return perform_intra_sra (); +} + + +static bool +gate_intra_sra (void) +{ + return flag_tree_sra != 0 && dbg_cnt (tree_sra); +} + + +struct gimple_opt_pass pass_sra_early = +{ + { + GIMPLE_PASS, + "esra", /* name */ + gate_intra_sra, /* gate */ + early_intra_sra, /* execute */ + NULL, /* sub */ + NULL, /* next */ + 0, /* static_pass_number */ + TV_TREE_SRA, /* tv_id */ + PROP_cfg | PROP_ssa, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + TODO_update_ssa + | TODO_ggc_collect + | TODO_verify_ssa /* todo_flags_finish */ + } +}; + +struct gimple_opt_pass pass_sra = +{ + { + GIMPLE_PASS, + "sra", /* name */ + gate_intra_sra, /* gate */ + late_intra_sra, /* execute */ + NULL, /* sub */ + NULL, /* next */ + 0, /* static_pass_number */ + TV_TREE_SRA, /* tv_id */ + PROP_cfg | PROP_ssa, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + TODO_update_address_taken, /* todo_flags_start */ + TODO_update_ssa + | TODO_ggc_collect + | TODO_verify_ssa /* todo_flags_finish */ + } +}; + + +/* Return true iff PARM (which must be a parm_decl) is an unused scalar + parameter. */ + +static bool +is_unused_scalar_param (tree parm) +{ + tree name; + return (is_gimple_reg (parm) + && (!(name = gimple_default_def (cfun, parm)) + || has_zero_uses (name))); +} - if (is_gimple_reg (src) && INTEGRAL_TYPE_P (TREE_TYPE (src))) - tmp2 = src; - else if (INTEGRAL_TYPE_P (TREE_TYPE (src))) - { - gimple_seq tmp_seq; - tmp2 = make_rename_temp (TREE_TYPE (src), "SR"); - tmp_seq = sra_build_assignment (tmp2, src); - gimple_seq_add_seq (&seq, tmp_seq); - } - else - { - gimple_seq tmp_seq; - tmp2 = make_rename_temp - (lang_hooks.types.type_for_size - (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (src))), - 1), "SR"); - tmp_seq = sra_build_assignment (tmp2, fold_build1 (VIEW_CONVERT_EXPR, - TREE_TYPE (tmp2), src)); - gimple_seq_add_seq (&seq, tmp_seq); - } +/* Scan immediate uses of a default definition SSA name of a parameter PARM and + examine whether there are any direct or otherwise infeasible ones. If so, + return true, otherwise return false. PARM must be a gimple register with a + non-NULL default definition. 
*/ + +static bool +ptr_parm_has_direct_uses (tree parm) +{ + imm_use_iterator ui; + gimple stmt; + tree name = gimple_default_def (cfun, parm); + bool ret = false; - if (!TYPE_UNSIGNED (TREE_TYPE (tmp2))) + FOR_EACH_IMM_USE_STMT (stmt, ui, name) { - gimple_seq tmp_seq; - tree ut = unsigned_type_for (TREE_TYPE (tmp2)); - tmp3 = make_rename_temp (ut, "SR"); - tmp2 = fold_convert (ut, tmp2); - tmp_seq = sra_build_assignment (tmp3, tmp2); - gimple_seq_add_seq (&seq, tmp_seq); + int uses_ok = 0; + use_operand_p use_p; - tmp2 = fold_build1 (BIT_NOT_EXPR, utype, mask); - tmp2 = int_const_binop (RSHIFT_EXPR, tmp2, minshift, true); - tmp2 = fold_convert (ut, tmp2); - tmp2 = fold_build2 (BIT_AND_EXPR, ut, tmp3, tmp2); + if (is_gimple_debug (stmt)) + continue; - if (tmp3 != tmp2) + /* Valid uses include dereferences on the lhs and the rhs. */ + if (gimple_has_lhs (stmt)) { - tmp3 = make_rename_temp (ut, "SR"); - tmp_seq = sra_build_assignment (tmp3, tmp2); - gimple_seq_add_seq (&seq, tmp_seq); + tree lhs = gimple_get_lhs (stmt); + while (handled_component_p (lhs)) + lhs = TREE_OPERAND (lhs, 0); + if (TREE_CODE (lhs) == MEM_REF + && TREE_OPERAND (lhs, 0) == name + && integer_zerop (TREE_OPERAND (lhs, 1)) + && types_compatible_p (TREE_TYPE (lhs), + TREE_TYPE (TREE_TYPE (name))) + && !TREE_THIS_VOLATILE (lhs)) + uses_ok++; + } + if (gimple_assign_single_p (stmt)) + { + tree rhs = gimple_assign_rhs1 (stmt); + while (handled_component_p (rhs)) + rhs = TREE_OPERAND (rhs, 0); + if (TREE_CODE (rhs) == MEM_REF + && TREE_OPERAND (rhs, 0) == name + && integer_zerop (TREE_OPERAND (rhs, 1)) + && types_compatible_p (TREE_TYPE (rhs), + TREE_TYPE (TREE_TYPE (name))) + && !TREE_THIS_VOLATILE (rhs)) + uses_ok++; + } + else if (is_gimple_call (stmt)) + { + unsigned i; + for (i = 0; i < gimple_call_num_args (stmt); ++i) + { + tree arg = gimple_call_arg (stmt, i); + while (handled_component_p (arg)) + arg = TREE_OPERAND (arg, 0); + if (TREE_CODE (arg) == MEM_REF + && TREE_OPERAND (arg, 0) == name + && integer_zerop (TREE_OPERAND (arg, 1)) + && types_compatible_p (TREE_TYPE (arg), + TREE_TYPE (TREE_TYPE (name))) + && !TREE_THIS_VOLATILE (arg)) + uses_ok++; + } } - tmp2 = tmp3; - } - - if (TYPE_MAIN_VARIANT (TREE_TYPE (tmp2)) != TYPE_MAIN_VARIANT (utype)) - { - gimple_seq tmp_seq; - tmp3 = make_rename_temp (utype, "SR"); - tmp2 = fold_convert (utype, tmp2); - tmp_seq = sra_build_assignment (tmp3, tmp2); - gimple_seq_add_seq (&seq, tmp_seq); - tmp2 = tmp3; - } - - if (!integer_zerop (minshift)) - { - tmp3 = make_rename_temp (utype, "SR"); - stmt = gimple_build_assign (tmp3, fold_build2 (LSHIFT_EXPR, utype, - tmp2, minshift)); - gimple_seq_add_stmt (&seq, stmt); - tmp2 = tmp3; - } + /* If the number of valid uses does not match the number of + uses in this stmt there is an unhandled use. */ + FOR_EACH_IMM_USE_ON_STMT (use_p, ui) + --uses_ok; - if (utype != TREE_TYPE (var)) - tmp3 = make_rename_temp (utype, "SR"); - else - tmp3 = var; - stmt = gimple_build_assign (tmp3, fold_build2 (BIT_IOR_EXPR, utype, - tmp, tmp2)); - gimple_seq_add_stmt (&seq, stmt); + if (uses_ok != 0) + ret = true; - if (tmp3 != var) - { - if (TREE_TYPE (var) == type) - stmt = gimple_build_assign (var, fold_convert (type, tmp3)); - else - stmt = gimple_build_assign (var, fold_build1 (VIEW_CONVERT_EXPR, - TREE_TYPE (var), tmp3)); - gimple_seq_add_stmt (&seq, stmt); + if (ret) + BREAK_FROM_IMM_USE_STMT (ui); } - return seq; + return ret; } -/* Expand an assignment of SRC to the scalarized representation of - ELT. 
If it is a field group, try to widen the assignment to cover - the full variable. */ +/* Identify candidates for reduction for IPA-SRA based on their type and mark + them in candidate_bitmap. Note that these do not necessarily include + parameter which are unused and thus can be removed. Return true iff any + such candidate has been found. */ -static gimple_seq -sra_build_elt_assignment (struct sra_elt *elt, tree src) +static bool +find_param_candidates (void) { - tree dst = elt->replacement; - tree var, tmp, cst, cst2; - gimple stmt; - gimple_seq seq; - - if (TREE_CODE (dst) != BIT_FIELD_REF - || !elt->in_bitfld_block) - return sra_build_assignment (REPLDUP (dst), src); - - var = TREE_OPERAND (dst, 0); - - /* Try to widen the assignment to the entire variable. - We need the source to be a BIT_FIELD_REF as well, such that, for - BIT_FIELD_REF = BIT_FIELD_REF, - by design, conditions are met such that we can turn it into - d = BIT_FIELD_REF. */ - if (elt->in_bitfld_block == 2 - && TREE_CODE (src) == BIT_FIELD_REF) + tree parm; + int count = 0; + bool ret = false; + const char *msg; + + for (parm = DECL_ARGUMENTS (current_function_decl); + parm; + parm = DECL_CHAIN (parm)) { - tmp = src; - cst = TYPE_SIZE (TREE_TYPE (var)); - cst2 = size_binop (MINUS_EXPR, TREE_OPERAND (src, 2), - TREE_OPERAND (dst, 2)); + tree type = TREE_TYPE (parm); - src = TREE_OPERAND (src, 0); + count++; - /* Avoid full-width bit-fields. */ - if (integer_zerop (cst2) - && tree_int_cst_equal (cst, TYPE_SIZE (TREE_TYPE (src)))) - { - if (INTEGRAL_TYPE_P (TREE_TYPE (src)) - && !TYPE_UNSIGNED (TREE_TYPE (src))) - src = fold_convert (unsigned_type_for (TREE_TYPE (src)), src); - - /* If a single conversion won't do, we'll need a statement - list. */ - if (TYPE_MAIN_VARIANT (TREE_TYPE (var)) - != TYPE_MAIN_VARIANT (TREE_TYPE (src))) - { - gimple_seq tmp_seq; - seq = NULL; - - if (!INTEGRAL_TYPE_P (TREE_TYPE (src))) - src = fold_build1 (VIEW_CONVERT_EXPR, - lang_hooks.types.type_for_size - (TREE_INT_CST_LOW - (TYPE_SIZE (TREE_TYPE (src))), - 1), src); - gcc_assert (TYPE_UNSIGNED (TREE_TYPE (src))); - - tmp = make_rename_temp (TREE_TYPE (src), "SR"); - stmt = gimple_build_assign (tmp, src); - gimple_seq_add_stmt (&seq, stmt); - - tmp_seq = sra_build_assignment (var, - fold_convert (TREE_TYPE (var), - tmp)); - gimple_seq_add_seq (&seq, tmp_seq); - - return seq; - } + if (TREE_THIS_VOLATILE (parm) + || TREE_ADDRESSABLE (parm) + || (!is_gimple_reg_type (type) && is_va_list_type (type))) + continue; - src = fold_convert (TREE_TYPE (var), src); + if (is_unused_scalar_param (parm)) + { + ret = true; + continue; } - else + + if (POINTER_TYPE_P (type)) { - src = fold_convert (TREE_TYPE (var), tmp); + type = TREE_TYPE (type); + + if (TREE_CODE (type) == FUNCTION_TYPE + || TYPE_VOLATILE (type) + || (TREE_CODE (type) == ARRAY_TYPE + && TYPE_NONALIASED_COMPONENT (type)) + || !is_gimple_reg (parm) + || is_va_list_type (type) + || ptr_parm_has_direct_uses (parm)) + continue; } + else if (!AGGREGATE_TYPE_P (type)) + continue; + + if (!COMPLETE_TYPE_P (type) + || !host_integerp (TYPE_SIZE (type), 1) + || tree_low_cst (TYPE_SIZE (type), 1) == 0 + || (AGGREGATE_TYPE_P (type) + && type_internals_preclude_sra_p (type, &msg))) + continue; - return sra_build_assignment (var, src); + bitmap_set_bit (candidate_bitmap, DECL_UID (parm)); + ret = true; + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "Candidate (%d): ", DECL_UID (parm)); + print_generic_expr (dump_file, parm, 0); + fprintf (dump_file, "\n"); + } } - return 
sra_build_bf_assignment (dst, src); + func_param_count = count; + return ret; +} + +/* Callback of walk_aliased_vdefs, marks the access passed as DATA as + maybe_modified. */ + +static bool +mark_maybe_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED, + void *data) +{ + struct access *repr = (struct access *) data; + + repr->grp_maybe_modified = 1; + return true; } -/* Generate a set of assignment statements in *LIST_P to copy all - instantiated elements under ELT to or from the equivalent structure - rooted at EXPR. COPY_OUT controls the direction of the copy, with - true meaning to copy out of EXPR into ELT. */ +/* Analyze what representatives (in linked lists accessible from + REPRESENTATIVES) can be modified by side effects of statements in the + current function. */ static void -generate_copy_inout (struct sra_elt *elt, bool copy_out, tree expr, - gimple_seq *seq_p) +analyze_modified_params (VEC (access_p, heap) *representatives) { - struct sra_elt *c; - gimple_seq tmp_seq; - tree t; + int i; - if (!copy_out && TREE_CODE (expr) == SSA_NAME - && TREE_CODE (TREE_TYPE (expr)) == COMPLEX_TYPE) + for (i = 0; i < func_param_count; i++) { - tree r, i; + struct access *repr; - c = lookup_element (elt, integer_zero_node, NULL, NO_INSERT); - r = c->replacement; - c = lookup_element (elt, integer_one_node, NULL, NO_INSERT); - i = c->replacement; - - t = build2 (COMPLEX_EXPR, elt->type, r, i); - tmp_seq = sra_build_bf_assignment (expr, t); - SSA_NAME_DEF_STMT (expr) = gimple_seq_last_stmt (tmp_seq); - gimple_seq_add_seq (seq_p, tmp_seq); - } - else if (elt->replacement) - { - if (copy_out) - tmp_seq = sra_build_elt_assignment (elt, expr); - else - tmp_seq = sra_build_bf_assignment (expr, REPLDUP (elt->replacement)); - gimple_seq_add_seq (seq_p, tmp_seq); - } - else - { - FOR_EACH_ACTUAL_CHILD (c, elt) + for (repr = VEC_index (access_p, representatives, i); + repr; + repr = repr->next_grp) { - t = generate_one_element_ref (c, unshare_expr (expr)); - generate_copy_inout (c, copy_out, t, seq_p); + struct access *access; + bitmap visited; + ao_ref ar; + + if (no_accesses_p (repr)) + continue; + if (!POINTER_TYPE_P (TREE_TYPE (repr->base)) + || repr->grp_maybe_modified) + continue; + + ao_ref_init (&ar, repr->expr); + visited = BITMAP_ALLOC (NULL); + for (access = repr; access; access = access->next_sibling) + { + /* All accesses are read ones, otherwise grp_maybe_modified would + be trivially set. */ + walk_aliased_vdefs (&ar, gimple_vuse (access->stmt), + mark_maybe_modified, repr, &visited); + if (repr->grp_maybe_modified) + break; + } + BITMAP_FREE (visited); } } } -/* Generate a set of assignment statements in *LIST_P to copy all instantiated - elements under SRC to their counterparts under DST. There must be a 1-1 - correspondence of instantiated elements. */ +/* Propagate distances in bb_dereferences in the opposite direction than the + control flow edges, in each step storing the maximum of the current value + and the minimum of all successors. These steps are repeated until the table + stabilizes. Note that BBs which might terminate the functions (according to + final_bbs bitmap) never updated in this way. 
*/ static void -generate_element_copy (struct sra_elt *dst, struct sra_elt *src, gimple_seq *seq_p) +propagate_dereference_distances (void) { - struct sra_elt *dc, *sc; + VEC (basic_block, heap) *queue; + basic_block bb; + + queue = VEC_alloc (basic_block, heap, last_basic_block_for_function (cfun)); + VEC_quick_push (basic_block, queue, ENTRY_BLOCK_PTR); + FOR_EACH_BB (bb) + { + VEC_quick_push (basic_block, queue, bb); + bb->aux = bb; + } - FOR_EACH_ACTUAL_CHILD (dc, dst) + while (!VEC_empty (basic_block, queue)) { - sc = lookup_element (src, dc->element, NULL, NO_INSERT); - if (!sc && dc->in_bitfld_block == 2) - { - struct sra_elt *dcs; + edge_iterator ei; + edge e; + bool change = false; + int i; - FOR_EACH_ACTUAL_CHILD (dcs, dc) - { - sc = lookup_element (src, dcs->element, NULL, NO_INSERT); - gcc_assert (sc); - generate_element_copy (dcs, sc, seq_p); - } + bb = VEC_pop (basic_block, queue); + bb->aux = NULL; - continue; - } + if (bitmap_bit_p (final_bbs, bb->index)) + continue; - /* If DST and SRC are structs with the same elements, but do not have - the same TYPE_MAIN_VARIANT, then lookup of DST FIELD_DECL in SRC - will fail. Try harder by finding the corresponding FIELD_DECL - in SRC. */ - if (!sc) + for (i = 0; i < func_param_count; i++) { - tree f; - - gcc_assert (useless_type_conversion_p (dst->type, src->type)); - gcc_assert (TREE_CODE (dc->element) == FIELD_DECL); - for (f = TYPE_FIELDS (src->type); f ; f = TREE_CHAIN (f)) - if (simple_cst_equal (DECL_FIELD_OFFSET (f), - DECL_FIELD_OFFSET (dc->element)) > 0 - && simple_cst_equal (DECL_FIELD_BIT_OFFSET (f), - DECL_FIELD_BIT_OFFSET (dc->element)) > 0 - && simple_cst_equal (DECL_SIZE (f), - DECL_SIZE (dc->element)) > 0 - && (useless_type_conversion_p (TREE_TYPE (dc->element), - TREE_TYPE (f)) - || (POINTER_TYPE_P (TREE_TYPE (dc->element)) - && POINTER_TYPE_P (TREE_TYPE (f))))) - break; - gcc_assert (f != NULL_TREE); - sc = lookup_element (src, f, NULL, NO_INSERT); - } + int idx = bb->index * func_param_count + i; + bool first = true; + HOST_WIDE_INT inh = 0; - generate_element_copy (dc, sc, seq_p); - } + FOR_EACH_EDGE (e, ei, bb->succs) + { + int succ_idx = e->dest->index * func_param_count + i; - if (dst->replacement) - { - gimple_seq tmp_seq; + if (e->src == EXIT_BLOCK_PTR) + continue; + + if (first) + { + first = false; + inh = bb_dereferences [succ_idx]; + } + else if (bb_dereferences [succ_idx] < inh) + inh = bb_dereferences [succ_idx]; + } + + if (!first && bb_dereferences[idx] < inh) + { + bb_dereferences[idx] = inh; + change = true; + } + } - gcc_assert (src->replacement); + if (change && !bitmap_bit_p (final_bbs, bb->index)) + FOR_EACH_EDGE (e, ei, bb->preds) + { + if (e->src->aux) + continue; - tmp_seq = sra_build_elt_assignment (dst, REPLDUP (src->replacement)); - gimple_seq_add_seq (seq_p, tmp_seq); + e->src->aux = e->src; + VEC_quick_push (basic_block, queue, e->src); + } } + + VEC_free (basic_block, heap, queue); } -/* Generate a set of assignment statements in *LIST_P to zero all instantiated - elements under ELT. In addition, do not assign to elements that have been - marked VISITED but do reset the visited flag; this allows easy coordination - with generate_element_init. */ +/* Dump a dereferences TABLE with heading STR to file F. 
*/ static void -generate_element_zero (struct sra_elt *elt, gimple_seq *seq_p) +dump_dereferences_table (FILE *f, const char *str, HOST_WIDE_INT *table) { - struct sra_elt *c; + basic_block bb; - if (elt->visited) + fprintf (dump_file, str); + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - elt->visited = false; - return; + fprintf (f, "%4i %i ", bb->index, bitmap_bit_p (final_bbs, bb->index)); + if (bb != EXIT_BLOCK_PTR) + { + int i; + for (i = 0; i < func_param_count; i++) + { + int idx = bb->index * func_param_count + i; + fprintf (f, " %4" HOST_WIDE_INT_PRINT "d", table[idx]); + } + } + fprintf (f, "\n"); } + fprintf (dump_file, "\n"); +} - if (!elt->in_bitfld_block) - FOR_EACH_ACTUAL_CHILD (c, elt) - generate_element_zero (c, seq_p); - - if (elt->replacement) - { - tree t; - gimple_seq tmp_seq; - - gcc_assert (elt->is_scalar); - t = fold_convert (elt->type, integer_zero_node); +/* Determine what (parts of) parameters passed by reference that are not + assigned to are not certainly dereferenced in this function and thus the + dereferencing cannot be safely moved to the caller without potentially + introducing a segfault. Mark such REPRESENTATIVES as + grp_not_necessarilly_dereferenced. - tmp_seq = sra_build_elt_assignment (elt, t); - gimple_seq_add_seq (seq_p, tmp_seq); - } -} + The dereferenced maximum "distance," i.e. the offset + size of the accessed + part is calculated rather than simple booleans are calculated for each + pointer parameter to handle cases when only a fraction of the whole + aggregate is allocated (see testsuite/gcc.c-torture/execute/ipa-sra-2.c for + an example). -/* Generate an assignment VAR = INIT, where INIT may need gimplification. - Add the result to *LIST_P. */ + The maximum dereference distances for each pointer parameter and BB are + already stored in bb_dereference. This routine simply propagates these + values upwards by propagate_dereference_distances and then compares the + distances of individual parameters in the ENTRY BB to the equivalent + distances of each representative of a (fraction of a) parameter. */ static void -generate_one_element_init (struct sra_elt *elt, tree init, gimple_seq *seq_p) +analyze_caller_dereference_legality (VEC (access_p, heap) *representatives) { - gimple_seq tmp_seq = sra_build_elt_assignment (elt, init); - gimple_seq_add_seq (seq_p, tmp_seq); -} + int i; -/* Generate a set of assignment statements in *LIST_P to set all instantiated - elements under ELT with the contents of the initializer INIT. In addition, - mark all assigned elements VISITED; this allows easy coordination with - generate_element_zero. Return false if we found a case we couldn't - handle. */ + if (dump_file && (dump_flags & TDF_DETAILS)) + dump_dereferences_table (dump_file, + "Dereference table before propagation:\n", + bb_dereferences); -static bool -generate_element_init_1 (struct sra_elt *elt, tree init, gimple_seq *seq_p) -{ - bool result = true; - enum tree_code init_code; - struct sra_elt *sub; - tree t; - unsigned HOST_WIDE_INT idx; - tree value, purpose; + propagate_dereference_distances (); - /* We can be passed DECL_INITIAL of a static variable. It might have a - conversion, which we strip off here. 
*/ - STRIP_USELESS_TYPE_CONVERSION (init); - init_code = TREE_CODE (init); + if (dump_file && (dump_flags & TDF_DETAILS)) + dump_dereferences_table (dump_file, + "Dereference table after propagation:\n", + bb_dereferences); - if (elt->is_scalar) + for (i = 0; i < func_param_count; i++) { - if (elt->replacement) - { - generate_one_element_init (elt, init, seq_p); - elt->visited = true; - } - return result; - } + struct access *repr = VEC_index (access_p, representatives, i); + int idx = ENTRY_BLOCK_PTR->index * func_param_count + i; - switch (init_code) - { - case COMPLEX_CST: - case COMPLEX_EXPR: - FOR_EACH_ACTUAL_CHILD (sub, elt) - { - if (sub->element == integer_zero_node) - t = (init_code == COMPLEX_EXPR - ? TREE_OPERAND (init, 0) : TREE_REALPART (init)); - else - t = (init_code == COMPLEX_EXPR - ? TREE_OPERAND (init, 1) : TREE_IMAGPART (init)); - result &= generate_element_init_1 (sub, t, seq_p); - } - break; + if (!repr || no_accesses_p (repr)) + continue; - case CONSTRUCTOR: - FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), idx, purpose, value) + do { - /* Array constructors are routinely created with NULL indices. */ - if (purpose == NULL_TREE) - { - result = false; - break; - } - if (TREE_CODE (purpose) == RANGE_EXPR) - { - tree lower = TREE_OPERAND (purpose, 0); - tree upper = TREE_OPERAND (purpose, 1); - - while (1) - { - sub = lookup_element (elt, lower, NULL, NO_INSERT); - if (sub != NULL) - result &= generate_element_init_1 (sub, value, seq_p); - if (tree_int_cst_equal (lower, upper)) - break; - lower = int_const_binop (PLUS_EXPR, lower, - integer_one_node, true); - } - } - else - { - sub = lookup_element (elt, purpose, NULL, NO_INSERT); - if (sub != NULL) - result &= generate_element_init_1 (sub, value, seq_p); - } + if ((repr->offset + repr->size) > bb_dereferences[idx]) + repr->grp_not_necessarilly_dereferenced = 1; + repr = repr->next_grp; } - break; - - default: - elt->visited = true; - result = false; + while (repr); } - - return result; } -/* A wrapper function for generate_element_init_1 that handles cleanup after - gimplification. */ +/* Return the representative access for the parameter declaration PARM if it is + a scalar passed by reference which is not written to and the pointer value + is not used directly. Thus, if it is legal to dereference it in the caller + and we can rule out modifications through aliases, such parameter should be + turned into one passed by value. Return NULL otherwise. */ -static bool -generate_element_init (struct sra_elt *elt, tree init, gimple_seq *seq_p) +static struct access * +unmodified_by_ref_scalar_representative (tree parm) { - bool ret; - struct gimplify_ctx gctx; - - push_gimplify_context (&gctx); - ret = generate_element_init_1 (elt, init, seq_p); - pop_gimplify_context (NULL); + int i, access_count; + struct access *repr; + VEC (access_p, heap) *access_vec; + + access_vec = get_base_access_vector (parm); + gcc_assert (access_vec); + repr = VEC_index (access_p, access_vec, 0); + if (repr->write) + return NULL; + repr->group_representative = repr; - /* The replacement can expose previously unreferenced variables. 
*/ - if (ret && *seq_p) + access_count = VEC_length (access_p, access_vec); + for (i = 1; i < access_count; i++) { - gimple_stmt_iterator i; - - for (i = gsi_start (*seq_p); !gsi_end_p (i); gsi_next (&i)) - find_new_referenced_vars (gsi_stmt (i)); + struct access *access = VEC_index (access_p, access_vec, i); + if (access->write) + return NULL; + access->group_representative = repr; + access->next_sibling = repr->next_sibling; + repr->next_sibling = access; } - return ret; + repr->grp_read = 1; + repr->grp_scalar_ptr = 1; + return repr; } -/* Insert a gimple_seq SEQ on all the outgoing edges out of BB. Note that - if BB has more than one edge, STMT will be replicated for each edge. - Also, abnormal edges will be ignored. */ +/* Return true iff this access precludes IPA-SRA of the parameter it is + associated with. */ -void -insert_edge_copies_seq (gimple_seq seq, basic_block bb) +static bool +access_precludes_ipa_sra_p (struct access *access) { - edge e; - edge_iterator ei; - unsigned n_copies = -1; + /* Avoid issues such as the second simple testcase in PR 42025. The problem + is incompatible assign in a call statement (and possibly even in asm + statements). This can be relaxed by using a new temporary but only for + non-TREE_ADDRESSABLE types and is probably not worth the complexity. (In + intraprocedural SRA we deal with this by keeping the old aggregate around, + something we cannot do in IPA-SRA.) */ + if (access->write + && (is_gimple_call (access->stmt) + || gimple_code (access->stmt) == GIMPLE_ASM)) + return true; - FOR_EACH_EDGE (e, ei, bb->succs) - if (!(e->flags & EDGE_ABNORMAL)) - n_copies++; + if (STRICT_ALIGNMENT + && tree_non_aligned_mem_p (access->expr, TYPE_ALIGN (access->type))) + return true; - FOR_EACH_EDGE (e, ei, bb->succs) - if (!(e->flags & EDGE_ABNORMAL)) - gsi_insert_seq_on_edge (e, n_copies-- > 0 ? gimple_seq_copy (seq) : seq); + return false; } -/* Helper function to insert LIST before GSI, and set up line number info. */ - -void -sra_insert_before (gimple_stmt_iterator *gsi, gimple_seq seq) -{ - gimple stmt = gsi_stmt (*gsi); - - if (gimple_has_location (stmt)) - annotate_all_with_location (seq, gimple_location (stmt)); - gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT); -} -/* Similarly, but insert after GSI. Handles insertion onto edges as well. */ +/* Sort collected accesses for parameter PARM, identify representatives for + each accessed region and link them together. Return NULL if there are + different but overlapping accesses, return the special ptr value meaning + there are no accesses for this parameter if that is the case and return the + first representative otherwise. Set *RO_GRP if there is a group of accesses + with only read (i.e. no write) accesses. 
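
   To illustrate, this only restates what the loop below does: accesses with
   identical offset and size are merged under one representative and chained
   through next_sibling, distinct non-overlapping regions become successive
   representatives chained through next_grp, while a partial overlap, an
   access that precludes IPA-SRA, or a total size that covers the whole
   aggregate makes the function give up and return NULL.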
*/ -void -sra_insert_after (gimple_stmt_iterator *gsi, gimple_seq seq) +static struct access * +splice_param_accesses (tree parm, bool *ro_grp) { - gimple stmt = gsi_stmt (*gsi); + int i, j, access_count, group_count; + int agg_size, total_size = 0; + struct access *access, *res, **prev_acc_ptr = &res; + VEC (access_p, heap) *access_vec; + + access_vec = get_base_access_vector (parm); + if (!access_vec) + return &no_accesses_representant; + access_count = VEC_length (access_p, access_vec); + + VEC_qsort (access_p, access_vec, compare_access_positions); + + i = 0; + total_size = 0; + group_count = 0; + while (i < access_count) + { + bool modification; + tree a1_alias_type; + access = VEC_index (access_p, access_vec, i); + modification = access->write; + if (access_precludes_ipa_sra_p (access)) + return NULL; + a1_alias_type = reference_alias_ptr_type (access->expr); - if (gimple_has_location (stmt)) - annotate_all_with_location (seq, gimple_location (stmt)); + /* Access is about to become group representative unless we find some + nasty overlap which would preclude us from breaking this parameter + apart. */ - if (stmt_ends_bb_p (stmt)) - insert_edge_copies_seq (seq, gsi_bb (*gsi)); - else - gsi_insert_seq_after (gsi, seq, GSI_SAME_STMT); -} + j = i + 1; + while (j < access_count) + { + struct access *ac2 = VEC_index (access_p, access_vec, j); + if (ac2->offset != access->offset) + { + /* All or nothing law for parameters. */ + if (access->offset + access->size > ac2->offset) + return NULL; + else + break; + } + else if (ac2->size != access->size) + return NULL; + + if (access_precludes_ipa_sra_p (ac2) + || (ac2->type != access->type + && (TREE_ADDRESSABLE (ac2->type) + || TREE_ADDRESSABLE (access->type))) + || (reference_alias_ptr_type (ac2->expr) != a1_alias_type)) + return NULL; + + modification |= ac2->write; + ac2->group_representative = access; + ac2->next_sibling = access->next_sibling; + access->next_sibling = ac2; + j++; + } -/* Similarly, but replace the statement at GSI. */ + group_count++; + access->grp_maybe_modified = modification; + if (!modification) + *ro_grp = true; + *prev_acc_ptr = access; + prev_acc_ptr = &access->next_grp; + total_size += access->size; + i = j; + } -static void -sra_replace (gimple_stmt_iterator *gsi, gimple_seq seq) -{ - sra_insert_before (gsi, seq); - unlink_stmt_vdef (gsi_stmt (*gsi)); - gsi_remove (gsi, false); - if (gsi_end_p (*gsi)) - *gsi = gsi_last (gsi_seq (*gsi)); + if (POINTER_TYPE_P (TREE_TYPE (parm))) + agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm))), 1); else - gsi_prev (gsi); + agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (parm)), 1); + if (total_size >= agg_size) + return NULL; + + gcc_assert (group_count > 0); + return res; } -/* Data structure that bitfield_overlaps_p fills in with information - about the element passed in and how much of it overlaps with the - bit-range passed it to. */ +/* Decide whether parameters with representative accesses given by REPR should + be reduced into components. */ -struct bitfield_overlap_info +static int +decide_one_param_reduction (struct access *repr) { - /* The bit-length of an element. */ - tree field_len; - - /* The bit-position of the element in its parent. */ - tree field_pos; - - /* The number of bits of the element that overlap with the incoming - bit range. */ - tree overlap_len; + int total_size, cur_parm_size, agg_size, new_param_count, parm_size_limit; + bool by_ref; + tree parm; - /* The first bit of the element that overlaps with the incoming bit - range. 
*/ - tree overlap_pos; -}; - -/* Return true if a BIT_FIELD_REF<(FLD->parent), BLEN, BPOS> - expression (referenced as BF below) accesses any of the bits in FLD, - false if it doesn't. If DATA is non-null, its field_len and - field_pos are filled in such that BIT_FIELD_REF<(FLD->parent), - field_len, field_pos> (referenced as BFLD below) represents the - entire field FLD->element, and BIT_FIELD_REF represents the portion of the entire field that - overlaps with BF. */ - -static bool -bitfield_overlaps_p (tree blen, tree bpos, struct sra_elt *fld, - struct bitfield_overlap_info *data) -{ - tree flen, fpos; - bool ret; + parm = repr->base; + cur_parm_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (parm)), 1); + gcc_assert (cur_parm_size > 0); - if (TREE_CODE (fld->element) == FIELD_DECL) + if (POINTER_TYPE_P (TREE_TYPE (parm))) { - flen = fold_convert (bitsizetype, DECL_SIZE (fld->element)); - fpos = fold_convert (bitsizetype, DECL_FIELD_OFFSET (fld->element)); - fpos = size_binop (MULT_EXPR, fpos, bitsize_int (BITS_PER_UNIT)); - fpos = size_binop (PLUS_EXPR, fpos, DECL_FIELD_BIT_OFFSET (fld->element)); + by_ref = true; + agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm))), 1); } - else if (TREE_CODE (fld->element) == BIT_FIELD_REF) + else { - flen = fold_convert (bitsizetype, TREE_OPERAND (fld->element, 1)); - fpos = fold_convert (bitsizetype, TREE_OPERAND (fld->element, 2)); + by_ref = false; + agg_size = cur_parm_size; } - else if (TREE_CODE (fld->element) == INTEGER_CST) + + if (dump_file) { - tree domain_type = TYPE_DOMAIN (TREE_TYPE (fld->parent->element)); - flen = fold_convert (bitsizetype, TYPE_SIZE (fld->type)); - fpos = fold_convert (bitsizetype, fld->element); - if (domain_type && TYPE_MIN_VALUE (domain_type)) - fpos = size_binop (MINUS_EXPR, fpos, - fold_convert (bitsizetype, - TYPE_MIN_VALUE (domain_type))); - fpos = size_binop (MULT_EXPR, flen, fpos); + struct access *acc; + fprintf (dump_file, "Evaluating PARAM group sizes for "); + print_generic_expr (dump_file, parm, 0); + fprintf (dump_file, " (UID: %u): \n", DECL_UID (parm)); + for (acc = repr; acc; acc = acc->next_grp) + dump_access (dump_file, acc, true); } - else - gcc_unreachable (); - - gcc_assert (host_integerp (blen, 1) - && host_integerp (bpos, 1) - && host_integerp (flen, 1) - && host_integerp (fpos, 1)); - - ret = ((!tree_int_cst_lt (fpos, bpos) - && tree_int_cst_lt (size_binop (MINUS_EXPR, fpos, bpos), - blen)) - || (!tree_int_cst_lt (bpos, fpos) - && tree_int_cst_lt (size_binop (MINUS_EXPR, bpos, fpos), - flen))); - if (!ret) - return ret; + total_size = 0; + new_param_count = 0; - if (data) + for (; repr; repr = repr->next_grp) { - tree bend, fend; + gcc_assert (parm == repr->base); - data->field_len = flen; - data->field_pos = fpos; + /* Taking the address of a non-addressable field is verboten. */ + if (by_ref && repr->non_addressable) + return 0; - fend = size_binop (PLUS_EXPR, fpos, flen); - bend = size_binop (PLUS_EXPR, bpos, blen); - - if (tree_int_cst_lt (bend, fend)) - data->overlap_len = size_binop (MINUS_EXPR, bend, fpos); + if (!by_ref || (!repr->grp_maybe_modified + && !repr->grp_not_necessarilly_dereferenced)) + total_size += repr->size; else - data->overlap_len = NULL; + total_size += cur_parm_size; - if (tree_int_cst_lt (fpos, bpos)) - { - data->overlap_pos = size_binop (MINUS_EXPR, bpos, fpos); - data->overlap_len = size_binop (MINUS_EXPR, - data->overlap_len - ? 
data->overlap_len - : data->field_len, - data->overlap_pos); - } - else - data->overlap_pos = NULL; + new_param_count++; } - return ret; -} + gcc_assert (new_param_count > 0); -/* Add to LISTP a sequence of statements that copies BLEN bits between - VAR and the scalarized elements of ELT, starting a bit VPOS of VAR - and at bit BPOS of ELT. The direction of the copy is given by - TO_VAR. */ - -static void -sra_explode_bitfield_assignment (tree var, tree vpos, bool to_var, - gimple_seq *seq_p, tree blen, tree bpos, - struct sra_elt *elt) -{ - struct sra_elt *fld; - struct bitfield_overlap_info flp; + if (optimize_function_for_size_p (cfun)) + parm_size_limit = cur_parm_size; + else + parm_size_limit = (PARAM_VALUE (PARAM_IPA_SRA_PTR_GROWTH_FACTOR) + * cur_parm_size); - FOR_EACH_ACTUAL_CHILD (fld, elt) + if (total_size < agg_size + && total_size <= parm_size_limit) { - tree flen, fpos; + if (dump_file) + fprintf (dump_file, " ....will be split into %i components\n", + new_param_count); + return new_param_count; + } + else + return 0; +} - if (!bitfield_overlaps_p (blen, bpos, fld, &flp)) - continue; +/* The order of the following enums is important, we need to do extra work for + UNUSED_PARAMS, BY_VAL_ACCESSES and UNMODIF_BY_REF_ACCESSES. */ +enum ipa_splicing_result { NO_GOOD_ACCESS, UNUSED_PARAMS, BY_VAL_ACCESSES, + MODIF_BY_REF_ACCESSES, UNMODIF_BY_REF_ACCESSES }; - flen = flp.overlap_len ? flp.overlap_len : flp.field_len; - fpos = flp.overlap_pos ? flp.overlap_pos : bitsize_int (0); +/* Identify representatives of all accesses to all candidate parameters for + IPA-SRA. Return result based on what representatives have been found. */ - if (fld->replacement) - { - tree infld, invar, type; - gimple_seq st; +static enum ipa_splicing_result +splice_all_param_accesses (VEC (access_p, heap) **representatives) +{ + enum ipa_splicing_result result = NO_GOOD_ACCESS; + tree parm; + struct access *repr; - infld = fld->replacement; + *representatives = VEC_alloc (access_p, heap, func_param_count); - type = unsigned_type_for (TREE_TYPE (infld)); - if (TYPE_PRECISION (type) != TREE_INT_CST_LOW (flen)) - type = build_nonstandard_integer_type (TREE_INT_CST_LOW (flen), 1); + for (parm = DECL_ARGUMENTS (current_function_decl); + parm; + parm = DECL_CHAIN (parm)) + { + if (is_unused_scalar_param (parm)) + { + VEC_quick_push (access_p, *representatives, + &no_accesses_representant); + if (result == NO_GOOD_ACCESS) + result = UNUSED_PARAMS; + } + else if (POINTER_TYPE_P (TREE_TYPE (parm)) + && is_gimple_reg_type (TREE_TYPE (TREE_TYPE (parm))) + && bitmap_bit_p (candidate_bitmap, DECL_UID (parm))) + { + repr = unmodified_by_ref_scalar_representative (parm); + VEC_quick_push (access_p, *representatives, repr); + if (repr) + result = UNMODIF_BY_REF_ACCESSES; + } + else if (bitmap_bit_p (candidate_bitmap, DECL_UID (parm))) + { + bool ro_grp = false; + repr = splice_param_accesses (parm, &ro_grp); + VEC_quick_push (access_p, *representatives, repr); - if (TREE_CODE (infld) == BIT_FIELD_REF) - { - fpos = size_binop (PLUS_EXPR, fpos, TREE_OPERAND (infld, 2)); - infld = TREE_OPERAND (infld, 0); - } - else if (BYTES_BIG_ENDIAN && DECL_P (fld->element) - && !tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (infld)), - DECL_SIZE (fld->element))) + if (repr && !no_accesses_p (repr)) { - fpos = size_binop (PLUS_EXPR, fpos, - TYPE_SIZE (TREE_TYPE (infld))); - fpos = size_binop (MINUS_EXPR, fpos, - DECL_SIZE (fld->element)); + if (POINTER_TYPE_P (TREE_TYPE (parm))) + { + if (ro_grp) + result = UNMODIF_BY_REF_ACCESSES; + else if 
(result < MODIF_BY_REF_ACCESSES) + result = MODIF_BY_REF_ACCESSES; + } + else if (result < BY_VAL_ACCESSES) + result = BY_VAL_ACCESSES; } - - infld = fold_build3 (BIT_FIELD_REF, type, infld, flen, fpos); - - invar = size_binop (MINUS_EXPR, flp.field_pos, bpos); - if (flp.overlap_pos) - invar = size_binop (PLUS_EXPR, invar, flp.overlap_pos); - invar = size_binop (PLUS_EXPR, invar, vpos); - - invar = fold_build3 (BIT_FIELD_REF, type, var, flen, invar); - - if (to_var) - st = sra_build_bf_assignment (invar, infld); - else - st = sra_build_bf_assignment (infld, invar); - - gimple_seq_add_seq (seq_p, st); + else if (no_accesses_p (repr) && (result == NO_GOOD_ACCESS)) + result = UNUSED_PARAMS; } else - { - tree sub = size_binop (MINUS_EXPR, flp.field_pos, bpos); - sub = size_binop (PLUS_EXPR, vpos, sub); - if (flp.overlap_pos) - sub = size_binop (PLUS_EXPR, sub, flp.overlap_pos); + VEC_quick_push (access_p, *representatives, NULL); + } - sra_explode_bitfield_assignment (var, sub, to_var, seq_p, - flen, fpos, fld); - } + if (result == NO_GOOD_ACCESS) + { + VEC_free (access_p, heap, *representatives); + *representatives = NULL; + return NO_GOOD_ACCESS; } + + return result; } -/* Add to LISTBEFOREP statements that copy scalarized members of ELT - that overlap with BIT_FIELD_REF<(ELT->element), BLEN, BPOS> back - into the full variable, and to LISTAFTERP, if non-NULL, statements - that copy the (presumably modified) overlapping portions of the - full variable back to the scalarized variables. */ +/* Return the index of BASE in PARMS. Abort if it is not found. */ -static void -sra_sync_for_bitfield_assignment (gimple_seq *seq_before_p, - gimple_seq *seq_after_p, - tree blen, tree bpos, - struct sra_elt *elt) +static inline int +get_param_index (tree base, VEC(tree, heap) *parms) { - struct sra_elt *fld; - struct bitfield_overlap_info flp; + int i, len; - FOR_EACH_ACTUAL_CHILD (fld, elt) - if (bitfield_overlaps_p (blen, bpos, fld, &flp)) - { - if (fld->replacement || (!flp.overlap_len && !flp.overlap_pos)) - { - generate_copy_inout (fld, false, generate_element_ref (fld), - seq_before_p); - mark_no_warning (fld); - if (seq_after_p) - generate_copy_inout (fld, true, generate_element_ref (fld), - seq_after_p); - } - else - { - tree flen = flp.overlap_len ? flp.overlap_len : flp.field_len; - tree fpos = flp.overlap_pos ? flp.overlap_pos : bitsize_int (0); - - sra_sync_for_bitfield_assignment (seq_before_p, seq_after_p, - flen, fpos, fld); - } - } + len = VEC_length (tree, parms); + for (i = 0; i < len; i++) + if (VEC_index (tree, parms, i) == base) + return i; + gcc_unreachable (); } -/* Scalarize a USE. To recap, this is either a simple reference to ELT, - if elt is scalar, or some occurrence of ELT that requires a complete - aggregate. IS_OUTPUT is true if ELT is being modified. */ +/* Convert the decisions made at the representative level into compact + parameter adjustments. REPRESENTATIVES are pointers to first + representatives of each param accesses, ADJUSTMENTS_COUNT is the expected + final number of adjustments. 
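
   Roughly, and only as a summary of the loop below: a parameter with no
   representative gets a single copy_param adjustment, one whose special
   "no accesses" representative survived gets a remove_param adjustment, and
   a parameter that is being split contributes one adjustment per
   representative, carrying the offset, type and by_ref flag of the intended
   replacement.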
*/ -static void -scalarize_use (struct sra_elt *elt, tree *expr_p, gimple_stmt_iterator *gsi, - bool is_output, bool use_all) +static ipa_parm_adjustment_vec +turn_representatives_into_adjustments (VEC (access_p, heap) *representatives, + int adjustments_count) { - gimple stmt = gsi_stmt (*gsi); - tree bfexpr; + VEC (tree, heap) *parms; + ipa_parm_adjustment_vec adjustments; + tree parm; + int i; - if (elt->replacement) + gcc_assert (adjustments_count > 0); + parms = ipa_get_vector_of_formal_parms (current_function_decl); + adjustments = VEC_alloc (ipa_parm_adjustment_t, heap, adjustments_count); + parm = DECL_ARGUMENTS (current_function_decl); + for (i = 0; i < func_param_count; i++, parm = DECL_CHAIN (parm)) { - tree replacement = elt->replacement; + struct access *repr = VEC_index (access_p, representatives, i); - /* If we have a replacement, then updating the reference is as - simple as modifying the existing statement in place. */ - if (is_output - && TREE_CODE (elt->replacement) == BIT_FIELD_REF - && is_gimple_reg (TREE_OPERAND (elt->replacement, 0)) - && is_gimple_assign (stmt) - && gimple_assign_lhs_ptr (stmt) == expr_p) - { - gimple_seq newseq; - /* RHS must be a single operand. */ - gcc_assert (gimple_assign_single_p (stmt)); - newseq = sra_build_elt_assignment (elt, gimple_assign_rhs1 (stmt)); - sra_replace (gsi, newseq); - return; - } - else if (!is_output - && TREE_CODE (elt->replacement) == BIT_FIELD_REF - && is_gimple_assign (stmt) - && gimple_assign_rhs1_ptr (stmt) == expr_p) + if (!repr || no_accesses_p (repr)) { - tree tmp = make_rename_temp - (TREE_TYPE (gimple_assign_lhs (stmt)), "SR"); - gimple_seq newseq = sra_build_assignment (tmp, REPLDUP (elt->replacement)); - - sra_insert_before (gsi, newseq); - replacement = tmp; + struct ipa_parm_adjustment *adj; + + adj = VEC_quick_push (ipa_parm_adjustment_t, adjustments, NULL); + memset (adj, 0, sizeof (*adj)); + adj->base_index = get_param_index (parm, parms); + adj->base = parm; + if (!repr) + adj->copy_param = 1; + else + adj->remove_param = 1; } - if (is_output) - update_stmt_if_modified (stmt); - *expr_p = REPLDUP (replacement); - update_stmt (stmt); - } - else if (use_all && is_output - && is_gimple_assign (stmt) - && TREE_CODE (bfexpr - = gimple_assign_lhs (stmt)) == BIT_FIELD_REF - && &TREE_OPERAND (bfexpr, 0) == expr_p - && INTEGRAL_TYPE_P (TREE_TYPE (bfexpr)) - && TREE_CODE (TREE_TYPE (*expr_p)) == RECORD_TYPE) - { - gimple_seq seq_before = NULL; - gimple_seq seq_after = NULL; - tree blen = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 1)); - tree bpos = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 2)); - bool update = false; - - if (!elt->use_block_copy) + else { - tree type = TREE_TYPE (bfexpr); - tree var = make_rename_temp (type, "SR"), tmp, vpos; - gimple st; - - gimple_assign_set_lhs (stmt, var); - update = true; + struct ipa_parm_adjustment *adj; + int index = get_param_index (parm, parms); - if (!TYPE_UNSIGNED (type)) + for (; repr; repr = repr->next_grp) { - type = unsigned_type_for (type); - tmp = make_rename_temp (type, "SR"); - st = gimple_build_assign (tmp, fold_convert (type, var)); - gimple_seq_add_stmt (&seq_after, st); - var = tmp; - } + adj = VEC_quick_push (ipa_parm_adjustment_t, adjustments, NULL); + memset (adj, 0, sizeof (*adj)); + gcc_assert (repr->base == parm); + adj->base_index = index; + adj->base = repr->base; + adj->type = repr->type; + adj->alias_ptr_type = reference_alias_ptr_type (repr->expr); + adj->offset = repr->offset; + adj->by_ref = (POINTER_TYPE_P (TREE_TYPE 
(repr->base)) + && (repr->grp_maybe_modified + || repr->grp_not_necessarilly_dereferenced)); - /* If VAR is wider than BLEN bits, it is padded at the - most-significant end. We want to set VPOS such that - would refer to the - least-significant BLEN bits of VAR. */ - if (BYTES_BIG_ENDIAN) - vpos = size_binop (MINUS_EXPR, TYPE_SIZE (type), blen); - else - vpos = bitsize_int (0); - sra_explode_bitfield_assignment - (var, vpos, false, &seq_after, blen, bpos, elt); + } } - else - sra_sync_for_bitfield_assignment - (&seq_before, &seq_after, blen, bpos, elt); + } + VEC_free (tree, heap, parms); + return adjustments; +} - if (seq_before) - { - mark_all_v_defs_seq (seq_before); - sra_insert_before (gsi, seq_before); - } - if (seq_after) - { - mark_all_v_defs_seq (seq_after); - sra_insert_after (gsi, seq_after); - } +/* Analyze the collected accesses and produce a plan what to do with the + parameters in the form of adjustments, NULL meaning nothing. */ - if (update) - update_stmt (stmt); +static ipa_parm_adjustment_vec +analyze_all_param_acesses (void) +{ + enum ipa_splicing_result repr_state; + bool proceed = false; + int i, adjustments_count = 0; + VEC (access_p, heap) *representatives; + ipa_parm_adjustment_vec adjustments; + + repr_state = splice_all_param_accesses (&representatives); + if (repr_state == NO_GOOD_ACCESS) + return NULL; + + /* If there are any parameters passed by reference which are not modified + directly, we need to check whether they can be modified indirectly. */ + if (repr_state == UNMODIF_BY_REF_ACCESSES) + { + analyze_caller_dereference_legality (representatives); + analyze_modified_params (representatives); } - else if (use_all && !is_output - && is_gimple_assign (stmt) - && TREE_CODE (bfexpr - = gimple_assign_rhs1 (stmt)) == BIT_FIELD_REF - && &TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == expr_p - && INTEGRAL_TYPE_P (TREE_TYPE (bfexpr)) - && TREE_CODE (TREE_TYPE (*expr_p)) == RECORD_TYPE) + + for (i = 0; i < func_param_count; i++) { - gimple_seq seq = NULL; - tree blen = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 1)); - tree bpos = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 2)); - bool update = false; + struct access *repr = VEC_index (access_p, representatives, i); - if (!elt->use_block_copy) + if (repr && !no_accesses_p (repr)) { - tree type = TREE_TYPE (bfexpr); - tree var = make_rename_temp (type, "SR"), tmp, vpos; - gimple st = NULL; - - gimple_assign_set_rhs1 (stmt, var); - update = true; - - if (!TYPE_UNSIGNED (type)) + if (repr->grp_scalar_ptr) { - type = unsigned_type_for (type); - tmp = make_rename_temp (type, "SR"); - st = gimple_build_assign (var, - fold_convert (TREE_TYPE (var), tmp)); - var = tmp; + adjustments_count++; + if (repr->grp_not_necessarilly_dereferenced + || repr->grp_maybe_modified) + VEC_replace (access_p, representatives, i, NULL); + else + { + proceed = true; + sra_stats.scalar_by_ref_to_by_val++; + } } - - gimple_seq_add_stmt (&seq, - gimple_build_assign - (var, build_int_cst_wide (type, 0, 0))); - - /* If VAR is wider than BLEN bits, it is padded at the - most-significant end. We want to set VPOS such that - would refer to the - least-significant BLEN bits of VAR. 
*/ - if (BYTES_BIG_ENDIAN) - vpos = size_binop (MINUS_EXPR, TYPE_SIZE (type), blen); else - vpos = bitsize_int (0); - sra_explode_bitfield_assignment - (var, vpos, true, &seq, blen, bpos, elt); + { + int new_components = decide_one_param_reduction (repr); - if (st) - gimple_seq_add_stmt (&seq, st); + if (new_components == 0) + { + VEC_replace (access_p, representatives, i, NULL); + adjustments_count++; + } + else + { + adjustments_count += new_components; + sra_stats.aggregate_params_reduced++; + sra_stats.param_reductions_created += new_components; + proceed = true; + } + } } else - sra_sync_for_bitfield_assignment - (&seq, NULL, blen, bpos, elt); - - if (seq) { - mark_all_v_defs_seq (seq); - sra_insert_before (gsi, seq); + if (no_accesses_p (repr)) + { + proceed = true; + sra_stats.deleted_unused_parameters++; + } + adjustments_count++; } - - if (update) - update_stmt (stmt); } + + if (!proceed && dump_file) + fprintf (dump_file, "NOT proceeding to change params.\n"); + + if (proceed) + adjustments = turn_representatives_into_adjustments (representatives, + adjustments_count); else + adjustments = NULL; + + VEC_free (access_p, heap, representatives); + return adjustments; +} + +/* If a parameter replacement identified by ADJ does not yet exist in the form + of declaration, create it and record it, otherwise return the previously + created one. */ + +static tree +get_replaced_param_substitute (struct ipa_parm_adjustment *adj) +{ + tree repl; + if (!adj->new_ssa_base) { - gimple_seq seq = NULL; - - /* Otherwise we need some copies. If ELT is being read, then we - want to store all (modified) sub-elements back into the - structure before the reference takes place. If ELT is being - written, then we want to load the changed values back into - our shadow variables. */ - /* ??? We don't check modified for reads, we just always write all of - the values. We should be able to record the SSA number of the VOP - for which the values were last read. If that number matches the - SSA number of the VOP in the current statement, then we needn't - emit an assignment. This would also eliminate double writes when - a structure is passed as more than one argument to a function call. - This optimization would be most effective if sra_walk_function - processed the blocks in dominator order. */ - - generate_copy_inout (elt, is_output, generate_element_ref (elt), &seq); - if (seq == NULL) - return; - mark_all_v_defs_seq (seq); - if (is_output) - sra_insert_after (gsi, seq); - else - { - sra_insert_before (gsi, seq); - if (use_all) - mark_no_warning (elt); - } + char *pretty_name = make_fancy_name (adj->base); + + repl = create_tmp_reg (TREE_TYPE (adj->base), "ISR"); + DECL_NAME (repl) = get_identifier (pretty_name); + obstack_free (&name_obstack, pretty_name); + + add_referenced_var (repl); + adj->new_ssa_base = repl; } + else + repl = adj->new_ssa_base; + return repl; } -/* Scalarize a COPY. To recap, this is an assignment statement between - two scalarizable references, LHS_ELT and RHS_ELT. */ +/* Find the first adjustment for a particular parameter BASE in a vector of + ADJUSTMENTS which is not a copy_param. Return NULL if there is no such + adjustment. 
*/ -static void -scalarize_copy (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt, - gimple_stmt_iterator *gsi) +static struct ipa_parm_adjustment * +get_adjustment_for_base (ipa_parm_adjustment_vec adjustments, tree base) { - gimple_seq seq; - gimple stmt; + int i, len; - if (lhs_elt->replacement && rhs_elt->replacement) + len = VEC_length (ipa_parm_adjustment_t, adjustments); + for (i = 0; i < len; i++) { - /* If we have two scalar operands, modify the existing statement. */ - stmt = gsi_stmt (*gsi); + struct ipa_parm_adjustment *adj; - /* See the commentary in sra_walk_function concerning - RETURN_EXPR, and why we should never see one here. */ - gcc_assert (is_gimple_assign (stmt)); - gcc_assert (gimple_assign_copy_p (stmt)); - - - gimple_assign_set_lhs (stmt, lhs_elt->replacement); - gimple_assign_set_rhs1 (stmt, REPLDUP (rhs_elt->replacement)); - update_stmt (stmt); + adj = VEC_index (ipa_parm_adjustment_t, adjustments, i); + if (!adj->copy_param && adj->base == base) + return adj; } - else if (lhs_elt->use_block_copy || rhs_elt->use_block_copy) - { - /* If either side requires a block copy, then sync the RHS back - to the original structure, leave the original assignment - statement (which will perform the block copy), then load the - LHS values out of its now-updated original structure. */ - /* ??? Could perform a modified pair-wise element copy. That - would at least allow those elements that are instantiated in - both structures to be optimized well. */ - seq = NULL; - generate_copy_inout (rhs_elt, false, - generate_element_ref (rhs_elt), &seq); - if (seq) - { - mark_all_v_defs_seq (seq); - sra_insert_before (gsi, seq); - } + return NULL; +} - seq = NULL; - generate_copy_inout (lhs_elt, true, - generate_element_ref (lhs_elt), &seq); - if (seq) - { - mark_all_v_defs_seq (seq); - sra_insert_after (gsi, seq); - } - } +/* If the statement STMT defines an SSA_NAME of a parameter which is to be + removed because its value is not used, replace the SSA_NAME with a one + relating to a created VAR_DECL together all of its uses and return true. + ADJUSTMENTS is a pointer to an adjustments vector. */ + +static bool +replace_removed_params_ssa_names (gimple stmt, + ipa_parm_adjustment_vec adjustments) +{ + struct ipa_parm_adjustment *adj; + tree lhs, decl, repl, name; + + if (gimple_code (stmt) == GIMPLE_PHI) + lhs = gimple_phi_result (stmt); + else if (is_gimple_assign (stmt)) + lhs = gimple_assign_lhs (stmt); + else if (is_gimple_call (stmt)) + lhs = gimple_call_lhs (stmt); else - { - /* Otherwise both sides must be fully instantiated. In which - case perform pair-wise element assignments and replace the - original block copy statement. 
*/ + gcc_unreachable (); + + if (TREE_CODE (lhs) != SSA_NAME) + return false; + decl = SSA_NAME_VAR (lhs); + if (TREE_CODE (decl) != PARM_DECL) + return false; + + adj = get_adjustment_for_base (adjustments, decl); + if (!adj) + return false; - stmt = gsi_stmt (*gsi); - update_stmt_if_modified (stmt); + repl = get_replaced_param_substitute (adj); + name = make_ssa_name (repl, stmt); - seq = NULL; - generate_element_copy (lhs_elt, rhs_elt, &seq); - gcc_assert (seq); - mark_all_v_defs_seq (seq); - sra_replace (gsi, seq); + if (dump_file) + { + fprintf (dump_file, "replacing an SSA name of a removed param "); + print_generic_expr (dump_file, lhs, 0); + fprintf (dump_file, " with "); + print_generic_expr (dump_file, name, 0); + fprintf (dump_file, "\n"); } + + if (is_gimple_assign (stmt)) + gimple_assign_set_lhs (stmt, name); + else if (is_gimple_call (stmt)) + gimple_call_set_lhs (stmt, name); + else + gimple_phi_set_result (stmt, name); + + replace_uses_by (lhs, name); + release_ssa_name (lhs); + return true; } -/* Scalarize an INIT. To recap, this is an assignment to a scalarizable - reference from some form of constructor: CONSTRUCTOR, COMPLEX_CST or - COMPLEX_EXPR. If RHS is NULL, it should be treated as an empty - CONSTRUCTOR. */ +/* If the expression *EXPR should be replaced by a reduction of a parameter, do + so. ADJUSTMENTS is a pointer to a vector of adjustments. CONVERT + specifies whether the function should care about type incompatibility the + current and new expressions. If it is false, the function will leave + incompatibility issues to the caller. Return true iff the expression + was modified. */ -static void -scalarize_init (struct sra_elt *lhs_elt, tree rhs, gimple_stmt_iterator *gsi) +static bool +sra_ipa_modify_expr (tree *expr, bool convert, + ipa_parm_adjustment_vec adjustments) { - bool result = true; - gimple_seq seq = NULL, init_seq = NULL; + int i, len; + struct ipa_parm_adjustment *adj, *cand = NULL; + HOST_WIDE_INT offset, size, max_size; + tree base, src; - /* Generate initialization statements for all members extant in the RHS. */ - if (rhs) - { - /* Unshare the expression just in case this is from a decl's initial. */ - rhs = unshare_expr (rhs); - result = generate_element_init (lhs_elt, rhs, &init_seq); - } + len = VEC_length (ipa_parm_adjustment_t, adjustments); - if (!result) + if (TREE_CODE (*expr) == BIT_FIELD_REF + || TREE_CODE (*expr) == IMAGPART_EXPR + || TREE_CODE (*expr) == REALPART_EXPR) { - /* If we failed to convert the entire initializer, then we must - leave the structure assignment in place and must load values - from the structure into the slots for which we did not find - constants. The easiest way to do this is to generate a complete - copy-out, and then follow that with the constant assignments - that we were able to build. DCE will clean things up. */ - gimple_seq seq0 = NULL; - generate_copy_inout (lhs_elt, true, generate_element_ref (lhs_elt), - &seq0); - gimple_seq_add_seq (&seq0, seq); - seq = seq0; + expr = &TREE_OPERAND (*expr, 0); + convert = true; } - else + + base = get_ref_base_and_extent (*expr, &offset, &size, &max_size); + if (!base || size == -1 || max_size == -1) + return false; + + if (TREE_CODE (base) == MEM_REF) { - /* CONSTRUCTOR is defined such that any member not mentioned is assigned - a zero value. Initialize the rest of the instantiated elements. 
*/ - generate_element_zero (lhs_elt, &seq); - gimple_seq_add_seq (&seq, init_seq); + offset += mem_ref_offset (base).low * BITS_PER_UNIT; + base = TREE_OPERAND (base, 0); } - if (lhs_elt->use_block_copy || !result) + base = get_ssa_base_param (base); + if (!base || TREE_CODE (base) != PARM_DECL) + return false; + + for (i = 0; i < len; i++) { - /* Since LHS is not fully instantiated, we must leave the structure - assignment in place. Treating this case differently from a USE - exposes constants to later optimizations. */ - if (seq) + adj = VEC_index (ipa_parm_adjustment_t, adjustments, i); + + if (adj->base == base && + (adj->offset == offset || adj->remove_param)) { - mark_all_v_defs_seq (seq); - sra_insert_after (gsi, seq); + cand = adj; + break; } } + if (!cand || cand->copy_param || cand->remove_param) + return false; + + if (cand->by_ref) + src = build_simple_mem_ref (cand->reduction); else + src = cand->reduction; + + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "About to replace expr "); + print_generic_expr (dump_file, *expr, 0); + fprintf (dump_file, " with "); + print_generic_expr (dump_file, src, 0); + fprintf (dump_file, "\n"); + } + + if (convert && !useless_type_conversion_p (TREE_TYPE (*expr), cand->type)) { - /* The LHS is fully instantiated. The list of initializations - replaces the original structure assignment. */ - gcc_assert (seq); - update_stmt_if_modified (gsi_stmt (*gsi)); - mark_all_v_defs_seq (seq); - sra_replace (gsi, seq); + tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*expr), src); + *expr = vce; } + else + *expr = src; + return true; } -/* A subroutine of scalarize_ldst called via walk_tree. Set TREE_NO_TRAP - on all INDIRECT_REFs. */ +/* If the statement pointed to by STMT_PTR contains any expressions that need + to replaced with a different one as noted by ADJUSTMENTS, do so. Handle any + potential type incompatibilities (GSI is used to accommodate conversion + statements and must point to the statement). Return true iff the statement + was modified. */ -static tree -mark_notrap (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) +static bool +sra_ipa_modify_assign (gimple *stmt_ptr, gimple_stmt_iterator *gsi, + ipa_parm_adjustment_vec adjustments) { - tree t = *tp; + gimple stmt = *stmt_ptr; + tree *lhs_p, *rhs_p; + bool any; + + if (!gimple_assign_single_p (stmt)) + return false; - if (TREE_CODE (t) == INDIRECT_REF) + rhs_p = gimple_assign_rhs1_ptr (stmt); + lhs_p = gimple_assign_lhs_ptr (stmt); + + any = sra_ipa_modify_expr (rhs_p, false, adjustments); + any |= sra_ipa_modify_expr (lhs_p, false, adjustments); + if (any) { - TREE_THIS_NOTRAP (t) = 1; - *walk_subtrees = 0; + tree new_rhs = NULL_TREE; + + if (!useless_type_conversion_p (TREE_TYPE (*lhs_p), TREE_TYPE (*rhs_p))) + { + if (TREE_CODE (*rhs_p) == CONSTRUCTOR) + { + /* V_C_Es of constructors can cause trouble (PR 42714). */ + if (is_gimple_reg_type (TREE_TYPE (*lhs_p))) + *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p)); + else + *rhs_p = build_constructor (TREE_TYPE (*lhs_p), 0); + } + else + new_rhs = fold_build1_loc (gimple_location (stmt), + VIEW_CONVERT_EXPR, TREE_TYPE (*lhs_p), + *rhs_p); + } + else if (REFERENCE_CLASS_P (*rhs_p) + && is_gimple_reg_type (TREE_TYPE (*lhs_p)) + && !is_gimple_reg (*lhs_p)) + /* This can happen when an assignment in between two single field + structures is turned into an assignment in between two pointers to + scalars (PR 42237). 
*/ + new_rhs = *rhs_p; + + if (new_rhs) + { + tree tmp = force_gimple_operand_gsi (gsi, new_rhs, true, NULL_TREE, + true, GSI_SAME_STMT); + + gimple_assign_set_rhs_from_tree (gsi, tmp); + } + + return true; } - else if (IS_TYPE_OR_DECL_P (t)) - *walk_subtrees = 0; - return NULL; + return false; } -/* Scalarize a LDST. To recap, this is an assignment between one scalarizable - reference ELT and one non-scalarizable reference OTHER. IS_OUTPUT is true - if ELT is on the left-hand side. */ +/* Traverse the function body and all modifications as described in + ADJUSTMENTS. Return true iff the CFG has been changed. */ -static void -scalarize_ldst (struct sra_elt *elt, tree other, - gimple_stmt_iterator *gsi, bool is_output) +static bool +ipa_sra_modify_function_body (ipa_parm_adjustment_vec adjustments) { - /* Shouldn't have gotten called for a scalar. */ - gcc_assert (!elt->replacement); + bool cfg_changed = false; + basic_block bb; - if (elt->use_block_copy) - { - /* Since ELT is not fully instantiated, we have to leave the - block copy in place. Treat this as a USE. */ - scalarize_use (elt, NULL, gsi, is_output, false); - } - else + FOR_EACH_BB (bb) { - /* The interesting case is when ELT is fully instantiated. In this - case we can have each element stored/loaded directly to/from the - corresponding slot in OTHER. This avoids a block copy. */ + gimple_stmt_iterator gsi; - gimple_seq seq = NULL; - gimple stmt = gsi_stmt (*gsi); + for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) + replace_removed_params_ssa_names (gsi_stmt (gsi), adjustments); - update_stmt_if_modified (stmt); - generate_copy_inout (elt, is_output, other, &seq); - gcc_assert (seq); - mark_all_v_defs_seq (seq); - - /* Preserve EH semantics. */ - if (stmt_ends_bb_p (stmt)) + gsi = gsi_start_bb (bb); + while (!gsi_end_p (gsi)) { - gimple_stmt_iterator si; - gimple first; - gimple_seq blist = NULL; - bool thr = stmt_could_throw_p (stmt); - - /* If the last statement of this BB created an EH edge - before scalarization, we have to locate the first - statement that can throw in the new statement list and - use that as the last statement of this BB, such that EH - semantics is preserved. All statements up to this one - are added to the same BB. All other statements in the - list will be added to normal outgoing edges of the same - BB. If they access any memory, it's the same memory, so - we can assume they won't throw. */ - si = gsi_start (seq); - for (first = gsi_stmt (si); - thr && !gsi_end_p (si) && !stmt_could_throw_p (first); - first = gsi_stmt (si)) + gimple stmt = gsi_stmt (gsi); + bool modified = false; + tree *t; + unsigned i; + + switch (gimple_code (stmt)) { - gsi_remove (&si, false); - gimple_seq_add_stmt (&blist, first); - } + case GIMPLE_RETURN: + t = gimple_return_retval_ptr (stmt); + if (*t != NULL_TREE) + modified |= sra_ipa_modify_expr (t, true, adjustments); + break; - /* Extract the first remaining statement from LIST, this is - the EH statement if there is one. */ - gsi_remove (&si, false); + case GIMPLE_ASSIGN: + modified |= sra_ipa_modify_assign (&stmt, &gsi, adjustments); + modified |= replace_removed_params_ssa_names (stmt, adjustments); + break; - if (blist) - sra_insert_before (gsi, blist); + case GIMPLE_CALL: + /* Operands must be processed before the lhs. */ + for (i = 0; i < gimple_call_num_args (stmt); i++) + { + t = gimple_call_arg_ptr (stmt, i); + modified |= sra_ipa_modify_expr (t, true, adjustments); + } - /* Replace the old statement with this new representative. 
*/ - gsi_replace (gsi, first, true); + if (gimple_call_lhs (stmt)) + { + t = gimple_call_lhs_ptr (stmt); + modified |= sra_ipa_modify_expr (t, false, adjustments); + modified |= replace_removed_params_ssa_names (stmt, + adjustments); + } + break; - if (!gsi_end_p (si)) - { - /* If any reference would trap, then they all would. And more - to the point, the first would. Therefore none of the rest - will trap since the first didn't. Indicate this by - iterating over the remaining statements and set - TREE_THIS_NOTRAP in all INDIRECT_REFs. */ - do + case GIMPLE_ASM: + for (i = 0; i < gimple_asm_ninputs (stmt); i++) + { + t = &TREE_VALUE (gimple_asm_input_op (stmt, i)); + modified |= sra_ipa_modify_expr (t, true, adjustments); + } + for (i = 0; i < gimple_asm_noutputs (stmt); i++) { - walk_gimple_stmt (&si, NULL, mark_notrap, NULL); - gsi_next (&si); + t = &TREE_VALUE (gimple_asm_output_op (stmt, i)); + modified |= sra_ipa_modify_expr (t, false, adjustments); } - while (!gsi_end_p (si)); + break; - insert_edge_copies_seq (seq, gsi_bb (*gsi)); + default: + break; + } + + if (modified) + { + update_stmt (stmt); + if (maybe_clean_eh_stmt (stmt) + && gimple_purge_dead_eh_edges (gimple_bb (stmt))) + cfg_changed = true; } + gsi_next (&gsi); } - else - sra_replace (gsi, seq); } + + return cfg_changed; } -/* Generate initializations for all scalarizable parameters. */ +/* Call gimple_debug_bind_reset_value on all debug statements describing + gimple register parameters that are being removed or replaced. */ static void -scalarize_parms (void) +sra_ipa_reset_debug_stmts (ipa_parm_adjustment_vec adjustments) { - gimple_seq seq = NULL; - unsigned i; - bitmap_iterator bi; + int i, len; + gimple_stmt_iterator *gsip = NULL, gsi; - EXECUTE_IF_SET_IN_BITMAP (needs_copy_in, 0, i, bi) + if (MAY_HAVE_DEBUG_STMTS && single_succ_p (ENTRY_BLOCK_PTR)) { - tree var = referenced_var (i); - struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT); - generate_copy_inout (elt, true, var, &seq); + gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR)); + gsip = &gsi; } - - if (seq) + len = VEC_length (ipa_parm_adjustment_t, adjustments); + for (i = 0; i < len; i++) { - insert_edge_copies_seq (seq, ENTRY_BLOCK_PTR); - mark_all_v_defs_seq (seq); + struct ipa_parm_adjustment *adj; + imm_use_iterator ui; + gimple stmt, def_temp; + tree name, vexpr, copy = NULL_TREE; + use_operand_p use_p; + + adj = VEC_index (ipa_parm_adjustment_t, adjustments, i); + if (adj->copy_param || !is_gimple_reg (adj->base)) + continue; + name = gimple_default_def (cfun, adj->base); + vexpr = NULL; + if (name) + FOR_EACH_IMM_USE_STMT (stmt, ui, name) + { + /* All other users must have been removed by + ipa_sra_modify_function_body. */ + gcc_assert (is_gimple_debug (stmt)); + if (vexpr == NULL && gsip != NULL) + { + gcc_assert (TREE_CODE (adj->base) == PARM_DECL); + vexpr = make_node (DEBUG_EXPR_DECL); + def_temp = gimple_build_debug_source_bind (vexpr, adj->base, + NULL); + DECL_ARTIFICIAL (vexpr) = 1; + TREE_TYPE (vexpr) = TREE_TYPE (name); + DECL_MODE (vexpr) = DECL_MODE (adj->base); + gsi_insert_before (gsip, def_temp, GSI_SAME_STMT); + } + if (vexpr) + { + FOR_EACH_IMM_USE_ON_STMT (use_p, ui) + SET_USE (use_p, vexpr); + } + else + gimple_debug_bind_reset_value (stmt); + update_stmt (stmt); + } + /* Create a VAR_DECL for debug info purposes. 
*/ + if (!DECL_IGNORED_P (adj->base)) + { + copy = build_decl (DECL_SOURCE_LOCATION (current_function_decl), + VAR_DECL, DECL_NAME (adj->base), + TREE_TYPE (adj->base)); + if (DECL_PT_UID_SET_P (adj->base)) + SET_DECL_PT_UID (copy, DECL_PT_UID (adj->base)); + TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (adj->base); + TREE_READONLY (copy) = TREE_READONLY (adj->base); + TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (adj->base); + DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (adj->base); + DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (adj->base); + DECL_IGNORED_P (copy) = DECL_IGNORED_P (adj->base); + DECL_ABSTRACT_ORIGIN (copy) = DECL_ORIGIN (adj->base); + DECL_SEEN_IN_BIND_EXPR_P (copy) = 1; + SET_DECL_RTL (copy, 0); + TREE_USED (copy) = 1; + DECL_CONTEXT (copy) = current_function_decl; + add_referenced_var (copy); + add_local_decl (cfun, copy); + DECL_CHAIN (copy) = + BLOCK_VARS (DECL_INITIAL (current_function_decl)); + BLOCK_VARS (DECL_INITIAL (current_function_decl)) = copy; + } + if (gsip != NULL && copy && target_for_debug_bind (adj->base)) + { + gcc_assert (TREE_CODE (adj->base) == PARM_DECL); + if (vexpr) + def_temp = gimple_build_debug_bind (copy, vexpr, NULL); + else + def_temp = gimple_build_debug_source_bind (copy, adj->base, + NULL); + gsi_insert_before (gsip, def_temp, GSI_SAME_STMT); + } } } -/* Entry point to phase 4. Update the function to match replacements. */ +/* Return false iff all callers have at least as many actual arguments as there + are formal parameters in the current function. */ -static void -scalarize_function (void) +static bool +not_all_callers_have_enough_arguments_p (struct cgraph_node *node, + void *data ATTRIBUTE_UNUSED) { - static const struct sra_walk_fns fns = { - scalarize_use, scalarize_copy, scalarize_init, scalarize_ldst, false - }; + struct cgraph_edge *cs; + for (cs = node->callers; cs; cs = cs->next_caller) + if (!callsite_has_enough_arguments_p (cs->call_stmt)) + return true; - sra_walk_function (&fns); - scalarize_parms (); - gsi_commit_edge_inserts (); + return false; } - -/* Debug helper function. Print ELT in a nice human-readable format. */ +/* Convert all callers of NODE. */ -static void -dump_sra_elt_name (FILE *f, struct sra_elt *elt) +static bool +convert_callers_for_node (struct cgraph_node *node, + void *data) { - if (elt->parent && TREE_CODE (elt->parent->type) == COMPLEX_TYPE) + ipa_parm_adjustment_vec adjustments = (ipa_parm_adjustment_vec)data; + bitmap recomputed_callers = BITMAP_ALLOC (NULL); + struct cgraph_edge *cs; + + for (cs = node->callers; cs; cs = cs->next_caller) { - fputs (elt->element == integer_zero_node ? "__real__ " : "__imag__ ", f); - dump_sra_elt_name (f, elt->parent); + current_function_decl = cs->caller->decl; + push_cfun (DECL_STRUCT_FUNCTION (cs->caller->decl)); + + if (dump_file) + fprintf (dump_file, "Adjusting call (%i -> %i) %s -> %s\n", + cs->caller->uid, cs->callee->uid, + cgraph_node_name (cs->caller), + cgraph_node_name (cs->callee)); + + ipa_modify_call_arguments (cs, cs->call_stmt, adjustments); + + pop_cfun (); } - else + + for (cs = node->callers; cs; cs = cs->next_caller) + if (bitmap_set_bit (recomputed_callers, cs->caller->uid) + && gimple_in_ssa_p (DECL_STRUCT_FUNCTION (cs->caller->decl))) + compute_inline_parameters (cs->caller, true); + BITMAP_FREE (recomputed_callers); + + return true; +} + +/* Convert all callers of NODE to pass parameters as given in ADJUSTMENTS. 
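
   As a hypothetical illustration of the effect (names made up): a call such
   as use_s (&local_s) in a caller ends up rewritten by
   ipa_modify_call_arguments into something along the lines of
   use_s.isra.0 (local_s.a) when only the field A of the pointed-to aggregate
   was found to be used.  Recursive calls inside the new body itself are
   patched up separately below.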
*/ + +static void +convert_callers (struct cgraph_node *node, tree old_decl, + ipa_parm_adjustment_vec adjustments) +{ + tree old_cur_fndecl = current_function_decl; + basic_block this_block; + + cgraph_for_node_and_aliases (node, convert_callers_for_node, + adjustments, false); + + current_function_decl = old_cur_fndecl; + + if (!encountered_recursive_call) + return; + + FOR_EACH_BB (this_block) { - if (elt->parent) - dump_sra_elt_name (f, elt->parent); - if (DECL_P (elt->element)) - { - if (TREE_CODE (elt->element) == FIELD_DECL) - fputc ('.', f); - print_generic_expr (f, elt->element, dump_flags); + gimple_stmt_iterator gsi; + + for (gsi = gsi_start_bb (this_block); !gsi_end_p (gsi); gsi_next (&gsi)) + { + gimple stmt = gsi_stmt (gsi); + tree call_fndecl; + if (gimple_code (stmt) != GIMPLE_CALL) + continue; + call_fndecl = gimple_call_fndecl (stmt); + if (call_fndecl == old_decl) + { + if (dump_file) + fprintf (dump_file, "Adjusting recursive call"); + gimple_call_set_fndecl (stmt, node->decl); + ipa_modify_call_arguments (NULL, stmt, adjustments); + } } - else if (TREE_CODE (elt->element) == BIT_FIELD_REF) - fprintf (f, "$B" HOST_WIDE_INT_PRINT_DEC "F" HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (TREE_OPERAND (elt->element, 2), 1), - tree_low_cst (TREE_OPERAND (elt->element, 1), 1)); - else if (TREE_CODE (elt->element) == RANGE_EXPR) - fprintf (f, "["HOST_WIDE_INT_PRINT_DEC".."HOST_WIDE_INT_PRINT_DEC"]", - TREE_INT_CST_LOW (TREE_OPERAND (elt->element, 0)), - TREE_INT_CST_LOW (TREE_OPERAND (elt->element, 1))); - else - fprintf (f, "[" HOST_WIDE_INT_PRINT_DEC "]", - TREE_INT_CST_LOW (elt->element)); } + + return; } -/* Likewise, but callable from the debugger. */ +/* Perform all the modification required in IPA-SRA for NODE to have parameters + as given in ADJUSTMENTS. Return true iff the CFG has been changed. */ -void -debug_sra_elt_name (struct sra_elt *elt) +static bool +modify_function (struct cgraph_node *node, ipa_parm_adjustment_vec adjustments) { - dump_sra_elt_name (stderr, elt); - fputc ('\n', stderr); + struct cgraph_node *new_node; + bool cfg_changed; + VEC (cgraph_edge_p, heap) * redirect_callers = collect_callers_of_node (node); + + rebuild_cgraph_edges (); + free_dominance_info (CDI_DOMINATORS); + pop_cfun (); + current_function_decl = NULL_TREE; + + new_node = cgraph_function_versioning (node, redirect_callers, NULL, NULL, + NULL, NULL, "isra"); + current_function_decl = new_node->decl; + push_cfun (DECL_STRUCT_FUNCTION (new_node->decl)); + + ipa_modify_formal_parameters (current_function_decl, adjustments, "ISRA"); + cfg_changed = ipa_sra_modify_function_body (adjustments); + sra_ipa_reset_debug_stmts (adjustments); + convert_callers (new_node, node->decl, adjustments); + cgraph_make_node_local (new_node); + return cfg_changed; } -void -sra_init_cache (void) +/* Return false the function is apparently unsuitable for IPA-SRA based on it's + attributes, return true otherwise. NODE is the cgraph node of the current + function. 
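
   In short, the checks below reject functions that cannot be made local or
   cannot change their signature, functions that are not versionable, virtual
   methods, comdat or external functions above the auto-inlining size limit,
   functions without callers in this compilation unit, stdarg functions and
   functions whose type carries attributes.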
*/ + +static bool +ipa_sra_preliminary_function_checks (struct cgraph_node *node) { - if (sra_type_decomp_cache) - return; + if (!cgraph_node_can_be_local_p (node)) + { + if (dump_file) + fprintf (dump_file, "Function not local to this compilation unit.\n"); + return false; + } - sra_type_decomp_cache = BITMAP_ALLOC (NULL); - sra_type_inst_cache = BITMAP_ALLOC (NULL); -} + if (!node->local.can_change_signature) + { + if (dump_file) + fprintf (dump_file, "Function can not change signature.\n"); + return false; + } + + if (!tree_versionable_function_p (node->decl)) + { + if (dump_file) + fprintf (dump_file, "Function is not versionable.\n"); + return false; + } + if (DECL_VIRTUAL_P (current_function_decl)) + { + if (dump_file) + fprintf (dump_file, "Function is a virtual method.\n"); + return false; + } -/* Main entry point. */ + if ((DECL_COMDAT (node->decl) || DECL_EXTERNAL (node->decl)) + && inline_summary(node)->size >= MAX_INLINE_INSNS_AUTO) + { + if (dump_file) + fprintf (dump_file, "Function too big to be made truly local.\n"); + return false; + } -static unsigned int -tree_sra (void) -{ - /* Initialize local variables. */ - gcc_obstack_init (&sra_obstack); - sra_candidates = BITMAP_ALLOC (NULL); - needs_copy_in = BITMAP_ALLOC (NULL); - sra_init_cache (); - sra_map = htab_create (101, sra_elt_hash, sra_elt_eq, NULL); + if (!node->callers) + { + if (dump_file) + fprintf (dump_file, + "Function has no callers in this compilation unit.\n"); + return false; + } - /* Scan. If we find anything, instantiate and scalarize. */ - if (find_candidates_for_sra ()) + if (cfun->stdarg) { - scan_function (); - decide_instantiations (); - scalarize_function (); + if (dump_file) + fprintf (dump_file, "Function uses stdarg. \n"); + return false; } - /* Free allocated memory. */ - htab_delete (sra_map); - sra_map = NULL; - BITMAP_FREE (sra_candidates); - BITMAP_FREE (needs_copy_in); - BITMAP_FREE (sra_type_decomp_cache); - BITMAP_FREE (sra_type_inst_cache); - obstack_free (&sra_obstack, NULL); - return 0; + if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl))) + return false; + + return true; } +/* Perform early interprocedural SRA. 
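
   A hedged sketch of the intended transformation (the function and field
   names are made up for illustration only):

     struct S { int a; int b; int c; };

     static int
     get_a (struct S *p)
     {
       return p->a;
     }

   can, provided get_a is local to the unit and all of its callers are known,
   be replaced by a clone along the lines of

     static int
     get_a.isra.0 (int a)
     {
       return a;
     }

   with every call site rewritten to pass the value of the A field directly,
   so that the clone no longer dereferences the pointer at all.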
*/ + static unsigned int -tree_sra_early (void) +ipa_early_sra (void) { - unsigned int ret; + struct cgraph_node *node = cgraph_get_node (current_function_decl); + ipa_parm_adjustment_vec adjustments; + int ret = 0; + + if (!ipa_sra_preliminary_function_checks (node)) + return 0; + + sra_initialize (); + sra_mode = SRA_MODE_EARLY_IPA; + + if (!find_param_candidates ()) + { + if (dump_file) + fprintf (dump_file, "Function has no IPA-SRA candidates.\n"); + goto simple_out; + } + + if (cgraph_for_node_and_aliases (node, not_all_callers_have_enough_arguments_p, + NULL, true)) + { + if (dump_file) + fprintf (dump_file, "There are callers with insufficient number of " + "arguments.\n"); + goto simple_out; + } + + bb_dereferences = XCNEWVEC (HOST_WIDE_INT, + func_param_count + * last_basic_block_for_function (cfun)); + final_bbs = BITMAP_ALLOC (NULL); + + scan_function (); + if (encountered_apply_args) + { + if (dump_file) + fprintf (dump_file, "Function calls __builtin_apply_args().\n"); + goto out; + } + + if (encountered_unchangable_recursive_call) + { + if (dump_file) + fprintf (dump_file, "Function calls itself with insufficient " + "number of arguments.\n"); + goto out; + } - early_sra = true; - ret = tree_sra (); - early_sra = false; + adjustments = analyze_all_param_acesses (); + if (!adjustments) + goto out; + if (dump_file) + ipa_dump_param_adjustments (dump_file, adjustments, current_function_decl); + if (modify_function (node, adjustments)) + ret = TODO_update_ssa | TODO_cleanup_cfg; + else + ret = TODO_update_ssa; + VEC_free (ipa_parm_adjustment_t, heap, adjustments); + + statistics_counter_event (cfun, "Unused parameters deleted", + sra_stats.deleted_unused_parameters); + statistics_counter_event (cfun, "Scalar parameters converted to by-value", + sra_stats.scalar_by_ref_to_by_val); + statistics_counter_event (cfun, "Aggregate parameters broken up", + sra_stats.aggregate_params_reduced); + statistics_counter_event (cfun, "Aggregate parameter components created", + sra_stats.param_reductions_created); + + out: + BITMAP_FREE (final_bbs); + free (bb_dereferences); + simple_out: + sra_deinitialize (); return ret; } +/* Return if early ipa sra shall be performed. 
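   That is, return true iff -fipa-sra is in effect and the eipa_sra debug
   counter still allows the transformation.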
*/ static bool -gate_sra (void) +ipa_early_sra_gate (void) { - return flag_tree_sra != 0; + return flag_ipa_sra && dbg_cnt (eipa_sra); } -struct gimple_opt_pass pass_sra_early = +struct gimple_opt_pass pass_early_ipa_sra = { { GIMPLE_PASS, - "esra", /* name */ - gate_sra, /* gate */ - tree_sra_early, /* execute */ + "eipa_sra", /* name */ + ipa_early_sra_gate, /* gate */ + ipa_early_sra, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ - TV_TREE_SRA, /* tv_id */ - PROP_cfg | PROP_ssa, /* properties_required */ + TV_IPA_SRA, /* tv_id */ + 0, /* properties_required */ 0, /* properties_provided */ - 0, /* properties_destroyed */ + 0, /* properties_destroyed */ 0, /* todo_flags_start */ - TODO_dump_func - | TODO_update_ssa - | TODO_ggc_collect - | TODO_verify_ssa /* todo_flags_finish */ - } -}; - -struct gimple_opt_pass pass_sra = -{ - { - GIMPLE_PASS, - "sra", /* name */ - gate_sra, /* gate */ - tree_sra, /* execute */ - NULL, /* sub */ - NULL, /* next */ - 0, /* static_pass_number */ - TV_TREE_SRA, /* tv_id */ - PROP_cfg | PROP_ssa, /* properties_required */ - 0, /* properties_provided */ - 0, /* properties_destroyed */ - TODO_update_address_taken, /* todo_flags_start */ - TODO_dump_func - | TODO_update_ssa - | TODO_ggc_collect - | TODO_verify_ssa /* todo_flags_finish */ + TODO_dump_cgraph /* todo_flags_finish */ } };
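
A hedged usage note, relying only on standard GCC dump machinery rather than
anything defined in this file: the pass above is registered as a GIMPLE pass
named "eipa_sra" and gated on flag_ipa_sra, so its decisions can be inspected
with the usual per-pass dump switches, for example

  gcc -O2 -fipa-sra -fdump-tree-eipa_sra-details test.c

which should show the candidate parameters, the access representatives and the
final parameter adjustments printed by the dump calls in the code above.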