X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;ds=sidebyside;f=gcc%2Fgimple.c;h=9b8e1b1d7be704a274c9151f8f5c5784a0814da1;hb=15c581919d5e1aa94eb0023c17d25497dee0caaf;hp=ae0be4e86bc988df32e3fcbc3fc9fe67844f5c27;hpb=7f2d9047e1adab1993bd392999885c4023a176c7;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/gimple.c b/gcc/gimple.c index ae0be4e86bc..9b8e1b1d7be 100644 --- a/gcc/gimple.c +++ b/gcc/gimple.c @@ -1,6 +1,6 @@ /* Gimple IR support functions. - Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + Copyright 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. Contributed by Aldy Hernandez This file is part of GCC. @@ -29,24 +29,26 @@ along with GCC; see the file COPYING3. If not see #include "hard-reg-set.h" #include "basic-block.h" #include "gimple.h" -#include "toplev.h" #include "diagnostic.h" #include "tree-flow.h" #include "value-prof.h" #include "flags.h" #include "alias.h" #include "demangle.h" +#include "langhooks.h" /* Global type table. FIXME lto, it should be possible to re-use some of the type hashing routines in tree.c (type_hash_canon, type_hash_lookup, etc), but those assume that types were built with the various build_*_type routines which is not the case with the streamer. */ -static htab_t gimple_types; -static struct pointer_map_t *type_hash_cache; - -/* Global type comparison cache. */ -static htab_t gtc_visited; -static struct obstack gtc_ob; +static GTY((if_marked ("ggc_marked_p"), param_is (union tree_node))) + htab_t gimple_types; +static GTY((if_marked ("ggc_marked_p"), param_is (union tree_node))) + htab_t gimple_canonical_types; +static GTY((if_marked ("tree_int_map_marked_p"), param_is (struct tree_int_map))) + htab_t type_hash_cache; +static GTY((if_marked ("tree_int_map_marked_p"), param_is (struct tree_int_map))) + htab_t canonical_type_hash_cache; /* All the tuples have their operand vector (if present) at the very bottom of the structure. Therefore, the offset required to find the @@ -145,7 +147,7 @@ gimple_alloc_stat (enum gimple_code code, unsigned num_ops MEM_STAT_DECL) } #endif - stmt = (gimple) ggc_alloc_cleared_stat (size PASS_MEM_STAT); + stmt = ggc_alloc_cleared_gimple_statement_d_stat (size PASS_MEM_STAT); gimple_set_code (stmt, code); gimple_set_num_ops (stmt, num_ops); @@ -198,9 +200,25 @@ gimple_build_return (tree retval) return s; } -/* Helper for gimple_build_call, gimple_build_call_vec and - gimple_build_call_from_tree. Build the basic components of a - GIMPLE_CALL statement to function FN with NARGS arguments. */ +/* Reset alias information on call S. */ + +void +gimple_call_reset_alias_info (gimple s) +{ + if (gimple_call_flags (s) & ECF_CONST) + memset (gimple_call_use_set (s), 0, sizeof (struct pt_solution)); + else + pt_solution_reset (gimple_call_use_set (s)); + if (gimple_call_flags (s) & (ECF_CONST|ECF_PURE|ECF_NOVOPS)) + memset (gimple_call_clobber_set (s), 0, sizeof (struct pt_solution)); + else + pt_solution_reset (gimple_call_clobber_set (s)); +} + +/* Helper for gimple_build_call, gimple_build_call_valist, + gimple_build_call_vec and gimple_build_call_from_tree. Build the basic + components of a GIMPLE_CALL statement to function FN with NARGS + arguments. 
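+
+   For example, the public wrappers built on this helper let a pass
+   materialize a call `lhs = foo (a, b)' as follows (a minimal usage
+   sketch; FNDECL, A, B, LHS and GSI are trees and an iterator
+   supplied by the caller):
+
+     gimple call = gimple_build_call (fndecl, 2, a, b);
+     gimple_call_set_lhs (call, lhs);
+     gsi_insert_before (&gsi, call, GSI_SAME_STMT);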
*/ static inline gimple gimple_build_call_1 (tree fn, unsigned nargs) @@ -209,6 +227,8 @@ gimple_build_call_1 (tree fn, unsigned nargs) if (TREE_CODE (fn) == FUNCTION_DECL) fn = build_fold_addr_expr (fn); gimple_set_op (s, 1, fn); + gimple_call_set_fntype (s, TREE_TYPE (TREE_TYPE (fn))); + gimple_call_reset_alias_info (s); return s; } @@ -253,6 +273,79 @@ gimple_build_call (tree fn, unsigned nargs, ...) } +/* Build a GIMPLE_CALL statement to function FN. NARGS is the number of + arguments. AP contains the arguments. */ + +gimple +gimple_build_call_valist (tree fn, unsigned nargs, va_list ap) +{ + gimple call; + unsigned i; + + gcc_assert (TREE_CODE (fn) == FUNCTION_DECL || is_gimple_call_addr (fn)); + + call = gimple_build_call_1 (fn, nargs); + + for (i = 0; i < nargs; i++) + gimple_call_set_arg (call, i, va_arg (ap, tree)); + + return call; +} + + +/* Helper for gimple_build_call_internal and gimple_build_call_internal_vec. + Build the basic components of a GIMPLE_CALL statement to internal + function FN with NARGS arguments. */ + +static inline gimple +gimple_build_call_internal_1 (enum internal_fn fn, unsigned nargs) +{ + gimple s = gimple_build_with_ops (GIMPLE_CALL, ERROR_MARK, nargs + 3); + s->gsbase.subcode |= GF_CALL_INTERNAL; + gimple_call_set_internal_fn (s, fn); + gimple_call_reset_alias_info (s); + return s; +} + + +/* Build a GIMPLE_CALL statement to internal function FN. NARGS is + the number of arguments. The ... are the arguments. */ + +gimple +gimple_build_call_internal (enum internal_fn fn, unsigned nargs, ...) +{ + va_list ap; + gimple call; + unsigned i; + + call = gimple_build_call_internal_1 (fn, nargs); + va_start (ap, nargs); + for (i = 0; i < nargs; i++) + gimple_call_set_arg (call, i, va_arg (ap, tree)); + va_end (ap); + + return call; +} + + +/* Build a GIMPLE_CALL statement to internal function FN with the arguments + specified in vector ARGS. */ + +gimple +gimple_build_call_internal_vec (enum internal_fn fn, VEC(tree, heap) *args) +{ + unsigned i, nargs; + gimple call; + + nargs = VEC_length (tree, args); + call = gimple_build_call_internal_1 (fn, nargs); + for (i = 0; i < nargs; i++) + gimple_call_set_arg (call, i, VEC_index (tree, args, i)); + + return call; +} + + /* Build a GIMPLE_CALL statement from CALL_EXPR T. Note that T is assumed to be in GIMPLE form already. Minimal checking is done of this fact. */ @@ -279,8 +372,14 @@ gimple_build_call_from_tree (tree t) gimple_call_set_tail (call, CALL_EXPR_TAILCALL (t)); gimple_call_set_cannot_inline (call, CALL_CANNOT_INLINE_P (t)); gimple_call_set_return_slot_opt (call, CALL_EXPR_RETURN_SLOT_OPT (t)); - gimple_call_set_from_thunk (call, CALL_FROM_THUNK_P (t)); + if (fndecl + && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL + && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA) + gimple_call_set_alloca_for_var (call, CALL_ALLOCA_FOR_VAR_P (t)); + else + gimple_call_set_from_thunk (call, CALL_FROM_THUNK_P (t)); gimple_call_set_va_arg_pack (call, CALL_EXPR_VA_ARG_PACK (t)); + gimple_call_set_nothrow (call, TREE_NOTHROW (t)); gimple_set_no_warning (call, TREE_NO_WARNING (t)); return call; @@ -288,31 +387,40 @@ gimple_build_call_from_tree (tree t) /* Extract the operands and code for expression EXPR into *SUBCODE_P, - *OP1_P and *OP2_P respectively. */ + *OP1_P, *OP2_P and *OP3_P respectively. 
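+
+   For example, given the GENERIC expression `a + b' this stores
+   PLUS_EXPR in *SUBCODE_P, `a' in *OP1_P, `b' in *OP2_P and
+   NULL_TREE in *OP3_P; only ternary codes such as FMA_EXPR fill all
+   three slots.  A minimal sketch of a caller:
+
+     enum tree_code subcode;
+     tree op1, op2, op3;
+     extract_ops_from_tree_1 (expr, &subcode, &op1, &op2, &op3);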
*/ void -extract_ops_from_tree (tree expr, enum tree_code *subcode_p, tree *op1_p, - tree *op2_p) +extract_ops_from_tree_1 (tree expr, enum tree_code *subcode_p, tree *op1_p, + tree *op2_p, tree *op3_p) { enum gimple_rhs_class grhs_class; *subcode_p = TREE_CODE (expr); grhs_class = get_gimple_rhs_class (*subcode_p); - if (grhs_class == GIMPLE_BINARY_RHS) + if (grhs_class == GIMPLE_TERNARY_RHS) + { + *op1_p = TREE_OPERAND (expr, 0); + *op2_p = TREE_OPERAND (expr, 1); + *op3_p = TREE_OPERAND (expr, 2); + } + else if (grhs_class == GIMPLE_BINARY_RHS) { *op1_p = TREE_OPERAND (expr, 0); *op2_p = TREE_OPERAND (expr, 1); + *op3_p = NULL_TREE; } else if (grhs_class == GIMPLE_UNARY_RHS) { *op1_p = TREE_OPERAND (expr, 0); *op2_p = NULL_TREE; + *op3_p = NULL_TREE; } else if (grhs_class == GIMPLE_SINGLE_RHS) { *op1_p = expr; *op2_p = NULL_TREE; + *op3_p = NULL_TREE; } else gcc_unreachable (); @@ -328,10 +436,10 @@ gimple gimple_build_assign_stat (tree lhs, tree rhs MEM_STAT_DECL) { enum tree_code subcode; - tree op1, op2; + tree op1, op2, op3; - extract_ops_from_tree (rhs, &subcode, &op1, &op2); - return gimple_build_assign_with_ops_stat (subcode, lhs, op1, op2 + extract_ops_from_tree_1 (rhs, &subcode, &op1, &op2, &op3); + return gimple_build_assign_with_ops_stat (subcode, lhs, op1, op2, op3 PASS_MEM_STAT); } @@ -342,7 +450,7 @@ gimple_build_assign_stat (tree lhs, tree rhs MEM_STAT_DECL) gimple gimple_build_assign_with_ops_stat (enum tree_code subcode, tree lhs, tree op1, - tree op2 MEM_STAT_DECL) + tree op2, tree op3 MEM_STAT_DECL) { unsigned num_ops; gimple p; @@ -361,6 +469,12 @@ gimple_build_assign_with_ops_stat (enum tree_code subcode, tree lhs, tree op1, gimple_assign_set_rhs2 (p, op2); } + if (op3) + { + gcc_assert (num_ops > 3); + gimple_assign_set_rhs3 (p, op3); + } + return p; } @@ -411,7 +525,6 @@ void gimple_cond_get_ops_from_tree (tree cond, enum tree_code *code_p, tree *lhs_p, tree *rhs_p) { - location_t loc = EXPR_LOCATION (cond); gcc_assert (TREE_CODE_CLASS (TREE_CODE (cond)) == tcc_comparison || TREE_CODE (cond) == TRUTH_NOT_EXPR || is_gimple_min_invariant (cond) @@ -424,14 +537,14 @@ gimple_cond_get_ops_from_tree (tree cond, enum tree_code *code_p, { *code_p = EQ_EXPR; gcc_assert (*lhs_p && *rhs_p == NULL_TREE); - *rhs_p = fold_convert_loc (loc, TREE_TYPE (*lhs_p), integer_zero_node); + *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p)); } /* Canonicalize conditionals of the form 'if (VAL)' */ else if (TREE_CODE_CLASS (*code_p) != tcc_comparison) { *code_p = NE_EXPR; gcc_assert (*lhs_p && *rhs_p == NULL_TREE); - *rhs_p = fold_convert_loc (loc, TREE_TYPE (*lhs_p), integer_zero_node); + *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p)); } } @@ -620,7 +733,7 @@ gimple_build_eh_filter (tree types, gimple_seq failure) gimple gimple_build_eh_must_not_throw (tree decl) { - gimple p = gimple_alloc (GIMPLE_EH_MUST_NOT_THROW, 1); + gimple p = gimple_alloc (GIMPLE_EH_MUST_NOT_THROW, 0); gcc_assert (TREE_CODE (decl) == FUNCTION_DECL); gcc_assert (flags_from_decl_or_type (decl) & ECF_NORETURN); @@ -774,6 +887,30 @@ gimple_build_debug_bind_stat (tree var, tree value, gimple stmt MEM_STAT_DECL) } +/* Build a new GIMPLE_DEBUG_SOURCE_BIND statement. + + VAR is bound to VALUE; block and location are taken from STMT. 
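+
+   A minimal usage sketch (PARM, VALUE, STMT and GSI are supplied by
+   the caller):
+
+     gimple bind = gimple_build_debug_source_bind (parm, value, stmt);
+     gsi_insert_before (&gsi, bind, GSI_SAME_STMT);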
*/ + +gimple +gimple_build_debug_source_bind_stat (tree var, tree value, + gimple stmt MEM_STAT_DECL) +{ + gimple p = gimple_build_with_ops_stat (GIMPLE_DEBUG, + (unsigned)GIMPLE_DEBUG_SOURCE_BIND, 2 + PASS_MEM_STAT); + + gimple_debug_source_bind_set_var (p, var); + gimple_debug_source_bind_set_value (p, value); + if (stmt) + { + gimple_set_block (p, gimple_block (stmt)); + gimple_set_location (p, gimple_location (stmt)); + } + + return p; +} + + /* Build a GIMPLE_OMP_CRITICAL statement. BODY is the sequence of statements for which only one thread can execute. @@ -807,7 +944,8 @@ gimple_build_omp_for (gimple_seq body, tree clauses, size_t collapse, gimple_omp_set_body (p, body); gimple_omp_for_set_clauses (p, clauses); p->gimple_omp_for.collapse = collapse; - p->gimple_omp_for.iter = GGC_CNEWVEC (struct gimple_omp_for_iter, collapse); + p->gimple_omp_for.iter + = ggc_alloc_cleared_vec_gimple_omp_for_iter (collapse); if (pre_body) gimple_omp_for_set_pre_body (p, pre_body); @@ -1057,7 +1195,7 @@ gimple_seq_alloc (void) } else { - seq = (gimple_seq) ggc_alloc_cleared (sizeof (*seq)); + seq = ggc_alloc_cleared_gimple_seq_d (); #ifdef GATHER_STATISTICS gimple_alloc_counts[(int) gimple_alloc_kind_seq]++; gimple_alloc_sizes[(int) gimple_alloc_kind_seq] += sizeof (*seq); @@ -1308,11 +1446,15 @@ walk_gimple_op (gimple stmt, walk_tree_fn callback_op, switch (gimple_code (stmt)) { case GIMPLE_ASSIGN: - /* Walk the RHS operands. A formal temporary LHS may use a - COMPONENT_REF RHS. */ + /* Walk the RHS operands. If the LHS is of a non-renamable type or + is a register variable, we may use a COMPONENT_REF on the RHS. */ if (wi) - wi->val_only = !is_gimple_reg (gimple_assign_lhs (stmt)) - || !gimple_assign_single_p (stmt); + { + tree lhs = gimple_assign_lhs (stmt); + wi->val_only + = (is_gimple_reg_type (TREE_TYPE (lhs)) && !is_gimple_reg (lhs)) + || !gimple_assign_single_p (stmt); + } for (i = 1; i < gimple_num_ops (stmt); i++) { @@ -1346,7 +1488,10 @@ walk_gimple_op (gimple stmt, walk_tree_fn callback_op, case GIMPLE_CALL: if (wi) - wi->is_lhs = false; + { + wi->is_lhs = false; + wi->val_only = true; + } ret = walk_tree (gimple_call_chain_ptr (stmt), callback_op, wi, pset); if (ret) @@ -1358,21 +1503,34 @@ walk_gimple_op (gimple stmt, walk_tree_fn callback_op, for (i = 0; i < gimple_call_num_args (stmt); i++) { + if (wi) + wi->val_only + = is_gimple_reg_type (TREE_TYPE (gimple_call_arg (stmt, i))); ret = walk_tree (gimple_call_arg_ptr (stmt, i), callback_op, wi, pset); if (ret) return ret; } - if (wi) - wi->is_lhs = true; + if (gimple_call_lhs (stmt)) + { + if (wi) + { + wi->is_lhs = true; + wi->val_only + = is_gimple_reg_type (TREE_TYPE (gimple_call_lhs (stmt))); + } - ret = walk_tree (gimple_call_lhs_ptr (stmt), callback_op, wi, pset); - if (ret) - return ret; + ret = walk_tree (gimple_call_lhs_ptr (stmt), callback_op, wi, pset); + if (ret) + return ret; + } if (wi) - wi->is_lhs = false; + { + wi->is_lhs = false; + wi->val_only = true; + } break; case GIMPLE_CATCH: @@ -1694,7 +1852,10 @@ gimple_set_body (tree fndecl, gimple_seq seq) } -/* Return the body of GIMPLE statements for function FN. */ +/* Return the body of GIMPLE statements for function FN. After the + CFG pass, the function body doesn't exist anymore because it has + been split up into basic blocks. In this case, it returns + NULL. 
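+
+   Callers must therefore be prepared for a NULL result, e.g. (a
+   sketch; handle_cfg is a hypothetical helper):
+
+     gimple_seq body = gimple_body (fndecl);
+     if (!body && gimple_has_body_p (fndecl))
+       handle_cfg (DECL_STRUCT_FUNCTION (fndecl)->cfg);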
*/ gimple_seq gimple_body (tree fndecl) @@ -1712,6 +1873,20 @@ gimple_has_body_p (tree fndecl) return (gimple_body (fndecl) || (fn && fn->cfg)); } +/* Return true if calls C1 and C2 are known to go to the same function. */ + +bool +gimple_call_same_target_p (const_gimple c1, const_gimple c2) +{ + if (gimple_call_internal_p (c1)) + return (gimple_call_internal_p (c2) + && gimple_call_internal_fn (c1) == gimple_call_internal_fn (c2)); + else + return (gimple_call_fn (c1) == gimple_call_fn (c2) + || (gimple_call_fndecl (c1) + && gimple_call_fndecl (c1) == gimple_call_fndecl (c2))); +} + /* Detect flags from a GIMPLE_CALL. This is just like call_expr_flags, but for gimple tuples. */ @@ -1720,20 +1895,101 @@ gimple_call_flags (const_gimple stmt) { int flags; tree decl = gimple_call_fndecl (stmt); - tree t; if (decl) flags = flags_from_decl_or_type (decl); + else if (gimple_call_internal_p (stmt)) + flags = internal_fn_flags (gimple_call_internal_fn (stmt)); else + flags = flags_from_decl_or_type (gimple_call_fntype (stmt)); + + if (stmt->gsbase.subcode & GF_CALL_NOTHROW) + flags |= ECF_NOTHROW; + + return flags; +} + +/* Return the "fn spec" string for call STMT. */ + +static tree +gimple_call_fnspec (const_gimple stmt) +{ + tree type, attr; + + type = gimple_call_fntype (stmt); + if (!type) + return NULL_TREE; + + attr = lookup_attribute ("fn spec", TYPE_ATTRIBUTES (type)); + if (!attr) + return NULL_TREE; + + return TREE_VALUE (TREE_VALUE (attr)); +} + +/* Detects argument flags for argument number ARG on call STMT. */ + +int +gimple_call_arg_flags (const_gimple stmt, unsigned arg) +{ + tree attr = gimple_call_fnspec (stmt); + + if (!attr || 1 + arg >= (unsigned) TREE_STRING_LENGTH (attr)) + return 0; + + switch (TREE_STRING_POINTER (attr)[1 + arg]) { - t = TREE_TYPE (gimple_call_fn (stmt)); - if (t && TREE_CODE (t) == POINTER_TYPE) - flags = flags_from_decl_or_type (TREE_TYPE (t)); - else - flags = 0; + case 'x': + case 'X': + return EAF_UNUSED; + + case 'R': + return EAF_DIRECT | EAF_NOCLOBBER | EAF_NOESCAPE; + + case 'r': + return EAF_NOCLOBBER | EAF_NOESCAPE; + + case 'W': + return EAF_DIRECT | EAF_NOESCAPE; + + case 'w': + return EAF_NOESCAPE; + + case '.': + default: + return 0; } +} - return flags; +/* Detects return flags for the call STMT. 
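+
+   The result is decoded from the "fn spec" attribute string returned
+   by gimple_call_fnspec above: a leading '1'..'4' means the function
+   returns that argument, 'm' marks a malloc-like noalias return.  A
+   sketch of a consumer (CALL and RETARG are supplied by the caller):
+
+     int flags = gimple_call_return_flags (call);
+     if (flags & ERF_RETURNS_ARG)
+       retarg = gimple_call_arg (call, flags & ERF_RETURN_ARG_MASK);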
*/ + +int +gimple_call_return_flags (const_gimple stmt) +{ + tree attr; + + if (gimple_call_flags (stmt) & ECF_MALLOC) + return ERF_NOALIAS; + + attr = gimple_call_fnspec (stmt); + if (!attr || TREE_STRING_LENGTH (attr) < 1) + return 0; + + switch (TREE_STRING_POINTER (attr)[0]) + { + case '1': + case '2': + case '3': + case '4': + return ERF_RETURNS_ARG | (TREE_STRING_POINTER (attr)[0] - '1'); + + case 'm': + return ERF_NOALIAS; + + case '.': + default: + return 0; + } } @@ -1742,10 +1998,8 @@ gimple_call_flags (const_gimple stmt) bool gimple_assign_copy_p (gimple gs) { - return gimple_code (gs) == GIMPLE_ASSIGN - && get_gimple_rhs_class (gimple_assign_rhs_code (gs)) - == GIMPLE_SINGLE_RHS - && is_gimple_val (gimple_op (gs, 1)); + return (gimple_assign_single_p (gs) + && is_gimple_val (gimple_op (gs, 1))); } @@ -1754,28 +2008,12 @@ gimple_assign_copy_p (gimple gs) bool gimple_assign_ssa_name_copy_p (gimple gs) { - return (gimple_code (gs) == GIMPLE_ASSIGN - && (get_gimple_rhs_class (gimple_assign_rhs_code (gs)) - == GIMPLE_SINGLE_RHS) + return (gimple_assign_single_p (gs) && TREE_CODE (gimple_assign_lhs (gs)) == SSA_NAME && TREE_CODE (gimple_assign_rhs1 (gs)) == SSA_NAME); } -/* Return true if GS is an assignment with a singleton RHS, i.e., - there is no operator associated with the assignment itself. - Unlike gimple_assign_copy_p, this predicate returns true for - any RHS operand, including those that perform an operation - and do not have the semantics of a copy, such as COND_EXPR. */ - -bool -gimple_assign_single_p (gimple gs) -{ - return (gimple_code (gs) == GIMPLE_ASSIGN - && get_gimple_rhs_class (gimple_assign_rhs_code (gs)) - == GIMPLE_SINGLE_RHS); -} - /* Return true if GS is an assignment with a unary RHS, but the operator has no effect on the assigned value. The logic is adapted from STRIP_NOPS. This predicate is intended to be used in tuplifying @@ -1793,7 +2031,7 @@ gimple_assign_single_p (gimple gs) bool gimple_assign_unary_nop_p (gimple gs) { - return (gimple_code (gs) == GIMPLE_ASSIGN + return (is_gimple_assign (gs) && (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (gs)) || gimple_assign_rhs_code (gs) == NON_LVALUE_EXPR) && gimple_assign_rhs1 (gs) != error_mark_node @@ -1856,22 +2094,22 @@ void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *gsi, tree expr) { enum tree_code subcode; - tree op1, op2; + tree op1, op2, op3; - extract_ops_from_tree (expr, &subcode, &op1, &op2); - gimple_assign_set_rhs_with_ops (gsi, subcode, op1, op2); + extract_ops_from_tree_1 (expr, &subcode, &op1, &op2, &op3); + gimple_assign_set_rhs_with_ops_1 (gsi, subcode, op1, op2, op3); } /* Set the RHS of assignment statement pointed-to by GSI to CODE with - operands OP1 and OP2. + operands OP1, OP2 and OP3. NOTE: The statement pointed-to by GSI may be reallocated if it did not have enough operand slots. 
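 
    For example, a pass can rewrite a copy into an addition in place,
    re-fetching the statement afterwards in case it was reallocated (a
    minimal sketch; GSI, A and B are supplied by the caller):
 
      gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, a, b);
      update_stmt (gsi_stmt (gsi));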
*/ void -gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code, - tree op1, tree op2) +gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *gsi, enum tree_code code, + tree op1, tree op2, tree op3) { unsigned new_rhs_ops = get_gimple_rhs_num_ops (code); gimple stmt = gsi_stmt (*gsi); @@ -1895,6 +2133,8 @@ gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code, gimple_assign_set_rhs1 (stmt, op1); if (new_rhs_ops > 1) gimple_assign_set_rhs2 (stmt, op2); + if (new_rhs_ops > 2) + gimple_assign_set_rhs3 (stmt, op3); } @@ -2024,8 +2264,8 @@ gimple_copy (gimple stmt) t = unshare_expr (gimple_omp_for_clauses (stmt)); gimple_omp_for_set_clauses (copy, t); copy->gimple_omp_for.iter - = GGC_NEWVEC (struct gimple_omp_for_iter, - gimple_omp_for_collapse (stmt)); + = ggc_alloc_vec_gimple_omp_for_iter + (gimple_omp_for_collapse (stmt)); for (i = 0; i < gimple_omp_for_collapse (stmt); i++) { gimple_omp_for_set_cond (copy, i, @@ -2130,15 +2370,7 @@ void gimple_set_modified (gimple s, bool modifiedp) { if (gimple_has_ops (s)) - { - s->gsbase.modified = (unsigned) modifiedp; - - if (modifiedp - && cfun->gimple_df - && is_gimple_call (s) - && gimple_call_noreturn_p (s)) - VEC_safe_push (gimple, gc, MODIFIED_NORETURN_CALLS (cfun), s); - } + s->gsbase.modified = (unsigned) modifiedp; } @@ -2162,9 +2394,14 @@ gimple_has_side_effects (const_gimple s) if (gimple_has_volatile_ops (s)) return true; + if (gimple_code (s) == GIMPLE_ASM + && gimple_asm_volatile_p (s)) + return true; + if (is_gimple_call (s)) { unsigned nargs = gimple_call_num_args (s); + tree fn; if (!(gimple_call_flags (s) & (ECF_CONST | ECF_PURE))) return true; @@ -2175,17 +2412,18 @@ gimple_has_side_effects (const_gimple s) if (gimple_call_lhs (s) && TREE_SIDE_EFFECTS (gimple_call_lhs (s))) { - gcc_assert (gimple_has_volatile_ops (s)); + gcc_checking_assert (gimple_has_volatile_ops (s)); return true; } - if (TREE_SIDE_EFFECTS (gimple_call_fn (s))) + fn = gimple_call_fn (s); + if (fn && TREE_SIDE_EFFECTS (fn)) return true; for (i = 0; i < nargs; i++) if (TREE_SIDE_EFFECTS (gimple_call_arg (s, i))) { - gcc_assert (gimple_has_volatile_ops (s)); + gcc_checking_assert (gimple_has_volatile_ops (s)); return true; } @@ -2194,11 +2432,14 @@ gimple_has_side_effects (const_gimple s) else { for (i = 0; i < gimple_num_ops (s); i++) - if (TREE_SIDE_EFFECTS (gimple_op (s, i))) - { - gcc_assert (gimple_has_volatile_ops (s)); - return true; - } + { + tree op = gimple_op (s, i); + if (op && TREE_SIDE_EFFECTS (op)) + { + gcc_checking_assert (gimple_has_volatile_ops (s)); + return true; + } + } } return false; @@ -2207,7 +2448,7 @@ gimple_has_side_effects (const_gimple s) /* Return true if the RHS of statement S has side effects. We may use it to determine if it is admissable to replace an assignment or call with a copy of a previously-computed - value. In such cases, side-effects due the the LHS are + value. In such cases, side-effects due to the LHS are preserved. */ bool @@ -2218,14 +2459,15 @@ gimple_rhs_has_side_effects (const_gimple s) if (is_gimple_call (s)) { unsigned nargs = gimple_call_num_args (s); + tree fn; if (!(gimple_call_flags (s) & (ECF_CONST | ECF_PURE))) return true; /* We cannot use gimple_has_volatile_ops here, because we must ignore a volatile LHS. 
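 
          For example, in
            v = foo ();
          with a volatile `v' and a const `foo', the statement has
          volatile ops although its RHS is still free of side effects.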
*/ - if (TREE_SIDE_EFFECTS (gimple_call_fn (s)) - || TREE_THIS_VOLATILE (gimple_call_fn (s))) + fn = gimple_call_fn (s); + if (fn && (TREE_SIDE_EFFECTS (fn) || TREE_THIS_VOLATILE (fn))) { gcc_assert (gimple_has_volatile_ops (s)); return true; @@ -2266,24 +2508,25 @@ gimple_rhs_has_side_effects (const_gimple s) return false; } - /* Helper for gimple_could_trap_p and gimple_assign_rhs_could_trap_p. - Return true if S can trap. If INCLUDE_LHS is true and S is a - GIMPLE_ASSIGN, the LHS of the assignment is also checked. - Otherwise, only the RHS of the assignment is checked. */ + Return true if S can trap. When INCLUDE_MEM is true, check whether + the memory operations could trap. When INCLUDE_STORES is true and + S is a GIMPLE_ASSIGN, the LHS of the assignment is also checked. */ -static bool -gimple_could_trap_p_1 (gimple s, bool include_lhs) +bool +gimple_could_trap_p_1 (gimple s, bool include_mem, bool include_stores) { - unsigned i, start; tree t, div = NULL_TREE; enum tree_code op; - start = (is_gimple_assign (s) && !include_lhs) ? 1 : 0; + if (include_mem) + { + unsigned i, start = (is_gimple_assign (s) && !include_stores) ? 1 : 0; - for (i = start; i < gimple_num_ops (s); i++) - if (tree_could_trap_p (gimple_op (s, i))) - return true; + for (i = start; i < gimple_num_ops (s); i++) + if (tree_could_trap_p (gimple_op (s, i))) + return true; + } switch (gimple_code (s)) { @@ -2312,26 +2555,23 @@ gimple_could_trap_p_1 (gimple s, bool include_lhs) } return false; - } - /* Return true if statement S can trap. */ bool gimple_could_trap_p (gimple s) { - return gimple_could_trap_p_1 (s, true); + return gimple_could_trap_p_1 (s, true, true); } - /* Return true if RHS of a GIMPLE_ASSIGN S can trap. */ bool gimple_assign_rhs_could_trap_p (gimple s) { gcc_assert (is_gimple_assign (s)); - return gimple_could_trap_p_1 (s, false); + return gimple_could_trap_p_1 (s, true, false); } @@ -2374,6 +2614,8 @@ get_gimple_rhs_num_ops (enum tree_code code) return 1; else if (rhs_class == GIMPLE_BINARY_RHS) return 2; + else if (rhs_class == GIMPLE_TERNARY_RHS) + return 3; else gcc_unreachable (); } @@ -2391,16 +2633,18 @@ get_gimple_rhs_num_ops (enum tree_code code) || (SYM) == TRUTH_XOR_EXPR) ? GIMPLE_BINARY_RHS \ : (SYM) == TRUTH_NOT_EXPR ? GIMPLE_UNARY_RHS \ : ((SYM) == COND_EXPR \ - || (SYM) == CONSTRUCTOR \ + || (SYM) == WIDEN_MULT_PLUS_EXPR \ + || (SYM) == WIDEN_MULT_MINUS_EXPR \ + || (SYM) == DOT_PROD_EXPR \ + || (SYM) == REALIGN_LOAD_EXPR \ + || (SYM) == VEC_COND_EXPR \ + || (SYM) == FMA_EXPR) ? GIMPLE_TERNARY_RHS \ + : ((SYM) == CONSTRUCTOR \ || (SYM) == OBJ_TYPE_REF \ || (SYM) == ASSERT_EXPR \ || (SYM) == ADDR_EXPR \ || (SYM) == WITH_SIZE_EXPR \ - || (SYM) == SSA_NAME \ - || (SYM) == POLYNOMIAL_CHREC \ - || (SYM) == DOT_PROD_EXPR \ - || (SYM) == VEC_COND_EXPR \ - || (SYM) == REALIGN_LOAD_EXPR) ? GIMPLE_SINGLE_RHS \ + || (SYM) == SSA_NAME) ? GIMPLE_SINGLE_RHS \ : GIMPLE_INVALID_RHS), #define END_OF_BASE_TREE_CODES (unsigned char) GIMPLE_INVALID_RHS, @@ -2415,15 +2659,6 @@ const unsigned char gimple_rhs_class_table[] = { /* Validation of GIMPLE expressions. */ -/* Return true if OP is an acceptable tree node to be used as a GIMPLE - operand. */ - -bool -is_gimple_operand (const_tree op) -{ - return op && get_gimple_rhs_class (TREE_CODE (op)) == GIMPLE_SINGLE_RHS; -} - /* Returns true iff T is a valid RHS for an assignment to a renamed user -- or front-end generated artificial -- variable. 
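 
    For example, `a_1 + b_2' or a bare SSA name has a valid GIMPLE RHS
    class, while a tree code outside the RHS class table, such as
    COMPOUND_EXPR, is rejected.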
*/ @@ -2465,7 +2700,7 @@ bool is_gimple_condexpr (tree t) { return (is_gimple_val (t) || (COMPARISON_CLASS_P (t) - && !tree_could_trap_p (t) + && !tree_could_throw_p (t) && is_gimple_val (TREE_OPERAND (t, 0)) && is_gimple_val (TREE_OPERAND (t, 1)))); } @@ -2475,7 +2710,8 @@ is_gimple_condexpr (tree t) bool is_gimple_addressable (tree t) { - return (is_gimple_id (t) || handled_component_p (t) || INDIRECT_REF_P (t)); + return (is_gimple_id (t) || handled_component_p (t) + || TREE_CODE (t) == MEM_REF); } /* Return true if T is a valid gimple constant. */ @@ -2526,7 +2762,7 @@ is_gimple_address (const_tree t) op = TREE_OPERAND (op, 0); } - if (CONSTANT_CLASS_P (op) || INDIRECT_REF_P (op)) + if (CONSTANT_CLASS_P (op) || TREE_CODE (op) == MEM_REF) return true; switch (TREE_CODE (op)) @@ -2586,8 +2822,18 @@ is_gimple_invariant_address (const_tree t) return false; op = strip_invariant_refs (TREE_OPERAND (t, 0)); + if (!op) + return false; - return op && (CONSTANT_CLASS_P (op) || decl_address_invariant_p (op)); + if (TREE_CODE (op) == MEM_REF) + { + const_tree op0 = TREE_OPERAND (op, 0); + return (TREE_CODE (op0) == ADDR_EXPR + && (CONSTANT_CLASS_P (TREE_OPERAND (op0, 0)) + || decl_address_invariant_p (TREE_OPERAND (op0, 0)))); + } + + return CONSTANT_CLASS_P (op) || decl_address_invariant_p (op); } /* Return true if T is a gimple invariant address at IPA level @@ -2804,24 +3050,27 @@ is_gimple_min_lval (tree t) { if (!(t = CONST_CAST_TREE (strip_invariant_refs (t)))) return false; - return (is_gimple_id (t) || TREE_CODE (t) == INDIRECT_REF); + return (is_gimple_id (t) || TREE_CODE (t) == MEM_REF); } -/* Return true if T is a typecast operation. */ +/* Return true if T is a valid function operand of a CALL_EXPR. */ bool -is_gimple_cast (tree t) +is_gimple_call_addr (tree t) { - return (CONVERT_EXPR_P (t) - || TREE_CODE (t) == FIX_TRUNC_EXPR); + return (TREE_CODE (t) == OBJ_TYPE_REF || is_gimple_val (t)); } -/* Return true if T is a valid function operand of a CALL_EXPR. */ +/* Return true if T is a valid address operand of a MEM_REF. */ bool -is_gimple_call_addr (tree t) +is_gimple_mem_ref_addr (tree t) { - return (TREE_CODE (t) == OBJ_TYPE_REF || is_gimple_val (t)); + return (is_gimple_reg (t) + || TREE_CODE (t) == INTEGER_CST + || (TREE_CODE (t) == ADDR_EXPR + && (CONSTANT_CLASS_P (TREE_OPERAND (t, 0)) + || decl_address_invariant_p (TREE_OPERAND (t, 0))))); } /* If T makes a function call, return the corresponding CALL_EXPR operand. @@ -2855,10 +3104,18 @@ get_base_address (tree t) while (handled_component_p (t)) t = TREE_OPERAND (t, 0); - if (SSA_VAR_P (t) + if ((TREE_CODE (t) == MEM_REF + || TREE_CODE (t) == TARGET_MEM_REF) + && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR) + t = TREE_OPERAND (TREE_OPERAND (t, 0), 0); + + if (TREE_CODE (t) == SSA_NAME + || DECL_P (t) || TREE_CODE (t) == STRING_CST || TREE_CODE (t) == CONSTRUCTOR - || INDIRECT_REF_P (t)) + || INDIRECT_REF_P (t) + || TREE_CODE (t) == MEM_REF + || TREE_CODE (t) == TARGET_MEM_REF) return t; else return NULL_TREE; @@ -2924,19 +3181,13 @@ canonicalize_cond_expr_cond (tree t) { /* Strip conversions around boolean operations. */ if (CONVERT_EXPR_P (t) - && truth_value_p (TREE_CODE (TREE_OPERAND (t, 0)))) + && (truth_value_p (TREE_CODE (TREE_OPERAND (t, 0))) + || TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) + == BOOLEAN_TYPE)) t = TREE_OPERAND (t, 0); - /* For (bool)x use x != 0. 
*/ - if (CONVERT_EXPR_P (t) - && TREE_CODE (TREE_TYPE (t)) == BOOLEAN_TYPE) - { - tree top0 = TREE_OPERAND (t, 0); - t = build2 (NE_EXPR, TREE_TYPE (t), - top0, build_int_cst (TREE_TYPE (top0), 0)); - } /* For !x use x == 0. */ - else if (TREE_CODE (t) == TRUTH_NOT_EXPR) + if (TREE_CODE (t) == TRUTH_NOT_EXPR) { tree top0 = TREE_OPERAND (t, 0); t = build2 (EQ_EXPR, TREE_TYPE (t), @@ -2966,7 +3217,6 @@ gimple gimple_call_copy_skip_args (gimple stmt, bitmap args_to_skip) { int i; - tree fn = gimple_call_fn (stmt); int nargs = gimple_call_num_args (stmt); VEC(tree, heap) *vargs = VEC_alloc (tree, heap, nargs); gimple new_stmt; @@ -2975,7 +3225,11 @@ gimple_call_copy_skip_args (gimple stmt, bitmap args_to_skip) if (!bitmap_bit_p (args_to_skip, i)) VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i)); - new_stmt = gimple_build_call_vec (fn, vargs); + if (gimple_call_internal_p (stmt)) + new_stmt = gimple_build_call_internal_vec (gimple_call_internal_fn (stmt), + vargs); + else + new_stmt = gimple_build_call_vec (gimple_call_fn (stmt), vargs); VEC_free (tree, heap, vargs); if (gimple_call_lhs (stmt)) gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt)); @@ -2986,14 +3240,8 @@ gimple_call_copy_skip_args (gimple stmt, bitmap args_to_skip) gimple_set_block (new_stmt, gimple_block (stmt)); if (gimple_has_location (stmt)) gimple_set_location (new_stmt, gimple_location (stmt)); - - /* Carry all the flags to the new GIMPLE_CALL. */ + gimple_call_copy_flags (new_stmt, stmt); gimple_call_set_chain (new_stmt, gimple_call_chain (stmt)); - gimple_call_set_tail (new_stmt, gimple_call_tail_p (stmt)); - gimple_call_set_cannot_inline (new_stmt, gimple_call_cannot_inline_p (stmt)); - gimple_call_set_return_slot_opt (new_stmt, gimple_call_return_slot_opt_p (stmt)); - gimple_call_set_from_thunk (new_stmt, gimple_call_from_thunk_p (stmt)); - gimple_call_set_va_arg_pack (new_stmt, gimple_call_va_arg_pack_p (stmt)); gimple_set_modified (new_stmt, true); @@ -3001,117 +3249,141 @@ gimple_call_copy_skip_args (gimple stmt, bitmap args_to_skip) } +enum gtc_mode { GTC_MERGE = 0, GTC_DIAG = 1 }; + static hashval_t gimple_type_hash (const void *); /* Structure used to maintain a cache of some type pairs compared by gimple_types_compatible_p when comparing aggregate types. There are - four possible values for SAME_P: + three possible values for SAME_P: -2: The pair (T1, T2) has just been inserted in the table. - -1: The pair (T1, T2) is currently being compared. 0: T1 and T2 are different types. 1: T1 and T2 are the same type. - This table is only used when comparing aggregate types to avoid - infinite recursion due to self-referential types. */ + The two elements in the SAME_P array are indexed by the comparison + mode gtc_mode. */ + struct type_pair_d { unsigned int uid1; unsigned int uid2; - int same_p; + signed char same_p[2]; }; typedef struct type_pair_d *type_pair_t; +DEF_VEC_P(type_pair_t); +DEF_VEC_ALLOC_P(type_pair_t,heap); -/* Return a hash value for the type pair pointed-to by P. */ - -static hashval_t -type_pair_hash (const void *p) -{ - const struct type_pair_d *pair = (const struct type_pair_d *) p; - hashval_t val1 = pair->uid1; - hashval_t val2 = pair->uid2; - return (iterative_hash_hashval_t (val2, val1) - ^ iterative_hash_hashval_t (val1, val2)); -} - -/* Compare two type pairs pointed-to by P1 and P2. 
*/ +#define GIMPLE_TYPE_PAIR_SIZE 16381 +struct type_pair_d *type_pair_cache; -static int -type_pair_eq (const void *p1, const void *p2) -{ - const struct type_pair_d *pair1 = (const struct type_pair_d *) p1; - const struct type_pair_d *pair2 = (const struct type_pair_d *) p2; - return ((pair1->uid1 == pair2->uid1 && pair1->uid2 == pair2->uid2) - || (pair1->uid1 == pair2->uid2 && pair1->uid2 == pair2->uid1)); -} /* Lookup the pair of types T1 and T2 in *VISITED_P. Insert a new entry if none existed. */ -static type_pair_t -lookup_type_pair (tree t1, tree t2, htab_t *visited_p, struct obstack *ob_p) +static inline type_pair_t +lookup_type_pair (tree t1, tree t2) { - struct type_pair_d pair; - type_pair_t p; - void **slot; + unsigned int index; + unsigned int uid1, uid2; + + if (type_pair_cache == NULL) + type_pair_cache = XCNEWVEC (struct type_pair_d, GIMPLE_TYPE_PAIR_SIZE); - if (*visited_p == NULL) + if (TYPE_UID (t1) < TYPE_UID (t2)) { - *visited_p = htab_create (251, type_pair_hash, type_pair_eq, NULL); - gcc_obstack_init (ob_p); + uid1 = TYPE_UID (t1); + uid2 = TYPE_UID (t2); } - - pair.uid1 = TYPE_UID (t1); - pair.uid2 = TYPE_UID (t2); - slot = htab_find_slot (*visited_p, &pair, INSERT); - - if (*slot) - p = *((type_pair_t *) slot); else { - p = XOBNEW (ob_p, struct type_pair_d); - p->uid1 = TYPE_UID (t1); - p->uid2 = TYPE_UID (t2); - p->same_p = -2; - *slot = (void *) p; + uid1 = TYPE_UID (t2); + uid2 = TYPE_UID (t1); } + gcc_checking_assert (uid1 != uid2); - return p; -} + /* iterative_hash_hashval_t imply an function calls. + We know that UIDS are in limited range. */ + index = ((((unsigned HOST_WIDE_INT)uid1 << HOST_BITS_PER_WIDE_INT / 2) + uid2) + % GIMPLE_TYPE_PAIR_SIZE); + if (type_pair_cache [index].uid1 == uid1 + && type_pair_cache [index].uid2 == uid2) + return &type_pair_cache[index]; + type_pair_cache [index].uid1 = uid1; + type_pair_cache [index].uid2 = uid2; + type_pair_cache [index].same_p[0] = -2; + type_pair_cache [index].same_p[1] = -2; -/* Return true if T1 and T2 have the same name. If FOR_COMPLETION_P is - true then if any type has no name return false, otherwise return - true if both types have no names. */ + return &type_pair_cache[index]; +} -static bool -compare_type_names_p (tree t1, tree t2, bool for_completion_p) +/* Per pointer state for the SCC finding. The on_sccstack flag + is not strictly required, it is true when there is no hash value + recorded for the type and false otherwise. But querying that + is slower. */ + +struct sccs { - tree name1 = TYPE_NAME (t1); - tree name2 = TYPE_NAME (t2); + unsigned int dfsnum; + unsigned int low; + bool on_sccstack; + union { + hashval_t hash; + signed char same_p; + } u; +}; - /* Consider anonymous types all unique for completion. */ - if (for_completion_p - && (!name1 || !name2)) - return false; +static unsigned int next_dfs_num; +static unsigned int gtc_next_dfs_num; - if (name1 && TREE_CODE (name1) == TYPE_DECL) - { - name1 = DECL_NAME (name1); - if (for_completion_p - && !name1) - return false; - } - gcc_assert (!name1 || TREE_CODE (name1) == IDENTIFIER_NODE); + +/* GIMPLE type merging cache. A direct-mapped cache based on TYPE_UID. */ + +typedef struct GTY(()) gimple_type_leader_entry_s { + tree type; + tree leader; +} gimple_type_leader_entry; + +#define GIMPLE_TYPE_LEADER_SIZE 16381 +static GTY((deletable, length("GIMPLE_TYPE_LEADER_SIZE"))) + gimple_type_leader_entry *gimple_type_leader; + +/* Lookup an existing leader for T and return it or NULL_TREE, if + there is none in the cache. 
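+
+   The cache is direct-mapped over TYPE_UID, so a slot may hold an
+   unrelated type that mapped to the same index; the check of
+   LEADER->TYPE against T below rejects such entries.  A typical
+   query:
+
+     tree leader = gimple_lookup_type_leader (t);
+     if (leader)
+       t = leader;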
*/ + +static inline tree +gimple_lookup_type_leader (tree t) +{ + gimple_type_leader_entry *leader; + + if (!gimple_type_leader) + return NULL_TREE; + + leader = &gimple_type_leader[TYPE_UID (t) % GIMPLE_TYPE_LEADER_SIZE]; + if (leader->type != t) + return NULL_TREE; + + return leader->leader; +} + +/* Return true if T1 and T2 have the same name. If FOR_COMPLETION_P is + true then if any type has no name return false, otherwise return + true if both types have no names. */ + +static bool +compare_type_names_p (tree t1, tree t2) +{ + tree name1 = TYPE_NAME (t1); + tree name2 = TYPE_NAME (t2); + + if (name1 && TREE_CODE (name1) == TYPE_DECL) + name1 = DECL_NAME (name1); + gcc_checking_assert (!name1 || TREE_CODE (name1) == IDENTIFIER_NODE); if (name2 && TREE_CODE (name2) == TYPE_DECL) - { - name2 = DECL_NAME (name2); - if (for_completion_p - && !name2) - return false; - } - gcc_assert (!name2 || TREE_CODE (name2) == IDENTIFIER_NODE); + name2 = DECL_NAME (name2); + gcc_checking_assert (!name2 || TREE_CODE (name2) == IDENTIFIER_NODE); /* Identifiers can be compared with pointer equality rather than a string comparison. */ @@ -3121,16 +3393,34 @@ compare_type_names_p (tree t1, tree t2, bool for_completion_p) return false; } -/* Return true if the field decls F1 and F2 are at the same offset. */ +/* Return true if the field decls F1 and F2 are at the same offset. + + This is intended to be used on GIMPLE types only. */ bool -compare_field_offset (tree f1, tree f2) +gimple_compare_field_offset (tree f1, tree f2) { if (DECL_OFFSET_ALIGN (f1) == DECL_OFFSET_ALIGN (f2)) - return (operand_equal_p (DECL_FIELD_OFFSET (f1), - DECL_FIELD_OFFSET (f2), 0) - && tree_int_cst_equal (DECL_FIELD_BIT_OFFSET (f1), - DECL_FIELD_BIT_OFFSET (f2))); + { + tree offset1 = DECL_FIELD_OFFSET (f1); + tree offset2 = DECL_FIELD_OFFSET (f2); + return ((offset1 == offset2 + /* Once gimplification is done, self-referential offsets are + instantiated as operand #2 of the COMPONENT_REF built for + each access and reset. Therefore, they are not relevant + anymore and fields are interchangeable provided that they + represent the same access. */ + || (TREE_CODE (offset1) == PLACEHOLDER_EXPR + && TREE_CODE (offset2) == PLACEHOLDER_EXPR + && (DECL_SIZE (f1) == DECL_SIZE (f2) + || (TREE_CODE (DECL_SIZE (f1)) == PLACEHOLDER_EXPR + && TREE_CODE (DECL_SIZE (f2)) == PLACEHOLDER_EXPR) + || operand_equal_p (DECL_SIZE (f1), DECL_SIZE (f2), 0)) + && DECL_ALIGN (f1) == DECL_ALIGN (f2)) + || operand_equal_p (offset1, offset2, 0)) + && tree_int_cst_equal (DECL_FIELD_BIT_OFFSET (f1), + DECL_FIELD_BIT_OFFSET (f2))); + } /* Fortran and C do not always agree on what DECL_OFFSET_ALIGN should be, so handle differing ones specially by decomposing @@ -3154,95 +3444,157 @@ compare_field_offset (tree f1, tree f2) return false; } -/* Return 1 iff T1 and T2 are structurally identical. - Otherwise, return 0. */ +static bool +gimple_types_compatible_p_1 (tree, tree, type_pair_t, + VEC(type_pair_t, heap) **, + struct pointer_map_t *, struct obstack *); + +/* DFS visit the edge from the callers type pair with state *STATE to + the pair T1, T2 while operating in FOR_MERGING_P mode. + Update the merging status if it is not part of the SCC containing the + callers pair and return it. + SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. 
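+
+   The walk is Tarjan's SCC algorithm over type pairs: each pair gets
+   a DFS number and a low-link, pairs still on SCCSTACK form the open
+   components, and when a pair's low-link equals its DFS number the
+   worker pops the whole SCC and freezes the optimistic SAME_P value
+   computed for it.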
*/ -static int -gimple_types_compatible_p (tree t1, tree t2) +static bool +gtc_visit (tree t1, tree t2, + struct sccs *state, + VEC(type_pair_t, heap) **sccstack, + struct pointer_map_t *sccstate, + struct obstack *sccstate_obstack) { - type_pair_t p = NULL; + struct sccs *cstate = NULL; + type_pair_t p; + void **slot; + tree leader1, leader2; /* Check first for the obvious case of pointer identity. */ if (t1 == t2) - return 1; + return true; /* Check that we have two types to compare. */ if (t1 == NULL_TREE || t2 == NULL_TREE) - return 0; + return false; /* Can't be the same type if the types don't have the same code. */ if (TREE_CODE (t1) != TREE_CODE (t2)) - return 0; + return false; /* Can't be the same type if they have different CV qualifiers. */ if (TYPE_QUALS (t1) != TYPE_QUALS (t2)) - return 0; + return false; - /* Void types are always the same. */ - if (TREE_CODE (t1) == VOID_TYPE) - return 1; + if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2)) + return false; + + /* Void types and nullptr types are always the same. */ + if (TREE_CODE (t1) == VOID_TYPE + || TREE_CODE (t1) == NULLPTR_TYPE) + return true; + + /* Can't be the same type if they have different alignment or mode. */ + if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2) + || TYPE_MODE (t1) != TYPE_MODE (t2)) + return false; - /* For numerical types do some simple checks before doing three - hashtable queries. */ + /* Do some simple checks before doing three hashtable queries. */ if (INTEGRAL_TYPE_P (t1) || SCALAR_FLOAT_TYPE_P (t1) || FIXED_POINT_TYPE_P (t1) || TREE_CODE (t1) == VECTOR_TYPE || TREE_CODE (t1) == COMPLEX_TYPE - || TREE_CODE (t1) == OFFSET_TYPE) + || TREE_CODE (t1) == OFFSET_TYPE + || POINTER_TYPE_P (t1)) { - /* Can't be the same type if they have different alignment, - sign, precision or mode. */ - if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2) - || TYPE_PRECISION (t1) != TYPE_PRECISION (t2) - || TYPE_MODE (t1) != TYPE_MODE (t2) + /* Can't be the same type if they have different sign or precision. */ + if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2) || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2)) - return 0; + return false; if (TREE_CODE (t1) == INTEGER_TYPE && (TYPE_IS_SIZETYPE (t1) != TYPE_IS_SIZETYPE (t2) || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))) - return 0; + return false; /* That's all we need to check for float and fixed-point types. */ if (SCALAR_FLOAT_TYPE_P (t1) || FIXED_POINT_TYPE_P (t1)) - return 1; - - /* Perform cheap tail-recursion for vector and complex types. */ - if (TREE_CODE (t1) == VECTOR_TYPE - || TREE_CODE (t1) == COMPLEX_TYPE) - return gimple_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)); + return true; - /* For integral types fall thru to more complex checks. */ + /* For other types fall thru to more complex checks. */ } + /* If the types have been previously registered and found equal + they still are. */ + leader1 = gimple_lookup_type_leader (t1); + leader2 = gimple_lookup_type_leader (t2); + if (leader1 == t2 + || t1 == leader2 + || (leader1 && leader1 == leader2)) + return true; + /* If the hash values of t1 and t2 are different the types can't possibly be the same. This helps keeping the type-pair hashtable small, only tracking comparisons for hash collisions. */ if (gimple_type_hash (t1) != gimple_type_hash (t2)) - return 0; + return false; - /* If we've visited this type pair before (in the case of aggregates - with self-referential types), and we made a decision, return it. 
*/ - p = lookup_type_pair (t1, t2, >c_visited, >c_ob); - if (p->same_p == 0 || p->same_p == 1) + /* Allocate a new cache entry for this comparison. */ + p = lookup_type_pair (t1, t2); + if (p->same_p[GTC_MERGE] == 0 || p->same_p[GTC_MERGE] == 1) { /* We have already decided whether T1 and T2 are the same, return the cached result. */ - return p->same_p == 1; + return p->same_p[GTC_MERGE] == 1; } - else if (p->same_p == -1) + + if ((slot = pointer_map_contains (sccstate, p)) != NULL) + cstate = (struct sccs *)*slot; + /* Not yet visited. DFS recurse. */ + if (!cstate) { - /* We are currently comparing this pair of types, assume - that they are the same and let the caller decide. */ - return 1; + gimple_types_compatible_p_1 (t1, t2, p, + sccstack, sccstate, sccstate_obstack); + cstate = (struct sccs *)* pointer_map_contains (sccstate, p); + state->low = MIN (state->low, cstate->low); } + /* If the type is still on the SCC stack adjust the parents low. */ + if (cstate->dfsnum < state->dfsnum + && cstate->on_sccstack) + state->low = MIN (cstate->dfsnum, state->low); - gcc_assert (p->same_p == -2); + /* Return the current lattice value. We start with an equality + assumption so types part of a SCC will be optimistically + treated equal unless proven otherwise. */ + return cstate->u.same_p; +} - /* Mark the (T1, T2) comparison in progress. */ - p->same_p = -1; +/* Worker for gimple_types_compatible. + SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */ + +static bool +gimple_types_compatible_p_1 (tree t1, tree t2, type_pair_t p, + VEC(type_pair_t, heap) **sccstack, + struct pointer_map_t *sccstate, + struct obstack *sccstate_obstack) +{ + struct sccs *state; + + gcc_assert (p->same_p[GTC_MERGE] == -2); + + state = XOBNEW (sccstate_obstack, struct sccs); + *pointer_map_insert (sccstate, p) = state; + + VEC_safe_push (type_pair_t, heap, *sccstack, p); + state->dfsnum = gtc_next_dfs_num++; + state->low = state->dfsnum; + state->on_sccstack = true; + /* Start with an equality assumption. As we DFS recurse into child + SCCs this assumption may get revisited. */ + state->u.same_p = 1; + + /* The struct tags shall compare equal. */ + if (!compare_type_names_p (t1, t2)) + goto different_types; /* If their attributes are not the same they can't be the same type. */ if (!attribute_list_equal (TYPE_ATTRIBUTES (t1), TYPE_ATTRIBUTES (t2))) @@ -3251,10 +3603,18 @@ gimple_types_compatible_p (tree t1, tree t2) /* Do type-specific comparisons. */ switch (TREE_CODE (t1)) { + case VECTOR_TYPE: + case COMPLEX_TYPE: + if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), + state, sccstack, sccstate, sccstate_obstack)) + goto different_types; + goto same_types; + case ARRAY_TYPE: /* Array types are the same if the element types are the same and the number of elements are the same. */ - if (!gimple_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)) + if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), + state, sccstack, sccstate, sccstate_obstack) || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2) || TYPE_NONALIASED_COMPONENT (t1) != TYPE_NONALIASED_COMPONENT (t2)) goto different_types; @@ -3285,9 +3645,15 @@ gimple_types_compatible_p (tree t1, tree t2) /* The minimum/maximum values have to be the same. 
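 
              For example, two int[4] arrays agree on the domain
              [0, 3], while int[4] and int[5] differ in the maximum
              value; two self-referential domains that both use
              PLACEHOLDER_EXPR bounds are accepted by the checks below.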
*/ if ((min1 == min2 - || (min1 && min2 && operand_equal_p (min1, min2, 0))) + || (min1 && min2 + && ((TREE_CODE (min1) == PLACEHOLDER_EXPR + && TREE_CODE (min2) == PLACEHOLDER_EXPR) + || operand_equal_p (min1, min2, 0)))) && (max1 == max2 - || (max1 && max2 && operand_equal_p (max1, max2, 0)))) + || (max1 && max2 + && ((TREE_CODE (max1) == PLACEHOLDER_EXPR + && TREE_CODE (max2) == PLACEHOLDER_EXPR) + || operand_equal_p (max1, max2, 0))))) goto same_types; else goto different_types; @@ -3296,8 +3662,8 @@ gimple_types_compatible_p (tree t1, tree t2) case METHOD_TYPE: /* Method types should belong to the same class. */ - if (!gimple_types_compatible_p (TYPE_METHOD_BASETYPE (t1), - TYPE_METHOD_BASETYPE (t2))) + if (!gtc_visit (TYPE_METHOD_BASETYPE (t1), TYPE_METHOD_BASETYPE (t2), + state, sccstack, sccstate, sccstate_obstack)) goto different_types; /* Fallthru */ @@ -3305,40 +3671,41 @@ gimple_types_compatible_p (tree t1, tree t2) case FUNCTION_TYPE: /* Function types are the same if the return type and arguments types are the same. */ - if (!gimple_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2))) + if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), + state, sccstack, sccstate, sccstate_obstack)) goto different_types; + + if (!comp_type_attributes (t1, t2)) + goto different_types; + + if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2)) + goto same_types; else { - if (!targetm.comp_type_attributes (t1, t2)) - goto different_types; + tree parms1, parms2; - if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2)) - goto same_types; - else + for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2); + parms1 && parms2; + parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2)) { - tree parms1, parms2; - - for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2); - parms1 && parms2; - parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2)) - { - if (!gimple_types_compatible_p (TREE_VALUE (parms1), - TREE_VALUE (parms2))) - goto different_types; - } - - if (parms1 || parms2) + if (!gtc_visit (TREE_VALUE (parms1), TREE_VALUE (parms2), + state, sccstack, sccstate, sccstate_obstack)) goto different_types; - - goto same_types; } + + if (parms1 || parms2) + goto different_types; + + goto same_types; } case OFFSET_TYPE: { - if (!gimple_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)) - || !gimple_types_compatible_p (TYPE_OFFSET_BASETYPE (t1), - TYPE_OFFSET_BASETYPE (t2))) + if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), + state, sccstack, sccstate, sccstate_obstack) + || !gtc_visit (TYPE_OFFSET_BASETYPE (t1), + TYPE_OFFSET_BASETYPE (t2), + state, sccstack, sccstate, sccstate_obstack)) goto different_types; goto same_types; @@ -3352,27 +3719,10 @@ gimple_types_compatible_p (tree t1, tree t2) if (TYPE_REF_CAN_ALIAS_ALL (t1) != TYPE_REF_CAN_ALIAS_ALL (t2)) goto different_types; - /* If one pointer points to an incomplete type variant of - the other pointed-to type they are the same. */ - if (TREE_CODE (TREE_TYPE (t1)) == TREE_CODE (TREE_TYPE (t2)) - && RECORD_OR_UNION_TYPE_P (TREE_TYPE (t1)) - && (!COMPLETE_TYPE_P (TREE_TYPE (t1)) - || !COMPLETE_TYPE_P (TREE_TYPE (t2))) - && compare_type_names_p (TYPE_MAIN_VARIANT (TREE_TYPE (t1)), - TYPE_MAIN_VARIANT (TREE_TYPE (t2)), true)) - { - /* Replace the pointed-to incomplete type with the - complete one. */ - if (COMPLETE_TYPE_P (TREE_TYPE (t2))) - TREE_TYPE (t1) = TREE_TYPE (t2); - else - TREE_TYPE (t2) = TREE_TYPE (t1); - goto same_types; - } - /* Otherwise, pointer and reference types are the same if the pointed-to types are the same. 
*/ - if (gimple_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2))) + if (gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), + state, sccstack, sccstate, sccstate_obstack)) goto same_types; goto different_types; @@ -3437,6 +3787,9 @@ gimple_types_compatible_p (tree t1, tree t2) if (tree_int_cst_equal (c1, c2) != 1) goto different_types; + + if (TREE_PURPOSE (v1) != TREE_PURPOSE (v2)) + goto different_types; } /* If one enumeration has more values than the other, they @@ -3453,28 +3806,23 @@ gimple_types_compatible_p (tree t1, tree t2) { tree f1, f2; - /* If one type requires structural equality checks and the - other doesn't, do not merge the types. */ - if (TYPE_STRUCTURAL_EQUALITY_P (t1) - != TYPE_STRUCTURAL_EQUALITY_P (t2)) - goto different_types; - - /* The struct tags shall compare equal. */ - if (!compare_type_names_p (TYPE_MAIN_VARIANT (t1), - TYPE_MAIN_VARIANT (t2), false)) - goto different_types; - /* For aggregate types, all the fields must be the same. */ for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2); f1 && f2; f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2)) { - /* The fields must have the same name, offset and type. */ + /* Different field kinds are not compatible. */ + if (TREE_CODE (f1) != TREE_CODE (f2)) + goto different_types; + /* Field decls must have the same name and offset. */ + if (TREE_CODE (f1) == FIELD_DECL + && (DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2) + || !gimple_compare_field_offset (f1, f2))) + goto different_types; + /* All entities should have the same name and type. */ if (DECL_NAME (f1) != DECL_NAME (f2) - || DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2) - || !compare_field_offset (f1, f2) - || !gimple_types_compatible_p (TREE_TYPE (f1), - TREE_TYPE (f2))) + || !gtc_visit (TREE_TYPE (f1), TREE_TYPE (f2), + state, sccstack, sccstate, sccstate_obstack)) goto different_types; } @@ -3492,32 +3840,144 @@ gimple_types_compatible_p (tree t1, tree t2) /* Common exit path for types that are not compatible. */ different_types: - p->same_p = 0; - return 0; + state->u.same_p = 0; + goto pop; /* Common exit path for types that are compatible. */ same_types: - p->same_p = 1; - return 1; -} + gcc_assert (state->u.same_p == 1); +pop: + if (state->low == state->dfsnum) + { + type_pair_t x; + /* Pop off the SCC and set its cache values to the final + comparison result. */ + do + { + struct sccs *cstate; + x = VEC_pop (type_pair_t, *sccstack); + cstate = (struct sccs *)*pointer_map_contains (sccstate, x); + cstate->on_sccstack = false; + x->same_p[GTC_MERGE] = state->u.same_p; + } + while (x != p); + } + return state->u.same_p; +} -/* Per pointer state for the SCC finding. The on_sccstack flag - is not strictly required, it is true when there is no hash value - recorded for the type and false otherwise. But querying that - is slower. */ +/* Return true iff T1 and T2 are structurally identical. When + FOR_MERGING_P is true the an incomplete type and a complete type + are considered different, otherwise they are considered compatible. */ -struct sccs +static bool +gimple_types_compatible_p (tree t1, tree t2) { - unsigned int dfsnum; - unsigned int low; - bool on_sccstack; - hashval_t hash; -}; + VEC(type_pair_t, heap) *sccstack = NULL; + struct pointer_map_t *sccstate; + struct obstack sccstate_obstack; + type_pair_t p = NULL; + bool res; + tree leader1, leader2; + + /* Before starting to set up the SCC machinery handle simple cases. */ + + /* Check first for the obvious case of pointer identity. 
*/ + if (t1 == t2) + return true; + + /* Check that we have two types to compare. */ + if (t1 == NULL_TREE || t2 == NULL_TREE) + return false; + + /* Can't be the same type if the types don't have the same code. */ + if (TREE_CODE (t1) != TREE_CODE (t2)) + return false; + + /* Can't be the same type if they have different CV qualifiers. */ + if (TYPE_QUALS (t1) != TYPE_QUALS (t2)) + return false; + + if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2)) + return false; + + /* Void types and nullptr types are always the same. */ + if (TREE_CODE (t1) == VOID_TYPE + || TREE_CODE (t1) == NULLPTR_TYPE) + return true; + + /* Can't be the same type if they have different alignment or mode. */ + if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2) + || TYPE_MODE (t1) != TYPE_MODE (t2)) + return false; + + /* Do some simple checks before doing three hashtable queries. */ + if (INTEGRAL_TYPE_P (t1) + || SCALAR_FLOAT_TYPE_P (t1) + || FIXED_POINT_TYPE_P (t1) + || TREE_CODE (t1) == VECTOR_TYPE + || TREE_CODE (t1) == COMPLEX_TYPE + || TREE_CODE (t1) == OFFSET_TYPE + || POINTER_TYPE_P (t1)) + { + /* Can't be the same type if they have different sign or precision. */ + if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2) + || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2)) + return false; + + if (TREE_CODE (t1) == INTEGER_TYPE + && (TYPE_IS_SIZETYPE (t1) != TYPE_IS_SIZETYPE (t2) + || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))) + return false; + + /* That's all we need to check for float and fixed-point types. */ + if (SCALAR_FLOAT_TYPE_P (t1) + || FIXED_POINT_TYPE_P (t1)) + return true; + + /* For other types fall thru to more complex checks. */ + } + + /* If the types have been previously registered and found equal + they still are. */ + leader1 = gimple_lookup_type_leader (t1); + leader2 = gimple_lookup_type_leader (t2); + if (leader1 == t2 + || t1 == leader2 + || (leader1 && leader1 == leader2)) + return true; + + /* If the hash values of t1 and t2 are different the types can't + possibly be the same. This helps keeping the type-pair hashtable + small, only tracking comparisons for hash collisions. */ + if (gimple_type_hash (t1) != gimple_type_hash (t2)) + return false; + + /* If we've visited this type pair before (in the case of aggregates + with self-referential types), and we made a decision, return it. */ + p = lookup_type_pair (t1, t2); + if (p->same_p[GTC_MERGE] == 0 || p->same_p[GTC_MERGE] == 1) + { + /* We have already decided whether T1 and T2 are the + same, return the cached result. */ + return p->same_p[GTC_MERGE] == 1; + } + + /* Now set up the SCC machinery for the comparison. */ + gtc_next_dfs_num = 1; + sccstate = pointer_map_create (); + gcc_obstack_init (&sccstate_obstack); + res = gimple_types_compatible_p_1 (t1, t2, p, + &sccstack, sccstate, &sccstate_obstack); + VEC_free (type_pair_t, heap, sccstack); + pointer_map_destroy (sccstate); + obstack_free (&sccstate_obstack, NULL); + + return res; +} -static unsigned int next_dfs_num; static hashval_t iterative_hash_gimple_type (tree, hashval_t, VEC(tree, heap) **, @@ -3535,12 +3995,15 @@ visit (tree t, struct sccs *state, hashval_t v, struct obstack *sccstate_obstack) { struct sccs *cstate = NULL; + struct tree_int_map m; void **slot; /* If there is a hash value recorded for this type then it can't possibly be part of our parent SCC. Simply mix in its hash. 
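 
      For example, once `struct S' has a final hash H recorded in
      type_hash_cache, any later type referencing it simply mixes H
      into its own hash value instead of recursing:
 
        v = iterative_hash_hashval_t (H, v);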
*/ - if ((slot = pointer_map_contains (type_hash_cache, t))) - return iterative_hash_hashval_t ((hashval_t) (size_t) *slot, v); + m.base.from = t; + if ((slot = htab_find_slot (type_hash_cache, &m, NO_INSERT)) + && *slot) + return iterative_hash_hashval_t (((struct tree_int_map *) *slot)->to, v); if ((slot = pointer_map_contains (sccstate, t)) != NULL) cstate = (struct sccs *)*slot; @@ -3584,6 +4047,27 @@ iterative_hash_name (tree name, hashval_t v) return iterative_hash_object (IDENTIFIER_HASH_VALUE (name), v); } +/* A type, hashvalue pair for sorting SCC members. */ + +struct type_hash_pair { + tree type; + hashval_t hash; +}; + +/* Compare two type, hashvalue pairs. */ + +static int +type_hash_pair_compare (const void *p1_, const void *p2_) +{ + const struct type_hash_pair *p1 = (const struct type_hash_pair *) p1_; + const struct type_hash_pair *p2 = (const struct type_hash_pair *) p2_; + if (p1->hash < p2->hash) + return -1; + else if (p1->hash > p2->hash) + return 1; + return 0; +} + /* Returning a hash value for gimple type TYPE combined with VAL. SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. @@ -3606,11 +4090,8 @@ iterative_hash_gimple_type (tree type, hashval_t val, void **slot; struct sccs *state; -#ifdef ENABLE_CHECKING - /* Not visited during this DFS walk nor during previous walks. */ - gcc_assert (!pointer_map_contains (type_hash_cache, type) - && !pointer_map_contains (sccstate, type)); -#endif + /* Not visited during this DFS walk. */ + gcc_checking_assert (!pointer_map_contains (sccstate, type)); state = XOBNEW (sccstate_obstack, struct sccs); *pointer_map_insert (sccstate, type) = state; @@ -3623,7 +4104,8 @@ iterative_hash_gimple_type (tree type, hashval_t val, smaller sets; when searching for existing matching types to merge, only existing types having the same features as the new type will be checked. */ - v = iterative_hash_hashval_t (TREE_CODE (type), 0); + v = iterative_hash_name (TYPE_NAME (type), 0); + v = iterative_hash_hashval_t (TREE_CODE (type), v); v = iterative_hash_hashval_t (TYPE_QUALS (type), v); v = iterative_hash_hashval_t (TREE_ADDRESSABLE (type), v); @@ -3641,20 +4123,10 @@ iterative_hash_gimple_type (tree type, hashval_t val, } /* For pointer and reference types, fold in information about the type - pointed to but do not recurse into possibly incomplete types to - avoid hash differences for complete vs. incomplete types. */ + pointed to. */ if (POINTER_TYPE_P (type)) - { - if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (type))) - { - v = iterative_hash_hashval_t (TREE_CODE (TREE_TYPE (type)), v); - v = iterative_hash_name - (TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (type))), v); - } - else - v = visit (TREE_TYPE (type), state, v, - sccstack, sccstate, sccstate_obstack); - } + v = visit (TREE_TYPE (type), state, v, + sccstack, sccstate, sccstate_obstack); /* For integer types hash the types min/max values and the string flag. */ if (TREE_CODE (type) == INTEGER_TYPE) @@ -3668,220 +4140,678 @@ iterative_hash_gimple_type (tree type, hashval_t val, v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v); } - /* For array types hash their domain and the string flag. */ - if (TREE_CODE (type) == ARRAY_TYPE - && TYPE_DOMAIN (type)) - { - v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v); - v = visit (TYPE_DOMAIN (type), state, v, - sccstack, sccstate, sccstate_obstack); - } + /* For array types hash their domain and the string flag. 
*/ + if (TREE_CODE (type) == ARRAY_TYPE + && TYPE_DOMAIN (type)) + { + v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v); + v = visit (TYPE_DOMAIN (type), state, v, + sccstack, sccstate, sccstate_obstack); + } + + /* Recurse for aggregates with a single element type. */ + if (TREE_CODE (type) == ARRAY_TYPE + || TREE_CODE (type) == COMPLEX_TYPE + || TREE_CODE (type) == VECTOR_TYPE) + v = visit (TREE_TYPE (type), state, v, + sccstack, sccstate, sccstate_obstack); + + /* Incorporate function return and argument types. */ + if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE) + { + unsigned na; + tree p; + + /* For method types also incorporate their parent class. */ + if (TREE_CODE (type) == METHOD_TYPE) + v = visit (TYPE_METHOD_BASETYPE (type), state, v, + sccstack, sccstate, sccstate_obstack); + + /* Check result and argument types. */ + v = visit (TREE_TYPE (type), state, v, + sccstack, sccstate, sccstate_obstack); + for (p = TYPE_ARG_TYPES (type), na = 0; p; p = TREE_CHAIN (p)) + { + v = visit (TREE_VALUE (p), state, v, + sccstack, sccstate, sccstate_obstack); + na++; + } + + v = iterative_hash_hashval_t (na, v); + } + + if (TREE_CODE (type) == RECORD_TYPE + || TREE_CODE (type) == UNION_TYPE + || TREE_CODE (type) == QUAL_UNION_TYPE) + { + unsigned nf; + tree f; + + for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f)) + { + v = iterative_hash_name (DECL_NAME (f), v); + v = visit (TREE_TYPE (f), state, v, + sccstack, sccstate, sccstate_obstack); + nf++; + } + + v = iterative_hash_hashval_t (nf, v); + } + + /* Record hash for us. */ + state->u.hash = v; + + /* See if we found an SCC. */ + if (state->low == state->dfsnum) + { + tree x; + struct tree_int_map *m; + + /* Pop off the SCC and set its hash values. */ + x = VEC_pop (tree, *sccstack); + /* Optimize SCC size one. */ + if (x == type) + { + state->on_sccstack = false; + m = ggc_alloc_cleared_tree_int_map (); + m->base.from = x; + m->to = v; + slot = htab_find_slot (type_hash_cache, m, INSERT); + gcc_assert (!*slot); + *slot = (void *) m; + } + else + { + struct sccs *cstate; + unsigned first, i, size, j; + struct type_hash_pair *pairs; + /* Pop off the SCC and build an array of type, hash pairs. */ + first = VEC_length (tree, *sccstack) - 1; + while (VEC_index (tree, *sccstack, first) != type) + --first; + size = VEC_length (tree, *sccstack) - first + 1; + pairs = XALLOCAVEC (struct type_hash_pair, size); + i = 0; + cstate = (struct sccs *)*pointer_map_contains (sccstate, x); + cstate->on_sccstack = false; + pairs[i].type = x; + pairs[i].hash = cstate->u.hash; + do + { + x = VEC_pop (tree, *sccstack); + cstate = (struct sccs *)*pointer_map_contains (sccstate, x); + cstate->on_sccstack = false; + ++i; + pairs[i].type = x; + pairs[i].hash = cstate->u.hash; + } + while (x != type); + gcc_assert (i + 1 == size); + /* Sort the arrays of type, hash pairs so that when we mix in + all members of the SCC the hash value becomes independent on + the order we visited the SCC. Disregard hashes equal to + the hash of the type we mix into because we cannot guarantee + a stable sort for those across different TUs. */ + qsort (pairs, size, sizeof (struct type_hash_pair), + type_hash_pair_compare); + for (i = 0; i < size; ++i) + { + hashval_t hash; + m = ggc_alloc_cleared_tree_int_map (); + m->base.from = pairs[i].type; + hash = pairs[i].hash; + /* Skip same hashes. 
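+	         That is, J is advanced past the run of entries whose
+	         hash equals PAIRS[I].hash; together with the loop over
+	         the prefix below, every member hash except those equal
+	         ones gets mixed in.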
*/ + for (j = i + 1; j < size && pairs[j].hash == pairs[i].hash; ++j) + ; + for (; j < size; ++j) + hash = iterative_hash_hashval_t (pairs[j].hash, hash); + for (j = 0; pairs[j].hash != pairs[i].hash; ++j) + hash = iterative_hash_hashval_t (pairs[j].hash, hash); + m->to = hash; + if (pairs[i].type == type) + v = hash; + slot = htab_find_slot (type_hash_cache, m, INSERT); + gcc_assert (!*slot); + *slot = (void *) m; + } + } + } + + return iterative_hash_hashval_t (v, val); +} + + +/* Returns a hash value for P (assumed to be a type). The hash value + is computed using some distinguishing features of the type. Note + that we cannot use pointer hashing here as we may be dealing with + two distinct instances of the same type. + + This function should produce the same hash value for two compatible + types according to gimple_types_compatible_p. */ + +static hashval_t +gimple_type_hash (const void *p) +{ + const_tree t = (const_tree) p; + VEC(tree, heap) *sccstack = NULL; + struct pointer_map_t *sccstate; + struct obstack sccstate_obstack; + hashval_t val; + void **slot; + struct tree_int_map m; + + if (type_hash_cache == NULL) + type_hash_cache = htab_create_ggc (512, tree_int_map_hash, + tree_int_map_eq, NULL); + + m.base.from = CONST_CAST_TREE (t); + if ((slot = htab_find_slot (type_hash_cache, &m, NO_INSERT)) + && *slot) + return iterative_hash_hashval_t (((struct tree_int_map *) *slot)->to, 0); + + /* Perform a DFS walk and pre-hash all reachable types. */ + next_dfs_num = 1; + sccstate = pointer_map_create (); + gcc_obstack_init (&sccstate_obstack); + val = iterative_hash_gimple_type (CONST_CAST_TREE (t), 0, + &sccstack, sccstate, &sccstate_obstack); + VEC_free (tree, heap, sccstack); + pointer_map_destroy (sccstate); + obstack_free (&sccstate_obstack, NULL); + + return val; +} + +/* Returning a hash value for gimple type TYPE combined with VAL. + + The hash value returned is equal for types considered compatible + by gimple_canonical_types_compatible_p. */ + +static hashval_t +iterative_hash_canonical_type (tree type, hashval_t val) +{ + hashval_t v; + void **slot; + struct tree_int_map *mp, m; + + m.base.from = type; + if ((slot = htab_find_slot (canonical_type_hash_cache, &m, INSERT)) + && *slot) + return iterative_hash_hashval_t (((struct tree_int_map *) *slot)->to, val); + + /* Combine a few common features of types so that types are grouped into + smaller sets; when searching for existing matching types to merge, + only existing types having the same features as the new type will be + checked. */ + v = iterative_hash_hashval_t (TREE_CODE (type), 0); + v = iterative_hash_hashval_t (TREE_ADDRESSABLE (type), v); + v = iterative_hash_hashval_t (TYPE_ALIGN (type), v); + v = iterative_hash_hashval_t (TYPE_MODE (type), v); + + /* Incorporate common features of numerical types. */ + if (INTEGRAL_TYPE_P (type) + || SCALAR_FLOAT_TYPE_P (type) + || FIXED_POINT_TYPE_P (type) + || TREE_CODE (type) == VECTOR_TYPE + || TREE_CODE (type) == COMPLEX_TYPE + || TREE_CODE (type) == OFFSET_TYPE + || POINTER_TYPE_P (type)) + { + v = iterative_hash_hashval_t (TYPE_PRECISION (type), v); + v = iterative_hash_hashval_t (TYPE_UNSIGNED (type), v); + } + + /* For pointer and reference types, fold in information about the type + pointed to but do not recurse to the pointed-to type. 
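+     Only cheap properties are mixed in below (the ref-all flag, the
+     pointed-to address space, TYPE_RESTRICT and the pointed-to
+     TREE_CODE), which keeps the walk cycle-free for self-referential
+     pointer types.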
*/ + if (POINTER_TYPE_P (type)) + { + v = iterative_hash_hashval_t (TYPE_REF_CAN_ALIAS_ALL (type), v); + v = iterative_hash_hashval_t (TYPE_ADDR_SPACE (TREE_TYPE (type)), v); + v = iterative_hash_hashval_t (TYPE_RESTRICT (type), v); + v = iterative_hash_hashval_t (TREE_CODE (TREE_TYPE (type)), v); + } + + /* For integer types hash the types min/max values and the string flag. */ + if (TREE_CODE (type) == INTEGER_TYPE) + { + v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v); + v = iterative_hash_hashval_t (TYPE_IS_SIZETYPE (type), v); + } + + /* For array types hash their domain and the string flag. */ + if (TREE_CODE (type) == ARRAY_TYPE + && TYPE_DOMAIN (type)) + { + v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v); + v = iterative_hash_canonical_type (TYPE_DOMAIN (type), v); + } + + /* Recurse for aggregates with a single element type. */ + if (TREE_CODE (type) == ARRAY_TYPE + || TREE_CODE (type) == COMPLEX_TYPE + || TREE_CODE (type) == VECTOR_TYPE) + v = iterative_hash_canonical_type (TREE_TYPE (type), v); + + /* Incorporate function return and argument types. */ + if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE) + { + unsigned na; + tree p; + + /* For method types also incorporate their parent class. */ + if (TREE_CODE (type) == METHOD_TYPE) + v = iterative_hash_canonical_type (TYPE_METHOD_BASETYPE (type), v); + + v = iterative_hash_canonical_type (TREE_TYPE (type), v); + + for (p = TYPE_ARG_TYPES (type), na = 0; p; p = TREE_CHAIN (p)) + { + v = iterative_hash_canonical_type (TREE_VALUE (p), v); + na++; + } + + v = iterative_hash_hashval_t (na, v); + } + + if (TREE_CODE (type) == RECORD_TYPE + || TREE_CODE (type) == UNION_TYPE + || TREE_CODE (type) == QUAL_UNION_TYPE) + { + unsigned nf; + tree f; + + for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f)) + if (TREE_CODE (f) == FIELD_DECL) + { + v = iterative_hash_canonical_type (TREE_TYPE (f), v); + nf++; + } + + v = iterative_hash_hashval_t (nf, v); + } + + /* Cache the just computed hash value. */ + mp = ggc_alloc_cleared_tree_int_map (); + mp->base.from = type; + mp->to = v; + *slot = (void *) mp; + + return iterative_hash_hashval_t (v, val); +} + +static hashval_t +gimple_canonical_type_hash (const void *p) +{ + if (canonical_type_hash_cache == NULL) + canonical_type_hash_cache = htab_create_ggc (512, tree_int_map_hash, + tree_int_map_eq, NULL); + + return iterative_hash_canonical_type (CONST_CAST_TREE ((const_tree) p), 0); +} + + +/* Returns nonzero if P1 and P2 are equal. */ + +static int +gimple_type_eq (const void *p1, const void *p2) +{ + const_tree t1 = (const_tree) p1; + const_tree t2 = (const_tree) p2; + return gimple_types_compatible_p (CONST_CAST_TREE (t1), + CONST_CAST_TREE (t2)); +} + + +/* Worker for gimple_register_type. + Register type T in the global type table gimple_types. + When REGISTERING_MV is false first recurse for the main variant of T. */ + +static tree +gimple_register_type_1 (tree t, bool registering_mv) +{ + void **slot; + gimple_type_leader_entry *leader; + + /* If we registered this type before return the cached result. */ + leader = &gimple_type_leader[TYPE_UID (t) % GIMPLE_TYPE_LEADER_SIZE]; + if (leader->type == t) + return leader->leader; + + /* Always register the main variant first. This is important so we + pick up the non-typedef variants as canonical, otherwise we'll end + up taking typedef ids for structure tags during comparison. + It also makes sure that main variants will be merged to main variants. 
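+   (For example, registering a variant like 'const struct S' first
+   recurses on its TYPE_MAIN_VARIANT 'struct S', so the unqualified
+   tag type is entered into the table before the variant.)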
+ As we are operating on a possibly partially fixed up type graph + do not bother to recurse more than once, otherwise we may end up + walking in circles. + If we are registering a main variant it will either remain its + own main variant or it will be merged to something else in which + case we do not care for the main variant leader. */ + if (!registering_mv + && TYPE_MAIN_VARIANT (t) != t) + gimple_register_type_1 (TYPE_MAIN_VARIANT (t), true); + + /* See if we already have an equivalent type registered. */ + slot = htab_find_slot (gimple_types, t, INSERT); + if (*slot + && *(tree *)slot != t) + { + tree new_type = (tree) *((tree *) slot); + leader->type = t; + leader->leader = new_type; + return new_type; + } + + /* If not, insert it to the cache and the hash. */ + leader->type = t; + leader->leader = t; + *slot = (void *) t; + return t; +} + +/* Register type T in the global type table gimple_types. + If another type T', compatible with T, already existed in + gimple_types then return T', otherwise return T. This is used by + LTO to merge identical types read from different TUs. */ + +tree +gimple_register_type (tree t) +{ + gcc_assert (TYPE_P (t)); + + if (!gimple_type_leader) + gimple_type_leader = ggc_alloc_cleared_vec_gimple_type_leader_entry_s + (GIMPLE_TYPE_LEADER_SIZE); + + if (gimple_types == NULL) + gimple_types = htab_create_ggc (16381, gimple_type_hash, gimple_type_eq, 0); + + return gimple_register_type_1 (t, false); +} + +/* The TYPE_CANONICAL merging machinery. It should closely resemble + the middle-end types_compatible_p function. It needs to avoid + claiming types are different for types that should be treated + the same with respect to TBAA. Canonical types are also used + for IL consistency checks via the useless_type_conversion_p + predicate which does not handle all type kinds itself but falls + back to pointer-comparison of TYPE_CANONICAL for aggregates + for example. */ + +/* Return true iff T1 and T2 are structurally identical for what + TBAA is concerned. */ + +static bool +gimple_canonical_types_compatible_p (tree t1, tree t2) +{ + /* Before starting to set up the SCC machinery handle simple cases. */ + + /* Check first for the obvious case of pointer identity. */ + if (t1 == t2) + return true; + + /* Check that we have two types to compare. */ + if (t1 == NULL_TREE || t2 == NULL_TREE) + return false; - /* Recurse for aggregates with a single element type. */ - if (TREE_CODE (type) == ARRAY_TYPE - || TREE_CODE (type) == COMPLEX_TYPE - || TREE_CODE (type) == VECTOR_TYPE) - v = visit (TREE_TYPE (type), state, v, - sccstack, sccstate, sccstate_obstack); + /* If the types have been previously registered and found equal + they still are. */ + if (TYPE_CANONICAL (t1) + && TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2)) + return true; - /* Incorporate function return and argument types. */ - if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE) - { - unsigned na; - tree p; + /* Can't be the same type if the types don't have the same code. */ + if (TREE_CODE (t1) != TREE_CODE (t2)) + return false; - /* For method types also incorporate their parent class. */ - if (TREE_CODE (type) == METHOD_TYPE) - v = visit (TYPE_METHOD_BASETYPE (type), state, v, - sccstack, sccstate, sccstate_obstack); + if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2)) + return false; - v = visit (TREE_TYPE (type), state, v, - sccstack, sccstate, sccstate_obstack); + /* Qualifiers do not matter for canonical type comparison purposes. 
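+     'const int' and 'int' therefore end up with the same canonical
+     type, which is what TBAA requires.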
*/ - for (p = TYPE_ARG_TYPES (type), na = 0; p; p = TREE_CHAIN (p)) - { - v = visit (TREE_VALUE (p), state, v, - sccstack, sccstate, sccstate_obstack); - na++; - } + /* Void types and nullptr types are always the same. */ + if (TREE_CODE (t1) == VOID_TYPE + || TREE_CODE (t1) == NULLPTR_TYPE) + return true; - v = iterative_hash_hashval_t (na, v); - } + /* Can't be the same type if they have different alignment, or mode. */ + if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2) + || TYPE_MODE (t1) != TYPE_MODE (t2)) + return false; - if (TREE_CODE (type) == RECORD_TYPE - || TREE_CODE (type) == UNION_TYPE - || TREE_CODE (type) == QUAL_UNION_TYPE) + /* Non-aggregate types can be handled cheaply. */ + if (INTEGRAL_TYPE_P (t1) + || SCALAR_FLOAT_TYPE_P (t1) + || FIXED_POINT_TYPE_P (t1) + || TREE_CODE (t1) == VECTOR_TYPE + || TREE_CODE (t1) == COMPLEX_TYPE + || TREE_CODE (t1) == OFFSET_TYPE + || POINTER_TYPE_P (t1)) { - unsigned nf; - tree f; + /* Can't be the same type if they have different sign or precision. */ + if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2) + || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2)) + return false; - v = iterative_hash_name (TYPE_NAME (TYPE_MAIN_VARIANT (type)), v); + if (TREE_CODE (t1) == INTEGER_TYPE + && (TYPE_IS_SIZETYPE (t1) != TYPE_IS_SIZETYPE (t2) + || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))) + return false; - for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f)) + /* For canonical type comparisons we do not want to build SCCs + so we cannot compare pointed-to types. But we can, for now, + require the same pointed-to type kind and match what + useless_type_conversion_p would do. */ + if (POINTER_TYPE_P (t1)) { - v = iterative_hash_name (DECL_NAME (f), v); - v = visit (TREE_TYPE (f), state, v, - sccstack, sccstate, sccstate_obstack); - nf++; + /* If the two pointers have different ref-all attributes, + they can't be the same type. */ + if (TYPE_REF_CAN_ALIAS_ALL (t1) != TYPE_REF_CAN_ALIAS_ALL (t2)) + return false; + + if (TYPE_ADDR_SPACE (TREE_TYPE (t1)) + != TYPE_ADDR_SPACE (TREE_TYPE (t2))) + return false; + + if (TYPE_RESTRICT (t1) != TYPE_RESTRICT (t2)) + return false; + + if (TREE_CODE (TREE_TYPE (t1)) != TREE_CODE (TREE_TYPE (t2))) + return false; } - v = iterative_hash_hashval_t (nf, v); + /* Tail-recurse to components. */ + if (TREE_CODE (t1) == VECTOR_TYPE + || TREE_CODE (t1) == COMPLEX_TYPE) + return gimple_canonical_types_compatible_p (TREE_TYPE (t1), + TREE_TYPE (t2)); + + return true; } - /* Record hash for us. */ - state->hash = v; + /* If their attributes are not the same they can't be the same type. */ + if (!attribute_list_equal (TYPE_ATTRIBUTES (t1), TYPE_ATTRIBUTES (t2))) + return false; - /* See if we found an SCC. */ - if (state->low == state->dfsnum) + /* Do type-specific comparisons. */ + switch (TREE_CODE (t1)) { - tree x; - - /* Pop off the SCC and set its hash values. */ - do + case ARRAY_TYPE: + /* Array types are the same if the element types are the same and + the number of elements are the same. 
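+	 E.g. two int[4] types from different TUs match here, while
+	 int[4] and int[5] fail the TYPE_DOMAIN comparison below.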
*/ + if (!gimple_canonical_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)) + || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2) + || TYPE_NONALIASED_COMPONENT (t1) != TYPE_NONALIASED_COMPONENT (t2)) + return false; + else { - struct sccs *cstate; - x = VEC_pop (tree, *sccstack); - gcc_assert (!pointer_map_contains (type_hash_cache, x)); - cstate = (struct sccs *)*pointer_map_contains (sccstate, x); - cstate->on_sccstack = false; - slot = pointer_map_insert (type_hash_cache, x); - *slot = (void *) (size_t) cstate->hash; + tree i1 = TYPE_DOMAIN (t1); + tree i2 = TYPE_DOMAIN (t2); + + /* For an incomplete external array, the type domain can be + NULL_TREE. Check this condition also. */ + if (i1 == NULL_TREE && i2 == NULL_TREE) + return true; + else if (i1 == NULL_TREE || i2 == NULL_TREE) + return false; + /* If for a complete array type the possibly gimplified sizes + are different the types are different. */ + else if (((TYPE_SIZE (i1) != NULL) ^ (TYPE_SIZE (i2) != NULL)) + || (TYPE_SIZE (i1) + && TYPE_SIZE (i2) + && !operand_equal_p (TYPE_SIZE (i1), TYPE_SIZE (i2), 0))) + return false; + else + { + tree min1 = TYPE_MIN_VALUE (i1); + tree min2 = TYPE_MIN_VALUE (i2); + tree max1 = TYPE_MAX_VALUE (i1); + tree max2 = TYPE_MAX_VALUE (i2); + + /* The minimum/maximum values have to be the same. */ + if ((min1 == min2 + || (min1 && min2 + && ((TREE_CODE (min1) == PLACEHOLDER_EXPR + && TREE_CODE (min2) == PLACEHOLDER_EXPR) + || operand_equal_p (min1, min2, 0)))) + && (max1 == max2 + || (max1 && max2 + && ((TREE_CODE (max1) == PLACEHOLDER_EXPR + && TREE_CODE (max2) == PLACEHOLDER_EXPR) + || operand_equal_p (max1, max2, 0))))) + return true; + else + return false; + } } - while (x != type); - } - return iterative_hash_hashval_t (v, val); -} + case METHOD_TYPE: + /* Method types should belong to the same class. */ + if (!gimple_canonical_types_compatible_p + (TYPE_METHOD_BASETYPE (t1), TYPE_METHOD_BASETYPE (t2))) + return false; + /* Fallthru */ -/* Returns a hash value for P (assumed to be a type). The hash value - is computed using some distinguishing features of the type. Note - that we cannot use pointer hashing here as we may be dealing with - two distinct instances of the same type. + case FUNCTION_TYPE: + /* Function types are the same if the return type and arguments types + are the same. */ + if (!gimple_canonical_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2))) + return false; - This function should produce the same hash value for two compatible - types according to gimple_types_compatible_p. */ + if (!comp_type_attributes (t1, t2)) + return false; -static hashval_t -gimple_type_hash (const void *p) -{ - const_tree t = (const_tree) p; - VEC(tree, heap) *sccstack = NULL; - struct pointer_map_t *sccstate; - struct obstack sccstate_obstack; - hashval_t val; - void **slot; + if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2)) + return true; + else + { + tree parms1, parms2; - if (type_hash_cache == NULL) - type_hash_cache = pointer_map_create (); + for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2); + parms1 && parms2; + parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2)) + { + if (!gimple_canonical_types_compatible_p + (TREE_VALUE (parms1), TREE_VALUE (parms2))) + return false; + } - if ((slot = pointer_map_contains (type_hash_cache, p)) != NULL) - return iterative_hash_hashval_t ((hashval_t) (size_t) *slot, 0); + if (parms1 || parms2) + return false; - /* Perform a DFS walk and pre-hash all reachable types. 
*/ - next_dfs_num = 1; - sccstate = pointer_map_create (); - gcc_obstack_init (&sccstate_obstack); - val = iterative_hash_gimple_type (CONST_CAST_TREE (t), 0, - &sccstack, sccstate, &sccstate_obstack); - VEC_free (tree, heap, sccstack); - pointer_map_destroy (sccstate); - obstack_free (&sccstate_obstack, NULL); + return true; + } - return val; + case RECORD_TYPE: + case UNION_TYPE: + case QUAL_UNION_TYPE: + { + tree f1, f2; + + /* For aggregate types, all the fields must be the same. */ + for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2); + f1 && f2; + f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2)) + { + /* Skip non-fields. */ + while (f1 && TREE_CODE (f1) != FIELD_DECL) + f1 = TREE_CHAIN (f1); + while (f2 && TREE_CODE (f2) != FIELD_DECL) + f2 = TREE_CHAIN (f2); + if (!f1 || !f2) + break; + /* The fields must have the same name, offset and type. */ + if (DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2) + || !gimple_compare_field_offset (f1, f2) + || !gimple_canonical_types_compatible_p + (TREE_TYPE (f1), TREE_TYPE (f2))) + return false; + } + + /* If one aggregate has more fields than the other, they + are not the same. */ + if (f1 || f2) + return false; + + return true; + } + + default: + gcc_unreachable (); + } } /* Returns nonzero if P1 and P2 are equal. */ static int -gimple_type_eq (const void *p1, const void *p2) +gimple_canonical_type_eq (const void *p1, const void *p2) { const_tree t1 = (const_tree) p1; const_tree t2 = (const_tree) p2; - return gimple_types_compatible_p (CONST_CAST_TREE (t1), CONST_CAST_TREE (t2)); + return gimple_canonical_types_compatible_p (CONST_CAST_TREE (t1), + CONST_CAST_TREE (t2)); } - /* Register type T in the global type table gimple_types. If another type T', compatible with T, already existed in gimple_types then return T', otherwise return T. This is used by - LTO to merge identical types read from different TUs. */ + LTO to merge identical types read from different TUs. + + ??? This merging does not exactly match how the tree.c middle-end + functions will assign TYPE_CANONICAL when new types are created + during optimization (which at least happens for pointer and array + types). */ tree -gimple_register_type (tree t) +gimple_register_canonical_type (tree t) { void **slot; gcc_assert (TYPE_P (t)); - /* Always register the main variant first. This is important so we - pick up the non-typedef variants as canonical, otherwise we'll end - up taking typedef ids for structure tags during comparison. */ - if (TYPE_MAIN_VARIANT (t) != t) - gimple_register_type (TYPE_MAIN_VARIANT (t)); + if (TYPE_CANONICAL (t)) + return TYPE_CANONICAL (t); - if (gimple_types == NULL) - gimple_types = htab_create (16381, gimple_type_hash, gimple_type_eq, 0); + if (gimple_canonical_types == NULL) + gimple_canonical_types = htab_create_ggc (16381, gimple_canonical_type_hash, + gimple_canonical_type_eq, 0); - slot = htab_find_slot (gimple_types, t, INSERT); + slot = htab_find_slot (gimple_canonical_types, t, INSERT); if (*slot && *(tree *)slot != t) { tree new_type = (tree) *((tree *) slot); - /* Do not merge types with different addressability. */ - gcc_assert (TREE_ADDRESSABLE (t) == TREE_ADDRESSABLE (new_type)); - - /* If t is not its main variant then make t unreachable from its - main variant list. Otherwise we'd queue up a lot of duplicates - there. 
*/ - if (t != TYPE_MAIN_VARIANT (t)) - { - tree tem = TYPE_MAIN_VARIANT (t); - while (tem && TYPE_NEXT_VARIANT (tem) != t) - tem = TYPE_NEXT_VARIANT (tem); - if (tem) - TYPE_NEXT_VARIANT (tem) = TYPE_NEXT_VARIANT (t); - TYPE_NEXT_VARIANT (t) = NULL_TREE; - } - - /* If we are a pointer then remove us from the pointer-to or - reference-to chain. Otherwise we'd queue up a lot of duplicates - there. */ - if (TREE_CODE (t) == POINTER_TYPE) - { - if (TYPE_POINTER_TO (TREE_TYPE (t)) == t) - TYPE_POINTER_TO (TREE_TYPE (t)) = TYPE_NEXT_PTR_TO (t); - else - { - tree tem = TYPE_POINTER_TO (TREE_TYPE (t)); - while (tem && TYPE_NEXT_PTR_TO (tem) != t) - tem = TYPE_NEXT_PTR_TO (tem); - if (tem) - TYPE_NEXT_PTR_TO (tem) = TYPE_NEXT_PTR_TO (t); - } - TYPE_NEXT_PTR_TO (t) = NULL_TREE; - } - else if (TREE_CODE (t) == REFERENCE_TYPE) - { - if (TYPE_REFERENCE_TO (TREE_TYPE (t)) == t) - TYPE_REFERENCE_TO (TREE_TYPE (t)) = TYPE_NEXT_REF_TO (t); - else - { - tree tem = TYPE_REFERENCE_TO (TREE_TYPE (t)); - while (tem && TYPE_NEXT_REF_TO (tem) != t) - tem = TYPE_NEXT_REF_TO (tem); - if (tem) - TYPE_NEXT_REF_TO (tem) = TYPE_NEXT_REF_TO (t); - } - TYPE_NEXT_REF_TO (t) = NULL_TREE; - } - + TYPE_CANONICAL (t) = new_type; t = new_type; } else - *slot = (void *) t; + { + TYPE_CANONICAL (t) = t; + *slot = (void *) t; + } return t; } @@ -3902,16 +4832,36 @@ print_gimple_types_stats (void) htab_collisions (gimple_types)); else fprintf (stderr, "GIMPLE type table is empty\n"); - if (gtc_visited) - fprintf (stderr, "GIMPLE type comparison table: size %ld, %ld " - "elements, %ld searches, %ld collisions (ratio: %f)\n", - (long) htab_size (gtc_visited), - (long) htab_elements (gtc_visited), - (long) gtc_visited->searches, - (long) gtc_visited->collisions, - htab_collisions (gtc_visited)); + if (type_hash_cache) + fprintf (stderr, "GIMPLE type hash table: size %ld, %ld elements, " + "%ld searches, %ld collisions (ratio: %f)\n", + (long) htab_size (type_hash_cache), + (long) htab_elements (type_hash_cache), + (long) type_hash_cache->searches, + (long) type_hash_cache->collisions, + htab_collisions (type_hash_cache)); + else + fprintf (stderr, "GIMPLE type hash table is empty\n"); + if (gimple_canonical_types) + fprintf (stderr, "GIMPLE canonical type table: size %ld, %ld elements, " + "%ld searches, %ld collisions (ratio: %f)\n", + (long) htab_size (gimple_canonical_types), + (long) htab_elements (gimple_canonical_types), + (long) gimple_canonical_types->searches, + (long) gimple_canonical_types->collisions, + htab_collisions (gimple_canonical_types)); + else + fprintf (stderr, "GIMPLE canonical type table is empty\n"); + if (canonical_type_hash_cache) + fprintf (stderr, "GIMPLE canonical type hash table: size %ld, %ld elements, " + "%ld searches, %ld collisions (ratio: %f)\n", + (long) htab_size (canonical_type_hash_cache), + (long) htab_elements (canonical_type_hash_cache), + (long) canonical_type_hash_cache->searches, + (long) canonical_type_hash_cache->collisions, + htab_collisions (canonical_type_hash_cache)); else - fprintf (stderr, "GIMPLE type comparison table is empty\n"); + fprintf (stderr, "GIMPLE canonical type hash table is empty\n"); } /* Free the gimple type hashtables used for LTO type merging. 
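+   The four hashtables, the type-pair cache and the type-leader cache
+   are all dropped here; they are only meaningful while merging.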
*/ @@ -3928,17 +4878,27 @@ free_gimple_type_tables (void) htab_delete (gimple_types); gimple_types = NULL; } + if (gimple_canonical_types) + { + htab_delete (gimple_canonical_types); + gimple_canonical_types = NULL; + } if (type_hash_cache) { - pointer_map_destroy (type_hash_cache); + htab_delete (type_hash_cache); type_hash_cache = NULL; } - if (gtc_visited) + if (canonical_type_hash_cache) + { + htab_delete (canonical_type_hash_cache); + canonical_type_hash_cache = NULL; + } + if (type_pair_cache) { - htab_delete (gtc_visited); - obstack_free (>c_ob, NULL); - gtc_visited = NULL; + free (type_pair_cache); + type_pair_cache = NULL; } + gimple_type_leader = NULL; } @@ -3966,6 +4926,10 @@ gimple_signed_or_unsigned_type (bool unsignedp, tree type) return unsignedp ? long_long_unsigned_type_node : long_long_integer_type_node; + if (int128_integer_type_node && (type1 == int128_integer_type_node || type1 == int128_unsigned_type_node)) + return unsignedp + ? int128_unsigned_type_node + : int128_integer_type_node; #if HOST_BITS_PER_WIDE_INT >= 64 if (type1 == intTI_type_node || type1 == unsigned_intTI_type_node) return unsignedp ? unsigned_intTI_type_node : intTI_type_node; @@ -4078,6 +5042,10 @@ gimple_signed_or_unsigned_type (bool unsignedp, tree type) return (unsignedp ? long_long_unsigned_type_node : long_long_integer_type_node); + if (int128_integer_type_node && TYPE_OK (int128_integer_type_node)) + return (unsignedp + ? int128_unsigned_type_node + : int128_integer_type_node); #if HOST_BITS_PER_WIDE_INT >= 64 if (TYPE_OK (intTI_type_node)) @@ -4163,63 +5131,6 @@ gimple_get_alias_set (tree t) if (t1 != t) return get_alias_set (t1); } - else if (POINTER_TYPE_P (t)) - { - /* From the common C and C++ langhook implementation: - - Unfortunately, there is no canonical form of a pointer type. - In particular, if we have `typedef int I', then `int *', and - `I *' are different types. So, we have to pick a canonical - representative. We do this below. - - Technically, this approach is actually more conservative that - it needs to be. In particular, `const int *' and `int *' - should be in different alias sets, according to the C and C++ - standard, since their types are not the same, and so, - technically, an `int **' and `const int **' cannot point at - the same thing. - - But, the standard is wrong. In particular, this code is - legal C++: - - int *ip; - int **ipp = &ip; - const int* const* cipp = ipp; - And, it doesn't make sense for that to be legal unless you - can dereference IPP and CIPP. So, we ignore cv-qualifiers on - the pointed-to types. This issue has been reported to the - C++ committee. */ - - /* In addition to the above canonicalization issue with LTO - we should also canonicalize `T (*)[]' to `T *' avoiding - alias issues with pointer-to element types and pointer-to - array types. - - Likewise we need to deal with the situation of incomplete - pointed-to types and make `*(struct X **)&a' and - `*(struct X {} **)&a' alias. Otherwise we will have to - guarantee that all pointer-to incomplete type variants - will be replaced by pointer-to complete type variants if - they are available. - - With LTO the convenient situation of using `void *' to - access and store any pointer type will also become - more apparent (and `void *' is just another pointer-to - incomplete type). Assigning alias-set zero to `void *' - and all pointer-to incomplete types is a not appealing - solution. 
Assigning an effective alias-set zero only - affecting pointers might be - by recording proper subset - relationships of all pointer alias-sets. - - Pointer-to function types are another grey area which - needs caution. Globbing them all into one alias-set - or the above effective zero set would work. */ - - /* For now just assign the same alias-set to all pointers. - That's simple and avoids all the above problems. */ - if (t != ptr_type_node) - return get_alias_set (ptr_type_node); - } return -1; } @@ -4252,7 +5163,7 @@ count_ptr_derefs (tree *tp, int *walk_subtrees, void *data) return NULL_TREE; } - if (INDIRECT_REF_P (*tp) && TREE_OPERAND (*tp, 0) == count_p->ptr) + if (TREE_CODE (*tp) == MEM_REF && TREE_OPERAND (*tp, 0) == count_p->ptr) { if (wi_p->is_lhs) count_p->num_stores++; @@ -4325,6 +5236,7 @@ get_base_loadstore (tree op) op = TREE_OPERAND (op, 0); if (DECL_P (op) || INDIRECT_REF_P (op) + || TREE_CODE (op) == MEM_REF || TREE_CODE (op) == TARGET_MEM_REF) return op; return NULL_TREE; @@ -4362,16 +5274,28 @@ walk_stmt_load_store_addr_ops (gimple stmt, void *data, if (TREE_CODE (rhs) == ADDR_EXPR) ret |= visit_addr (stmt, TREE_OPERAND (rhs, 0), data); else if (TREE_CODE (rhs) == TARGET_MEM_REF - && TMR_BASE (rhs) != NULL_TREE && TREE_CODE (TMR_BASE (rhs)) == ADDR_EXPR) ret |= visit_addr (stmt, TREE_OPERAND (TMR_BASE (rhs), 0), data); else if (TREE_CODE (rhs) == OBJ_TYPE_REF && TREE_CODE (OBJ_TYPE_REF_OBJECT (rhs)) == ADDR_EXPR) ret |= visit_addr (stmt, TREE_OPERAND (OBJ_TYPE_REF_OBJECT (rhs), 0), data); + else if (TREE_CODE (rhs) == CONSTRUCTOR) + { + unsigned int ix; + tree val; + + FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs), ix, val) + if (TREE_CODE (val) == ADDR_EXPR) + ret |= visit_addr (stmt, TREE_OPERAND (val, 0), data); + else if (TREE_CODE (val) == OBJ_TYPE_REF + && TREE_CODE (OBJ_TYPE_REF_OBJECT (val)) == ADDR_EXPR) + ret |= visit_addr (stmt, + TREE_OPERAND (OBJ_TYPE_REF_OBJECT (val), + 0), data); + } lhs = gimple_assign_lhs (stmt); if (TREE_CODE (lhs) == TARGET_MEM_REF - && TMR_BASE (lhs) != NULL_TREE && TREE_CODE (TMR_BASE (lhs)) == ADDR_EXPR) ret |= visit_addr (stmt, TREE_OPERAND (TMR_BASE (lhs), 0), data); } @@ -4559,7 +5483,8 @@ gimple_ior_addresses_taken (bitmap addresses_taken, gimple stmt) const char * gimple_decl_printable_name (tree decl, int verbosity) { - gcc_assert (decl && DECL_NAME (decl)); + if (!DECL_NAME (decl)) + return NULL; if (DECL_ASSEMBLER_NAME_SET_P (decl)) { @@ -4584,43 +5509,33 @@ gimple_decl_printable_name (tree decl, int verbosity) return IDENTIFIER_POINTER (DECL_NAME (decl)); } +/* Return true when STMT is builtins call to CODE. */ -/* Fold a OBJ_TYPE_REF expression to the address of a function. - KNOWN_TYPE carries the true type of OBJ_TYPE_REF_OBJECT(REF). Adapted - from cp_fold_obj_type_ref, but it tolerates types with no binfo - data. */ - -tree -gimple_fold_obj_type_ref (tree ref, tree known_type) +bool +gimple_call_builtin_p (gimple stmt, enum built_in_function code) { - HOST_WIDE_INT index; - HOST_WIDE_INT i; - tree v; tree fndecl; + return (is_gimple_call (stmt) + && (fndecl = gimple_call_fndecl (stmt)) != NULL + && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL + && DECL_FUNCTION_CODE (fndecl) == code); +} - if (TYPE_BINFO (known_type) == NULL_TREE) - return NULL_TREE; +/* Return true if STMT clobbers memory. STMT is required to be a + GIMPLE_ASM. 
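+   For example, an asm with an explicit "memory" clobber, such as
+   asm volatile ("" : : : "memory"), makes this predicate true.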
*/ + +bool +gimple_asm_clobbers_memory_p (const_gimple stmt) +{ + unsigned i; - v = BINFO_VIRTUALS (TYPE_BINFO (known_type)); - index = tree_low_cst (OBJ_TYPE_REF_TOKEN (ref), 1); - i = 0; - while (i != index) + for (i = 0; i < gimple_asm_nclobbers (stmt); i++) { - i += (TARGET_VTABLE_USES_DESCRIPTORS - ? TARGET_VTABLE_USES_DESCRIPTORS : 1); - v = TREE_CHAIN (v); + tree op = gimple_asm_clobber_op (stmt, i); + if (strcmp (TREE_STRING_POINTER (TREE_VALUE (op)), "memory") == 0) + return true; } - fndecl = TREE_VALUE (v); - -#ifdef ENABLE_CHECKING - gcc_assert (tree_int_cst_equal (OBJ_TYPE_REF_TOKEN (ref), - DECL_VINDEX (fndecl))); -#endif - - cgraph_node (fndecl)->local.vtable_method = true; - - return build_fold_addr_expr (fndecl); + return false; } - #include "gt-gimple.h"