#include "tree-inline.h"
#include "value-prof.h"
#include "target.h"
+#include "ssaexpand.h"
+/* This variable holds information helping the rewriting of SSA trees
+ into RTL. */
+struct ssaexpand SA;
+
+/* This variable holds the currently expanded gimple statement for purposes
+ of communicating the profile info to the builtin expanders. */
+gimple currently_expanding_gimple_stmt;
+
/* Return an expression tree corresponding to the RHS of GIMPLE
statement STMT. */
TREE_TYPE (gimple_assign_lhs (stmt)),
gimple_assign_rhs1 (stmt));
else if (grhs_class == GIMPLE_SINGLE_RHS)
- t = gimple_assign_rhs1 (stmt);
- else
- gcc_unreachable ();
-
- return t;
-}
-
-/* Return an expression tree corresponding to the PREDICATE of GIMPLE_COND
- statement STMT. */
-
-static tree
-gimple_cond_pred_to_tree (gimple stmt)
-{
- return build2 (gimple_cond_code (stmt), boolean_type_node,
- gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
-}
-
-/* Helper for gimple_to_tree. Set EXPR_LOCATION for every expression
- inside *TP. DATA is the location to set. */
-
-static tree
-set_expr_location_r (tree *tp, int *ws ATTRIBUTE_UNUSED, void *data)
-{
- location_t *loc = (location_t *) data;
- if (EXPR_P (*tp))
- SET_EXPR_LOCATION (*tp, *loc);
-
- return NULL_TREE;
-}
-
-
-/* RTL expansion has traditionally been done on trees, so the
- transition to doing it on GIMPLE tuples is very invasive to the RTL
- expander. To facilitate the transition, this function takes a
- GIMPLE tuple STMT and returns the same statement in the form of a
- tree. */
-
-static tree
-gimple_to_tree (gimple stmt)
-{
- tree t;
- int rn;
- tree_ann_common_t ann;
- location_t loc;
-
- switch (gimple_code (stmt))
- {
- case GIMPLE_ASSIGN:
- {
- tree lhs = gimple_assign_lhs (stmt);
-
- t = gimple_assign_rhs_to_tree (stmt);
- t = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, t);
- if (gimple_assign_nontemporal_move_p (stmt))
- MOVE_NONTEMPORAL (t) = true;
- }
- break;
-
- case GIMPLE_COND:
- t = gimple_cond_pred_to_tree (stmt);
- t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- break;
-
- case GIMPLE_GOTO:
- t = build1 (GOTO_EXPR, void_type_node, gimple_goto_dest (stmt));
- break;
-
- case GIMPLE_LABEL:
- t = build1 (LABEL_EXPR, void_type_node, gimple_label_label (stmt));
- break;
-
- case GIMPLE_RETURN:
- {
- tree retval = gimple_return_retval (stmt);
-
- if (retval && retval != error_mark_node)
- {
- tree result = DECL_RESULT (current_function_decl);
-
- /* If we are not returning the current function's RESULT_DECL,
- build an assignment to it. */
- if (retval != result)
- {
- /* I believe that a function's RESULT_DECL is unique. */
- gcc_assert (TREE_CODE (retval) != RESULT_DECL);
-
- retval = build2 (MODIFY_EXPR, TREE_TYPE (result),
- result, retval);
- }
- }
- t = build1 (RETURN_EXPR, void_type_node, retval);
- }
- break;
-
- case GIMPLE_ASM:
- {
- size_t i, n;
- tree out, in, cl;
- const char *s;
-
- out = NULL_TREE;
- n = gimple_asm_noutputs (stmt);
- if (n > 0)
- {
- t = out = gimple_asm_output_op (stmt, 0);
- for (i = 1; i < n; i++)
- {
- TREE_CHAIN (t) = gimple_asm_output_op (stmt, i);
- t = gimple_asm_output_op (stmt, i);
- }
- }
-
- in = NULL_TREE;
- n = gimple_asm_ninputs (stmt);
- if (n > 0)
- {
- t = in = gimple_asm_input_op (stmt, 0);
- for (i = 1; i < n; i++)
- {
- TREE_CHAIN (t) = gimple_asm_input_op (stmt, i);
- t = gimple_asm_input_op (stmt, i);
- }
- }
-
- cl = NULL_TREE;
- n = gimple_asm_nclobbers (stmt);
- if (n > 0)
- {
- t = cl = gimple_asm_clobber_op (stmt, 0);
- for (i = 1; i < n; i++)
- {
- TREE_CHAIN (t) = gimple_asm_clobber_op (stmt, i);
- t = gimple_asm_clobber_op (stmt, i);
- }
- }
-
- s = gimple_asm_string (stmt);
- t = build4 (ASM_EXPR, void_type_node, build_string (strlen (s), s),
- out, in, cl);
- ASM_VOLATILE_P (t) = gimple_asm_volatile_p (stmt);
- ASM_INPUT_P (t) = gimple_asm_input_p (stmt);
- }
- break;
-
- case GIMPLE_CALL:
- {
- size_t i;
- tree fn;
- tree_ann_common_t ann;
-
- t = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3);
-
- CALL_EXPR_FN (t) = gimple_call_fn (stmt);
- TREE_TYPE (t) = gimple_call_return_type (stmt);
- CALL_EXPR_STATIC_CHAIN (t) = gimple_call_chain (stmt);
-
- for (i = 0; i < gimple_call_num_args (stmt); i++)
- CALL_EXPR_ARG (t, i) = gimple_call_arg (stmt, i);
-
- if (!(gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE)))
- TREE_SIDE_EFFECTS (t) = 1;
-
- if (gimple_call_flags (stmt) & ECF_NOTHROW)
- TREE_NOTHROW (t) = 1;
-
- CALL_EXPR_TAILCALL (t) = gimple_call_tail_p (stmt);
- CALL_EXPR_RETURN_SLOT_OPT (t) = gimple_call_return_slot_opt_p (stmt);
- CALL_FROM_THUNK_P (t) = gimple_call_from_thunk_p (stmt);
- CALL_CANNOT_INLINE_P (t) = gimple_call_cannot_inline_p (stmt);
- CALL_EXPR_VA_ARG_PACK (t) = gimple_call_va_arg_pack_p (stmt);
-
- /* If the call has a LHS then create a MODIFY_EXPR to hold it. */
- {
- tree lhs = gimple_call_lhs (stmt);
-
- if (lhs)
- t = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, t);
- }
-
- /* Record the original call statement, as it may be used
- to retrieve profile information during expansion. */
-
- if ((fn = gimple_call_fndecl (stmt)) != NULL_TREE
- && DECL_BUILT_IN (fn))
- {
- ann = get_tree_common_ann (t);
- ann->stmt = stmt;
- }
- }
- break;
-
- case GIMPLE_SWITCH:
- {
- tree label_vec;
- size_t i;
- tree elt = gimple_switch_label (stmt, 0);
-
- label_vec = make_tree_vec (gimple_switch_num_labels (stmt));
-
- if (!CASE_LOW (elt) && !CASE_HIGH (elt))
- {
- for (i = 1; i < gimple_switch_num_labels (stmt); i++)
- TREE_VEC_ELT (label_vec, i - 1) = gimple_switch_label (stmt, i);
-
- /* The default case in a SWITCH_EXPR must be at the end of
- the label vector. */
- TREE_VEC_ELT (label_vec, i - 1) = gimple_switch_label (stmt, 0);
- }
- else
- {
- for (i = 0; i < gimple_switch_num_labels (stmt); i++)
- TREE_VEC_ELT (label_vec, i) = gimple_switch_label (stmt, i);
- }
-
- t = build3 (SWITCH_EXPR, void_type_node, gimple_switch_index (stmt),
- NULL, label_vec);
- }
- break;
-
- case GIMPLE_NOP:
- case GIMPLE_PREDICT:
- t = build1 (NOP_EXPR, void_type_node, size_zero_node);
- break;
-
- case GIMPLE_RESX:
- t = build_resx (gimple_resx_region (stmt));
- break;
-
- default:
- if (errorcount == 0)
- {
- error ("Unrecognized GIMPLE statement during RTL expansion");
- print_gimple_stmt (stderr, stmt, 4, 0);
- gcc_unreachable ();
- }
- else
- {
- /* Ignore any bad gimple codes if we're going to die anyhow,
- so we can at least set TREE_ASM_WRITTEN and have the rest
- of compilation advance without sudden ICE death. */
- t = build1 (NOP_EXPR, void_type_node, size_zero_node);
- break;
- }
- }
-
- /* If STMT is inside an exception region, record it in the generated
- expression. */
- rn = lookup_stmt_eh_region (stmt);
- if (rn >= 0)
{
- tree call = get_call_expr_in (t);
-
- ann = get_tree_common_ann (t);
- ann->rn = rn;
-
- /* For a CALL_EXPR on the RHS of an assignment, calls.c looks up
- the CALL_EXPR not the assignment statment for EH region number. */
- if (call && call != t)
- {
- ann = get_tree_common_ann (call);
- ann->rn = rn;
- }
+ t = gimple_assign_rhs1 (stmt);
+ /* Avoid modifying this tree in place below. */
+ if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
+ && gimple_location (stmt) != EXPR_LOCATION (t))
+ t = copy_node (t);
}
+ else
+ gcc_unreachable ();
- /* Set EXPR_LOCATION in all the embedded expressions. */
- loc = gimple_location (stmt);
- walk_tree (&t, set_expr_location_r, (void *) &loc, NULL);
-
- TREE_BLOCK (t) = gimple_block (stmt);
+ if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t))
+ SET_EXPR_LOCATION (t, gimple_location (stmt));
return t;
}
-/* Release back to GC memory allocated by gimple_to_tree. */
-
-static void
-release_stmt_tree (gimple stmt, tree stmt_tree)
-{
- tree_ann_common_t ann;
-
- switch (gimple_code (stmt))
- {
- case GIMPLE_ASSIGN:
- if (get_gimple_rhs_class (gimple_expr_code (stmt)) != GIMPLE_SINGLE_RHS)
- ggc_free (TREE_OPERAND (stmt_tree, 1));
- break;
- case GIMPLE_COND:
- ggc_free (COND_EXPR_COND (stmt_tree));
- break;
- case GIMPLE_RETURN:
- if (TREE_OPERAND (stmt_tree, 0)
- && TREE_CODE (TREE_OPERAND (stmt_tree, 0)) == MODIFY_EXPR)
- ggc_free (TREE_OPERAND (stmt_tree, 0));
- break;
- case GIMPLE_CALL:
- if (gimple_call_lhs (stmt))
- {
- ann = tree_common_ann (TREE_OPERAND (stmt_tree, 1));
- if (ann)
- ggc_free (ann);
- ggc_free (TREE_OPERAND (stmt_tree, 1));
- }
- break;
- default:
- break;
- }
- ann = tree_common_ann (stmt_tree);
- if (ann)
- ggc_free (ann);
- ggc_free (stmt_tree);
-}
-
-
/* Verify that there is exactly single jump instruction since last and attach
REG_BR_PROB note specifying probability.
??? We really ought to pass the probability down to RTL expanders and let it
#define STACK_ALIGNMENT_NEEDED 1
#endif
+#define SSAVAR(x) (TREE_CODE (x) == SSA_NAME ? SSA_NAME_VAR (x) : x)
+
+/* Associate declaration T with storage space X. If T is not an
+ SSA name, this is exactly SET_DECL_RTL; otherwise make the
+ partition of T associated with X. */
+static inline void
+set_rtl (tree t, rtx x)
+{
+ if (TREE_CODE (t) == SSA_NAME)
+ {
+ SA.partition_to_pseudo[var_to_partition (SA.map, t)] = x;
+ if (x && !MEM_P (x))
+ set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (t), x);
+ /* For the benefit of debug information at -O0 (where vartracking
+ doesn't run) record the place also in the base DECL if it's
+ a normal variable (not a parameter). */
+ if (x && x != pc_rtx && TREE_CODE (SSA_NAME_VAR (t)) == VAR_DECL)
+ {
+ tree var = SSA_NAME_VAR (t);
+ /* If we don't yet have something recorded, just record it now. */
+ if (!DECL_RTL_SET_P (var))
+ SET_DECL_RTL (var, x);
+ /* If we have it already set to "multiple places" don't
+ change this. */
+ else if (DECL_RTL (var) == pc_rtx)
+ ;
+ /* If we have something recorded and it's not the same place
+ as we want to record now, we have multiple partitions for the
+ same base variable, with different places. We can't just
+ randomly choose one, hence we have to say that we don't know.
+ This only happens with optimization, and there var-tracking
+ will figure out the right thing. */
+ else if (DECL_RTL (var) != x)
+ SET_DECL_RTL (var, pc_rtx);
+ }
+ }
+ else
+ SET_DECL_RTL (t, x);
+}
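/* Illustrative sketch, not part of the patch: the three-state recording
   scheme set_rtl uses above for DECL_RTL, restated on plain pointers.
   CONFLICT plays the role of pc_rtx ("multiple places"); all names here
   are hypothetical. */
#include <stddef.h>

static int conflict_tag;
#define CONFLICT ((void *) &conflict_tag)

static void
record_place (void **recorded, void *place)
{
  if (*recorded == NULL)
    *recorded = place;            /* Nothing recorded yet; record it. */
  else if (*recorded == CONFLICT)
    ;                             /* Already known ambiguous; keep it. */
  else if (*recorded != place)
    *recorded = CONFLICT;         /* A second, different place: give up. */
}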
/* This structure holds data relevant to one variable that will be
placed in a stack slot. */
So here we only make sure stack_alignment_needed >= align. */
if (crtl->stack_alignment_needed < align)
crtl->stack_alignment_needed = align;
- if (crtl->max_used_stack_slot_alignment < crtl->stack_alignment_needed)
- crtl->max_used_stack_slot_alignment = crtl->stack_alignment_needed;
+ if (crtl->max_used_stack_slot_alignment < align)
+ crtl->max_used_stack_slot_alignment = align;
return align / BITS_PER_UNIT;
}
}
stack_vars[stack_vars_num].decl = decl;
stack_vars[stack_vars_num].offset = 0;
- stack_vars[stack_vars_num].size = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
- stack_vars[stack_vars_num].alignb = get_decl_align_unit (decl);
+ stack_vars[stack_vars_num].size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1);
+ stack_vars[stack_vars_num].alignb = get_decl_align_unit (SSAVAR (decl));
/* All variables are initially in their own partition. */
stack_vars[stack_vars_num].representative = stack_vars_num;
stack_vars[stack_vars_num].next = EOC;
/* Ensure that this decl doesn't get put onto the list twice. */
- SET_DECL_RTL (decl, pc_rtx);
+ set_rtl (decl, pc_rtx);
stack_vars_num++;
}
}
/* A subroutine of partition_stack_vars. A comparison function for qsort,
- sorting an array of indices by the size of the object. */
+ sorting an array of indices by the size and type of the object. */
static int
stack_var_size_cmp (const void *a, const void *b)
{
HOST_WIDE_INT sa = stack_vars[*(const size_t *)a].size;
HOST_WIDE_INT sb = stack_vars[*(const size_t *)b].size;
- unsigned int uida = DECL_UID (stack_vars[*(const size_t *)a].decl);
- unsigned int uidb = DECL_UID (stack_vars[*(const size_t *)b].decl);
+ tree decla, declb;
+ unsigned int uida, uidb;
if (sa < sb)
return -1;
if (sa > sb)
return 1;
- /* For stack variables of the same size use the uid of the decl
- to make the sort stable. */
+ decla = stack_vars[*(const size_t *)a].decl;
+ declb = stack_vars[*(const size_t *)b].decl;
+ /* For stack variables of the same size use an id of the decls
+ to make the sort stable. Two SSA names are compared by their
+ version, SSA names come before non-SSA names, and two normal
+ decls are compared by their DECL_UID. */
+ if (TREE_CODE (decla) == SSA_NAME)
+ {
+ if (TREE_CODE (declb) == SSA_NAME)
+ uida = SSA_NAME_VERSION (decla), uidb = SSA_NAME_VERSION (declb);
+ else
+ return -1;
+ }
+ else if (TREE_CODE (declb) == SSA_NAME)
+ return 1;
+ else
+ uida = DECL_UID (decla), uidb = DECL_UID (declb);
if (uida < uidb)
return -1;
if (uida > uidb)
return 1;
return 0;
}
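/* Illustration, not part of the patch: with equal sizes the comparator
   above sorts SSA names first (by version) and ordinary decls after
   them (by DECL_UID), e.g.
     {size 8, v_3} < {size 8, v_17} < {size 8, DECL_UID 42}.  */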
+
+/* If the points-to solution *PT points to variables that are in a partition
+ together with other variables, add all partition members to the pointed-to
+ variables bitmap. */
+
+static void
+add_partitioned_vars_to_ptset (struct pt_solution *pt,
+ struct pointer_map_t *decls_to_partitions,
+ struct pointer_set_t *visited, bitmap temp)
+{
+ bitmap_iterator bi;
+ unsigned i;
+ bitmap *part;
+
+ if (pt->anything
+ || pt->vars == NULL
+ /* The pointed-to vars bitmap is shared; it is enough to
+ visit it once. */
+ || pointer_set_insert (visited, pt->vars))
+ return;
+
+ bitmap_clear (temp);
+
+ /* By using a temporary bitmap to store all members of the partitions
+ we have to add, we make sure to visit each partition only
+ once. */
+ EXECUTE_IF_SET_IN_BITMAP (pt->vars, 0, i, bi)
+ if ((!temp
+ || !bitmap_bit_p (temp, i))
+ && (part = (bitmap *) pointer_map_contains (decls_to_partitions,
+ (void *)(size_t) i)))
+ bitmap_ior_into (temp, *part);
+ if (!bitmap_empty_p (temp))
+ bitmap_ior_into (pt->vars, temp);
+}
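/* Minimal self-contained sketch, not part of the patch, of the closure
   add_partitioned_vars_to_ptset computes: any id whose partition
   intersects the set pulls the whole partition in.  Plain int arrays
   stand in for GCC's sparse bitmaps; all names are hypothetical. */
static void
close_under_partitions (int *in_set, const int *partition_of,
			int n_ids, int *touched /* zeroed, one per partition */)
{
  int i;

  /* Pass 1: mark every partition that already has a member in the set. */
  for (i = 0; i < n_ids; i++)
    if (in_set[i] && partition_of[i] >= 0)
      touched[partition_of[i]] = 1;

  /* Pass 2: pull in all members of the marked partitions. */
  for (i = 0; i < n_ids; i++)
    if (partition_of[i] >= 0 && touched[partition_of[i]])
      in_set[i] = 1;
}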
+
+/* Update points-to sets based on partition info, so we can use them on RTL.
+ The bitmaps representing stack partitions will be saved until expand,
+ where partitioned decls used as bases in memory expressions will be
+ rewritten. */
+
+static void
+update_alias_info_with_stack_vars (void)
+{
+ struct pointer_map_t *decls_to_partitions = NULL;
+ size_t i, j;
+ tree var = NULL_TREE;
+
+ for (i = 0; i < stack_vars_num; i++)
+ {
+ bitmap part = NULL;
+ tree name;
+ struct ptr_info_def *pi;
+
+ /* Not interested in partitions with a single variable. */
+ if (stack_vars[i].representative != i
+ || stack_vars[i].next == EOC)
+ continue;
+
+ if (!decls_to_partitions)
+ {
+ decls_to_partitions = pointer_map_create ();
+ cfun->gimple_df->decls_to_pointers = pointer_map_create ();
+ }
+
+ /* Create an SSA_NAME that points to the partition for use
+ as base during alias-oracle queries on RTL for bases that
+ have been partitioned. */
+ if (var == NULL_TREE)
+ var = create_tmp_var (ptr_type_node, NULL);
+ name = make_ssa_name (var, NULL);
+
+ /* Create bitmaps representing partitions. They will be used for
+ points-to sets later, so use GGC alloc. */
+ part = BITMAP_GGC_ALLOC ();
+ for (j = i; j != EOC; j = stack_vars[j].next)
+ {
+ tree decl = stack_vars[j].decl;
+ unsigned int uid = DECL_UID (decl);
+ /* We should never end up partitioning SSA names (though they
+ may end up on the stack). Neither should we allocate stack
+ space to something that is unused and thus unreferenced. */
+ gcc_assert (DECL_P (decl)
+ && referenced_var_lookup (uid));
+ bitmap_set_bit (part, uid);
+ *((bitmap *) pointer_map_insert (decls_to_partitions,
+ (void *)(size_t) uid)) = part;
+ *((tree *) pointer_map_insert (cfun->gimple_df->decls_to_pointers,
+ decl)) = name;
+ }
+
+ /* Make the SSA name point to all partition members. */
+ pi = get_ptr_info (name);
+ pt_solution_set (&pi->pt, part);
+ }
+
+ /* Make all points-to sets that contain one member of a partition
+ contain all members of the partition. */
+ if (decls_to_partitions)
+ {
+ unsigned i;
+ struct pointer_set_t *visited = pointer_set_create ();
+ bitmap temp = BITMAP_ALLOC (NULL);
+
+ for (i = 1; i < num_ssa_names; i++)
+ {
+ tree name = ssa_name (i);
+ struct ptr_info_def *pi;
+
+ if (name
+ && POINTER_TYPE_P (TREE_TYPE (name))
+ && ((pi = SSA_NAME_PTR_INFO (name)) != NULL))
+ add_partitioned_vars_to_ptset (&pi->pt, decls_to_partitions,
+ visited, temp);
+ }
+
+ add_partitioned_vars_to_ptset (&cfun->gimple_df->escaped,
+ decls_to_partitions, visited, temp);
+ add_partitioned_vars_to_ptset (&cfun->gimple_df->callused,
+ decls_to_partitions, visited, temp);
+
+ pointer_set_destroy (visited);
+ pointer_map_destroy (decls_to_partitions);
+ BITMAP_FREE (temp);
+ }
+}
+
/* A subroutine of partition_stack_vars. The UNION portion of a UNION/FIND
partitioning algorithm. Partitions A and B are known to be non-conflicting.
Merge them into a single partition A.
break;
}
}
+
+ if (optimize)
+ update_alias_info_with_stack_vars ();
}
/* A debugging aid for expand_used_vars. Dump the generated partitions. */
gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
x = plus_constant (virtual_stack_vars_rtx, offset);
- x = gen_rtx_MEM (DECL_MODE (decl), x);
-
- /* Set alignment we actually gave this decl. */
- offset -= frame_phase;
- align = offset & -offset;
- align *= BITS_PER_UNIT;
- if (align == 0)
- align = STACK_BOUNDARY;
- else if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
- align = MAX_SUPPORTED_STACK_ALIGNMENT;
- DECL_ALIGN (decl) = align;
- DECL_USER_ALIGN (decl) = 0;
+ x = gen_rtx_MEM (DECL_MODE (SSAVAR (decl)), x);
+
+ if (TREE_CODE (decl) != SSA_NAME)
+ {
+ /* Set the alignment we actually gave this decl if it isn't an SSA name.
+ If it is, we generate stack slots only accidentally, so it isn't as
+ important; we'll simply use the alignment that is already set. */
+ offset -= frame_phase;
+ align = offset & -offset;
+ align *= BITS_PER_UNIT;
+ if (align == 0)
+ align = STACK_BOUNDARY;
+ else if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
+ align = MAX_SUPPORTED_STACK_ALIGNMENT;
+
+ DECL_ALIGN (decl) = align;
+ DECL_USER_ALIGN (decl) = 0;
+ }
- set_mem_attributes (x, decl, true);
- SET_DECL_RTL (decl, x);
+ set_mem_attributes (x, SSAVAR (decl), true);
+ set_rtl (decl, x);
}
/* A subroutine of expand_used_vars. Give each partition representative
/* Skip variables that have already had rtl assigned. See also
add_stack_var where we perpetrate this pc_rtx hack. */
- if (DECL_RTL (stack_vars[i].decl) != pc_rtx)
+ if ((TREE_CODE (stack_vars[i].decl) == SSA_NAME
+ ? SA.partition_to_pseudo[var_to_partition (SA.map, stack_vars[i].decl)]
+ : DECL_RTL (stack_vars[i].decl)) != pc_rtx)
continue;
/* Check the predicate to see whether this variable should be
size += stack_vars[i].size;
for (j = i; j != EOC; j = stack_vars[j].next)
- SET_DECL_RTL (stack_vars[j].decl, NULL);
+ set_rtl (stack_vars[j].decl, NULL);
}
return size;
}
{
HOST_WIDE_INT size, offset, align;
- size = tree_low_cst (DECL_SIZE_UNIT (var), 1);
- align = get_decl_align_unit (var);
+ size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (var)), 1);
+ align = get_decl_align_unit (SSAVAR (var));
offset = alloc_stack_frame_space (size, align);
expand_one_stack_var_at (var, offset);
static void
expand_one_register_var (tree var)
{
- tree type = TREE_TYPE (var);
- int unsignedp = TYPE_UNSIGNED (type);
- enum machine_mode reg_mode
- = promote_mode (type, DECL_MODE (var), &unsignedp, 0);
+ tree decl = SSAVAR (var);
+ tree type = TREE_TYPE (decl);
+ enum machine_mode reg_mode = promote_decl_mode (decl, NULL);
rtx x = gen_reg_rtx (reg_mode);
- SET_DECL_RTL (var, x);
+ set_rtl (var, x);
/* Note if the object is a user variable. */
- if (!DECL_ARTIFICIAL (var))
- mark_user_reg (x);
+ if (!DECL_ARTIFICIAL (decl))
+ mark_user_reg (x);
if (POINTER_TYPE_P (type))
- mark_reg_pointer (x, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (var))));
+ mark_reg_pointer (x, TYPE_ALIGN (TREE_TYPE (type)));
}
/* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL that
static HOST_WIDE_INT
expand_one_var (tree var, bool toplevel, bool really_expand)
{
+ tree origvar = var;
+ var = SSAVAR (var);
+
if (SUPPORTS_STACK_ALIGNMENT
&& TREE_TYPE (var) != error_mark_node
&& TREE_CODE (var) == VAR_DECL)
variables, which won't be on stack, we collect alignment of
type and ignore user specified alignment. */
if (TREE_STATIC (var) || DECL_EXTERNAL (var))
- align = TYPE_ALIGN (TREE_TYPE (var));
+ align = MINIMUM_ALIGNMENT (TREE_TYPE (var),
+ TYPE_MODE (TREE_TYPE (var)),
+ TYPE_ALIGN (TREE_TYPE (var)));
else
- align = DECL_ALIGN (var);
+ align = MINIMUM_ALIGNMENT (var, DECL_MODE (var), DECL_ALIGN (var));
if (crtl->stack_alignment_estimated < align)
{
}
}
- if (TREE_CODE (var) != VAR_DECL)
+ if (TREE_CODE (origvar) == SSA_NAME)
+ {
+ gcc_assert (TREE_CODE (var) != VAR_DECL
+ || (!DECL_EXTERNAL (var)
+ && !DECL_HAS_VALUE_EXPR_P (var)
+ && !TREE_STATIC (var)
+ && TREE_TYPE (var) != error_mark_node
+ && !DECL_HARD_REGISTER (var)
+ && really_expand));
+ }
+ if (TREE_CODE (var) != VAR_DECL && TREE_CODE (origvar) != SSA_NAME)
;
else if (DECL_EXTERNAL (var))
;
;
else if (TREE_STATIC (var))
;
- else if (DECL_RTL_SET_P (var))
+ else if (TREE_CODE (origvar) != SSA_NAME && DECL_RTL_SET_P (var))
;
else if (TREE_TYPE (var) == error_mark_node)
{
if (really_expand)
expand_one_error_var (var);
}
- else if (DECL_HARD_REGISTER (var))
+ else if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
{
if (really_expand)
expand_one_hard_reg_var (var);
else if (use_register_for_decl (var))
{
if (really_expand)
- expand_one_register_var (var);
+ expand_one_register_var (origvar);
}
else if (defer_stack_allocation (var, toplevel))
- add_stack_var (var);
+ add_stack_var (origvar);
else
{
if (really_expand)
- expand_one_stack_var (var);
+ expand_one_stack_var (origvar);
return tree_low_cst (DECL_SIZE_UNIT (var), 1);
}
return 0;
static void
create_stack_guard (void)
{
- tree guard = build_decl (VAR_DECL, NULL, ptr_type_node);
+ tree guard = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
+ VAR_DECL, NULL, ptr_type_node);
TREE_THIS_VOLATILE (guard) = 1;
TREE_USED (guard) = 1;
expand_one_stack_var (guard);
expand_used_vars (void)
{
tree t, next, outer_block = DECL_INITIAL (current_function_decl);
+ unsigned i;
/* Compute the phase of the stack frame for this function. */
{
init_vars_expansion ();
+ for (i = 0; i < SA.map->num_partitions; i++)
+ {
+ tree var = partition_to_var (SA.map, i);
+
+ gcc_assert (is_gimple_reg (var));
+ if (TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
+ expand_one_var (var, true, true);
+ else
+ {
+ /* This is a PARM_DECL or RESULT_DECL. For those partitions that
+ contain the default def (representing the parm or result itself)
+ we don't do anything here. But for those which don't contain the
+ default def (representing a temporary based on the parm/result)
+ we need to allocate space just like for normal VAR_DECLs. */
+ if (!bitmap_bit_p (SA.partition_has_default_def, i))
+ {
+ expand_one_var (var, true, true);
+ gcc_assert (SA.partition_to_pseudo[i]);
+ }
+ }
+ }
+
/* At this point all variables on the local_decls with TREE_USED
set are not associated with any block scope. Lay them out. */
t = cfun->local_decls;
next = TREE_CHAIN (t);
+ /* Expanded above already. */
+ if (is_gimple_reg (var))
+ {
+ TREE_USED (var) = 0;
+ ggc_free (t);
+ continue;
+ }
/* We didn't set a block for static or extern because it's hard
to tell the difference between a global variable (re)declared
in a local scope, and one that's really declared there to
begin with. And it doesn't really matter much, since we're
not giving them stack space. Expand them now. */
- if (TREE_STATIC (var) || DECL_EXTERNAL (var))
- expand_now = true;
-
- /* Any variable that could have been hoisted into an SSA_NAME
- will have been propagated anywhere the optimizers chose,
- i.e. not confined to their original block. Allocate them
- as if they were defined in the outermost scope. */
- else if (is_gimple_reg (var))
+ else if (TREE_STATIC (var) || DECL_EXTERNAL (var))
expand_now = true;
/* If the variable is not associated with any block, then it
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\n;; ");
- print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
+ print_gimple_stmt (dump_file, stmt, 0,
+ TDF_SLIM | (dump_flags & TDF_LINENO));
fprintf (dump_file, "\n");
print_rtl (dump_file, since ? NEXT_INSN (since) : since);
}
+/* A subroutine of expand_gimple_cond. Given E, a fallthrough edge
+ of a basic block where we just expanded the conditional at the end,
+ possibly clean up the CFG and instruction sequence. */
+
+static void
+maybe_cleanup_end_of_block (edge e)
+{
+ /* Special case: when jumpif decides that the condition is
+ trivial it emits an unconditional jump (and the necessary
+ barrier). But we still have two edges, the fallthru one is
+ wrong. purge_dead_edges would clean this up later. Unfortunately
+ we have to insert insns (and split edges) before
+ find_many_sub_basic_blocks and hence before purge_dead_edges.
+ But splitting edges might create new blocks which depend on the
+ fact that if there are two edges there's no barrier. So the
+ barrier would get lost and verify_flow_info would ICE. Instead
+ of auditing all edge splitters to care for the barrier (which
+ normally isn't there in a cleaned CFG), fix it here. */
+ if (BARRIER_P (get_last_insn ()))
+ {
+ basic_block bb = e->src;
+ rtx insn;
+ remove_edge (e);
+ /* Now, we have a single successor block, if we have insns to
+ insert on the remaining edge we potentially will insert
+ it at the end of this block (if the dest block isn't feasible)
+ in order to avoid splitting the edge. This insertion will take
+ place in front of the last jump. But we might have emitted
+ multiple jumps (conditional and one unconditional) to the
+ same destination. Inserting in front of the last one then
+ is a problem. See PR 40021. We fix this by deleting all
+ jumps except the last unconditional one. */
+ insn = PREV_INSN (get_last_insn ());
+ /* Make sure we have an unconditional jump. Otherwise we're
+ confused. */
+ gcc_assert (JUMP_P (insn) && !any_condjump_p (insn));
+ for (insn = PREV_INSN (insn); insn != BB_HEAD (bb);)
+ {
+ insn = PREV_INSN (insn);
+ if (JUMP_P (NEXT_INSN (insn)))
+ delete_insn (NEXT_INSN (insn));
+ }
+ }
+}
+
/* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_COND.
Returns a new basic block if we've terminated the current basic
block and created a new one. */
edge new_edge;
edge true_edge;
edge false_edge;
- tree pred = gimple_cond_pred_to_tree (stmt);
rtx last2, last;
+ enum tree_code code;
+ tree op0, op1;
+
+ code = gimple_cond_code (stmt);
+ op0 = gimple_cond_lhs (stmt);
+ op1 = gimple_cond_rhs (stmt);
+ /* We're sometimes presented with such code:
+ D.123_1 = x < y;
+ if (D.123_1 != 0)
+ ...
+ This would expand to two comparisons which then later might
+ be cleaned up by combine. But some pattern matchers like if-conversion
+ work better when there's only one compare, so make up for this
+ here as a special exception if TER would have made the same change. */
+ if (gimple_cond_single_var_p (stmt)
+ && SA.values
+ && TREE_CODE (op0) == SSA_NAME
+ && bitmap_bit_p (SA.values, SSA_NAME_VERSION (op0)))
+ {
+ gimple second = SSA_NAME_DEF_STMT (op0);
+ if (gimple_code (second) == GIMPLE_ASSIGN
+ && TREE_CODE_CLASS (gimple_assign_rhs_code (second))
+ == tcc_comparison)
+ {
+ code = gimple_assign_rhs_code (second);
+ op0 = gimple_assign_rhs1 (second);
+ op1 = gimple_assign_rhs2 (second);
+ }
+ }
last2 = last = get_last_insn ();
two-way jump that needs to be decomposed into two basic blocks. */
if (false_edge->dest == bb->next_bb)
{
- jumpif (pred, label_rtx_for_bb (true_edge->dest));
+ jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest));
add_reg_br_prob_note (last, true_edge->probability);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
if (true_edge->goto_locus)
}
true_edge->goto_block = NULL;
false_edge->flags |= EDGE_FALLTHRU;
- ggc_free (pred);
+ maybe_cleanup_end_of_block (false_edge);
return NULL;
}
if (true_edge->dest == bb->next_bb)
{
- jumpifnot (pred, label_rtx_for_bb (false_edge->dest));
+ jumpifnot_1 (code, op0, op1, label_rtx_for_bb (false_edge->dest));
add_reg_br_prob_note (last, false_edge->probability);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
if (false_edge->goto_locus)
}
false_edge->goto_block = NULL;
true_edge->flags |= EDGE_FALLTHRU;
- ggc_free (pred);
+ maybe_cleanup_end_of_block (true_edge);
return NULL;
}
- jumpif (pred, label_rtx_for_bb (true_edge->dest));
+ jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest));
add_reg_br_prob_note (last, true_edge->probability);
last = get_last_insn ();
if (false_edge->goto_locus)
}
true_edge->goto_block = NULL;
- ggc_free (pred);
return new_bb;
}
-/* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_CALL
- that has CALL_EXPR_TAILCALL set. Returns non-null if we actually
- generated a tail call (something that might be denied by the ABI
- rules governing the call; see calls.c).
-
- Sets CAN_FALLTHRU if we generated a *conditional* tail call, and
- can still reach the rest of BB. The case here is __builtin_sqrt,
- where the NaN result goes through the external function (with a
- tailcall) and the normal result happens via a sqrt instruction. */
+/* A subroutine of expand_gimple_stmt_1, expanding one GIMPLE_CALL
+ statement STMT. */
-static basic_block
-expand_gimple_tailcall (basic_block bb, gimple stmt, bool *can_fallthru)
+static void
+expand_call_stmt (gimple stmt)
{
- rtx last2, last;
- edge e;
- edge_iterator ei;
- int probability;
- gcov_type count;
- tree stmt_tree = gimple_to_tree (stmt);
+ tree exp;
+ tree lhs = gimple_call_lhs (stmt);
+ size_t i;
- last2 = last = get_last_insn ();
+ exp = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3);
- expand_expr_stmt (stmt_tree);
+ CALL_EXPR_FN (exp) = gimple_call_fn (stmt);
+ TREE_TYPE (exp) = gimple_call_return_type (stmt);
+ CALL_EXPR_STATIC_CHAIN (exp) = gimple_call_chain (stmt);
- release_stmt_tree (stmt, stmt_tree);
+ for (i = 0; i < gimple_call_num_args (stmt); i++)
+ CALL_EXPR_ARG (exp, i) = gimple_call_arg (stmt, i);
- for (last = NEXT_INSN (last); last; last = NEXT_INSN (last))
- if (CALL_P (last) && SIBLING_CALL_P (last))
- goto found;
+ if (gimple_has_side_effects (stmt))
+ TREE_SIDE_EFFECTS (exp) = 1;
- maybe_dump_rtl_for_gimple_stmt (stmt, last2);
+ if (gimple_call_nothrow_p (stmt))
+ TREE_NOTHROW (exp) = 1;
- *can_fallthru = true;
- return NULL;
+ CALL_EXPR_TAILCALL (exp) = gimple_call_tail_p (stmt);
+ CALL_EXPR_RETURN_SLOT_OPT (exp) = gimple_call_return_slot_opt_p (stmt);
+ CALL_FROM_THUNK_P (exp) = gimple_call_from_thunk_p (stmt);
+ CALL_CANNOT_INLINE_P (exp) = gimple_call_cannot_inline_p (stmt);
+ CALL_EXPR_VA_ARG_PACK (exp) = gimple_call_va_arg_pack_p (stmt);
+ SET_EXPR_LOCATION (exp, gimple_location (stmt));
+ TREE_BLOCK (exp) = gimple_block (stmt);
- found:
+ if (lhs)
+ expand_assignment (lhs, exp, false);
+ else
+ expand_expr_real_1 (exp, const0_rtx, VOIDmode, EXPAND_NORMAL, NULL);
+}
+
+/* A subroutine of expand_gimple_stmt, expanding one gimple statement
+ STMT that doesn't require special handling for outgoing edges. That
+ is, no tailcalls and no GIMPLE_COND. */
+
+static void
+expand_gimple_stmt_1 (gimple stmt)
+{
+ tree op0;
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_GOTO:
+ op0 = gimple_goto_dest (stmt);
+ if (TREE_CODE (op0) == LABEL_DECL)
+ expand_goto (op0);
+ else
+ expand_computed_goto (op0);
+ break;
+ case GIMPLE_LABEL:
+ expand_label (gimple_label_label (stmt));
+ break;
+ case GIMPLE_NOP:
+ case GIMPLE_PREDICT:
+ break;
+ case GIMPLE_SWITCH:
+ expand_case (stmt);
+ break;
+ case GIMPLE_ASM:
+ expand_asm_stmt (stmt);
+ break;
+ case GIMPLE_CALL:
+ expand_call_stmt (stmt);
+ break;
+
+ case GIMPLE_RETURN:
+ op0 = gimple_return_retval (stmt);
+
+ if (op0 && op0 != error_mark_node)
+ {
+ tree result = DECL_RESULT (current_function_decl);
+
+ /* If we are not returning the current function's RESULT_DECL,
+ build an assignment to it. */
+ if (op0 != result)
+ {
+ /* I believe that a function's RESULT_DECL is unique. */
+ gcc_assert (TREE_CODE (op0) != RESULT_DECL);
+
+ /* ??? We'd like to use simply expand_assignment here,
+ but this fails if the value is of BLKmode but the return
+ decl is a register. expand_return has special handling
+ for this combination, which eventually should move
+ to common code. See comments there. Until then, let's
+ build a modify expression :-/ */
+ op0 = build2 (MODIFY_EXPR, TREE_TYPE (result),
+ result, op0);
+ }
+ }
+ if (!op0)
+ expand_null_return ();
+ else
+ expand_return (op0);
+ break;
+
+ case GIMPLE_ASSIGN:
+ {
+ tree lhs = gimple_assign_lhs (stmt);
+
+ /* Tree expand used to fiddle with |= and &= of two bitfield
+ COMPONENT_REFs here. This can't happen with gimple, the LHS
+ of binary assigns must be a gimple reg. */
+
+ if (TREE_CODE (lhs) != SSA_NAME
+ || get_gimple_rhs_class (gimple_expr_code (stmt))
+ == GIMPLE_SINGLE_RHS)
+ {
+ tree rhs = gimple_assign_rhs1 (stmt);
+ gcc_assert (get_gimple_rhs_class (gimple_expr_code (stmt))
+ == GIMPLE_SINGLE_RHS);
+ if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (rhs))
+ SET_EXPR_LOCATION (rhs, gimple_location (stmt));
+ expand_assignment (lhs, rhs,
+ gimple_assign_nontemporal_move_p (stmt));
+ }
+ else
+ {
+ rtx target, temp;
+ bool nontemporal = gimple_assign_nontemporal_move_p (stmt);
+ struct separate_ops ops;
+ bool promoted = false;
+
+ target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
+ promoted = true;
+
+ ops.code = gimple_assign_rhs_code (stmt);
+ ops.type = TREE_TYPE (lhs);
+ switch (get_gimple_rhs_class (gimple_expr_code (stmt)))
+ {
+ case GIMPLE_BINARY_RHS:
+ ops.op1 = gimple_assign_rhs2 (stmt);
+ /* Fallthru */
+ case GIMPLE_UNARY_RHS:
+ ops.op0 = gimple_assign_rhs1 (stmt);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ ops.location = gimple_location (stmt);
+
+ /* If we want to use a nontemporal store, force the value into
+ a register first. If we store into a promoted register,
+ don't directly expand to target. */
+ temp = nontemporal || promoted ? NULL_RTX : target;
+ temp = expand_expr_real_2 (&ops, temp, GET_MODE (target),
+ EXPAND_NORMAL);
+
+ if (temp == target)
+ ;
+ else if (promoted)
+ {
+ int unsignedp = SUBREG_PROMOTED_UNSIGNED_P (target);
+ /* If TEMP is a VOIDmode constant, use convert_modes to make
+ sure that we properly convert it. */
+ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
+ {
+ temp = convert_modes (GET_MODE (target),
+ TYPE_MODE (ops.type),
+ temp, unsignedp);
+ temp = convert_modes (GET_MODE (SUBREG_REG (target)),
+ GET_MODE (target), temp, unsignedp);
+ }
+
+ convert_move (SUBREG_REG (target), temp, unsignedp);
+ }
+ else if (nontemporal && emit_storent_insn (target, temp))
+ ;
+ else
+ {
+ temp = force_operand (temp, target);
+ if (temp != target)
+ emit_move_insn (target, temp);
+ }
+ }
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Expand one gimple statement STMT and return the last RTL instruction
+ before any of the newly generated ones.
+
+ In addition to generating the necessary RTL instructions this also
+ sets REG_EH_REGION notes if necessary and sets the current source
+ location for diagnostics. */
+
+static rtx
+expand_gimple_stmt (gimple stmt)
+{
+ int lp_nr = 0;
+ rtx last = NULL;
+ location_t saved_location = input_location;
+
+ last = get_last_insn ();
+
+ /* If this is an expression of some kind and it has an associated line
+ number, then emit the line number before expanding the expression.
+
+ We need to save and restore the file and line information so that
+ errors discovered during expansion are emitted with the right
+ information. It would be better if the diagnostic routines
+ used the file/line information embedded in the tree nodes rather
+ than globals. */
+ gcc_assert (cfun);
+
+ if (gimple_has_location (stmt))
+ {
+ input_location = gimple_location (stmt);
+ set_curr_insn_source_location (input_location);
+
+ /* Record where the insns produced belong. */
+ set_curr_insn_block (gimple_block (stmt));
+ }
+
+ expand_gimple_stmt_1 (stmt);
+ /* Free any temporaries used to evaluate this statement. */
+ free_temp_slots ();
+
+ input_location = saved_location;
+
+ /* Mark all insns that may trap. */
+ lp_nr = lookup_stmt_eh_lp (stmt);
+ if (lp_nr)
+ {
+ rtx insn;
+ for (insn = next_real_insn (last); insn;
+ insn = next_real_insn (insn))
+ {
+ if (! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
+ /* If we want exceptions for non-call insns, any
+ may_trap_p instruction may throw. */
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && GET_CODE (PATTERN (insn)) != USE
+ && insn_could_throw_p (insn))
+ make_reg_eh_region_note (insn, 0, lp_nr);
+ }
+ }
+
+ return last;
+}
+
+/* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_CALL
+ that has CALL_EXPR_TAILCALL set. Returns non-null if we actually
+ generated a tail call (something that might be denied by the ABI
+ rules governing the call; see calls.c).
+
+ Sets CAN_FALLTHRU if we generated a *conditional* tail call, and
+ can still reach the rest of BB. The case here is __builtin_sqrt,
+ where the NaN result goes through the external function (with a
+ tailcall) and the normal result happens via a sqrt instruction. */
+
+static basic_block
+expand_gimple_tailcall (basic_block bb, gimple stmt, bool *can_fallthru)
+{
+ rtx last2, last;
+ edge e;
+ edge_iterator ei;
+ int probability;
+ gcov_type count;
+
+ last2 = last = expand_gimple_stmt (stmt);
+
+ for (last = NEXT_INSN (last); last; last = NEXT_INSN (last))
+ if (CALL_P (last) && SIBLING_CALL_P (last))
+ goto found;
+
+ maybe_dump_rtl_for_gimple_stmt (stmt, last2);
+
+ *can_fallthru = true;
+ return NULL;
+
+ found:
/* ??? Wouldn't it be better to just reset any pending stack adjust?
Any instructions emitted here are about to be deleted. */
do_pending_stack_adjust ();
return bb;
}
+/* Return the difference between the floor and the truncated result of
+ a signed division by OP1 with remainder MOD. */
+static rtx
+floor_sdiv_adjust (enum machine_mode mode, rtx mod, rtx op1)
+{
+ /* (mod != 0 ? (op1 / mod < 0 ? -1 : 0) : 0) */
+ return gen_rtx_IF_THEN_ELSE
+ (mode, gen_rtx_NE (BImode, mod, const0_rtx),
+ gen_rtx_IF_THEN_ELSE
+ (mode, gen_rtx_LT (BImode,
+ gen_rtx_DIV (mode, op1, mod),
+ const0_rtx),
+ constm1_rtx, const0_rtx),
+ const0_rtx);
+}
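/* Worked example, not part of the patch: the same floor adjustment on
   plain C integers, whose division truncates toward zero.  For
   -7 / 2 truncation gives -3 with remainder -1; remainder and divisor
   have opposite signs, so the floor result is -3 + (-1) = -4. */
static long
floor_div_sketch (long a, long b)
{
  long q = a / b, r = a % b;
  /* Same test as floor_sdiv_adjust: mod != 0 and op1 / mod < 0,
     i.e. the remainder is nonzero and opposite in sign to the
     divisor. */
  if (r != 0 && ((r < 0) != (b < 0)))
    q -= 1;
  return q;
}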
+
+/* Return the difference between the ceil and the truncated result of
+ a signed division by OP1 with remainder MOD. */
+static rtx
+ceil_sdiv_adjust (enum machine_mode mode, rtx mod, rtx op1)
+{
+ /* (mod != 0 ? (op1 / mod > 0 ? 1 : 0) : 0) */
+ return gen_rtx_IF_THEN_ELSE
+ (mode, gen_rtx_NE (BImode, mod, const0_rtx),
+ gen_rtx_IF_THEN_ELSE
+ (mode, gen_rtx_GT (BImode,
+ gen_rtx_DIV (mode, op1, mod),
+ const0_rtx),
+ const1_rtx, const0_rtx),
+ const0_rtx);
+}
+
+/* Return the difference between the ceil and the truncated result of
+ an unsigned division by OP1 with remainder MOD. */
+static rtx
+ceil_udiv_adjust (enum machine_mode mode, rtx mod, rtx op1 ATTRIBUTE_UNUSED)
+{
+ /* (mod != 0 ? 1 : 0) */
+ return gen_rtx_IF_THEN_ELSE
+ (mode, gen_rtx_NE (BImode, mod, const0_rtx),
+ const1_rtx, const0_rtx);
+}
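/* Worked example, not part of the patch: unsigned ceiling division via
   the adjustment above, which is simply "add one if the remainder is
   nonzero".  For 10 / 4 truncation gives 2 with remainder 2, so the
   ceiling is 3. */
static unsigned long
ceil_udiv_sketch (unsigned long a, unsigned long b)
{
  unsigned long q = a / b, r = a % b;
  if (r != 0)	/* Same test as ceil_udiv_adjust: mod != 0. */
    q += 1;
  return q;
}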
+
+/* Return the difference between the rounded and the truncated result
+ of a signed division by OP1 with remainder MOD. Halfway cases are
+ rounded away from zero, rather than to the nearest even number. */
+static rtx
+round_sdiv_adjust (enum machine_mode mode, rtx mod, rtx op1)
+{
+ /* (abs (mod) >= abs (op1) - abs (mod)
+ ? (op1 / mod > 0 ? 1 : -1)
+ : 0) */
+ return gen_rtx_IF_THEN_ELSE
+ (mode, gen_rtx_GE (BImode, gen_rtx_ABS (mode, mod),
+ gen_rtx_MINUS (mode,
+ gen_rtx_ABS (mode, op1),
+ gen_rtx_ABS (mode, mod))),
+ gen_rtx_IF_THEN_ELSE
+ (mode, gen_rtx_GT (BImode,
+ gen_rtx_DIV (mode, op1, mod),
+ const0_rtx),
+ const1_rtx, constm1_rtx),
+ const0_rtx);
+}
+
+/* Return the difference between the rounded and the truncated result
+ of an unsigned division by OP1 with remainder MOD. Halfway cases
+ are rounded away from zero, rather than to the nearest even
+ number. */
+static rtx
+round_udiv_adjust (enum machine_mode mode, rtx mod, rtx op1)
+{
+ /* (mod >= op1 - mod ? 1 : 0) */
+ return gen_rtx_IF_THEN_ELSE
+ (mode, gen_rtx_GE (BImode, mod,
+ gen_rtx_MINUS (mode, op1, mod)),
+ const1_rtx, const0_rtx);
+}
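/* Worked example, not part of the patch: unsigned round-to-nearest
   division via the adjustment above.  For 10 / 4: q = 2, r = 2, and
   r >= 4 - 2 holds, so the rounded result is 3 (the halfway case
   rounds away from zero, as documented above). */
static unsigned long
round_udiv_sketch (unsigned long a, unsigned long b)
{
  unsigned long q = a / b, r = a % b;
  if (r >= b - r)	/* Same test as round_udiv_adjust: mod >= op1 - mod. */
    q += 1;
  return q;
}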
+
+/* Convert X to MODE, that must be Pmode or ptr_mode, without emitting
+ any rtl. */
+
+static rtx
+convert_debug_memory_address (enum machine_mode mode, rtx x)
+{
+ enum machine_mode xmode = GET_MODE (x);
+
+#ifndef POINTERS_EXTEND_UNSIGNED
+ gcc_assert (mode == Pmode);
+ gcc_assert (xmode == mode || xmode == VOIDmode);
+#else
+ gcc_assert (mode == Pmode || mode == ptr_mode);
+
+ if (GET_MODE (x) == mode || GET_MODE (x) == VOIDmode)
+ return x;
+
+ if (GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (xmode))
+ x = simplify_gen_subreg (mode, x, xmode,
+ subreg_lowpart_offset
+ (mode, xmode));
+ else if (POINTERS_EXTEND_UNSIGNED > 0)
+ x = gen_rtx_ZERO_EXTEND (mode, x);
+ else if (!POINTERS_EXTEND_UNSIGNED)
+ x = gen_rtx_SIGN_EXTEND (mode, x);
+ else
+ gcc_unreachable ();
+#endif /* POINTERS_EXTEND_UNSIGNED */
+
+ return x;
+}
+
+/* Return an RTX equivalent to the value of the tree expression
+ EXP. */
+
+static rtx
+expand_debug_expr (tree exp)
+{
+ rtx op0 = NULL_RTX, op1 = NULL_RTX, op2 = NULL_RTX;
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
+ addr_space_t as;
+ enum machine_mode address_mode;
+ enum machine_mode pointer_mode;
+
+ switch (TREE_CODE_CLASS (TREE_CODE (exp)))
+ {
+ case tcc_expression:
+ switch (TREE_CODE (exp))
+ {
+ case COND_EXPR:
+ goto ternary;
+
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ goto binary;
+
+ case TRUTH_NOT_EXPR:
+ goto unary;
+
+ default:
+ break;
+ }
+ break;
+
+ ternary:
+ op2 = expand_debug_expr (TREE_OPERAND (exp, 2));
+ if (!op2)
+ return NULL_RTX;
+ /* Fall through. */
+
+ binary:
+ case tcc_binary:
+ case tcc_comparison:
+ op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
+ if (!op1)
+ return NULL_RTX;
+ /* Fall through. */
+
+ unary:
+ case tcc_unary:
+ op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
+ if (!op0)
+ return NULL_RTX;
+ break;
+
+ case tcc_type:
+ case tcc_statement:
+ gcc_unreachable ();
+
+ case tcc_constant:
+ case tcc_exceptional:
+ case tcc_declaration:
+ case tcc_reference:
+ case tcc_vl_exp:
+ break;
+ }
+
+ switch (TREE_CODE (exp))
+ {
+ case STRING_CST:
+ if (!lookup_constant_def (exp))
+ {
+ if (strlen (TREE_STRING_POINTER (exp)) + 1
+ != (size_t) TREE_STRING_LENGTH (exp))
+ return NULL_RTX;
+ op0 = gen_rtx_CONST_STRING (Pmode, TREE_STRING_POINTER (exp));
+ op0 = gen_rtx_MEM (BLKmode, op0);
+ set_mem_attributes (op0, exp, 0);
+ return op0;
+ }
+ /* Fall through... */
+
+ case INTEGER_CST:
+ case REAL_CST:
+ case FIXED_CST:
+ op0 = expand_expr (exp, NULL_RTX, mode, EXPAND_INITIALIZER);
+ return op0;
+
+ case COMPLEX_CST:
+ gcc_assert (COMPLEX_MODE_P (mode));
+ op0 = expand_debug_expr (TREE_REALPART (exp));
+ op1 = expand_debug_expr (TREE_IMAGPART (exp));
+ return gen_rtx_CONCAT (mode, op0, op1);
+
+ case DEBUG_EXPR_DECL:
+ op0 = DECL_RTL_IF_SET (exp);
+
+ if (op0)
+ return op0;
+
+ op0 = gen_rtx_DEBUG_EXPR (mode);
+ DEBUG_EXPR_TREE_DECL (op0) = exp;
+ SET_DECL_RTL (exp, op0);
+
+ return op0;
+
+ case VAR_DECL:
+ case PARM_DECL:
+ case FUNCTION_DECL:
+ case LABEL_DECL:
+ case CONST_DECL:
+ case RESULT_DECL:
+ op0 = DECL_RTL_IF_SET (exp);
+
+ /* This decl was probably optimized away. */
+ if (!op0)
+ {
+ if (TREE_CODE (exp) != VAR_DECL
+ || DECL_EXTERNAL (exp)
+ || !TREE_STATIC (exp)
+ || !DECL_NAME (exp)
+ || DECL_HARD_REGISTER (exp)
+ || mode == VOIDmode)
+ return NULL;
+
+ op0 = DECL_RTL (exp);
+ SET_DECL_RTL (exp, NULL);
+ if (!MEM_P (op0)
+ || GET_CODE (XEXP (op0, 0)) != SYMBOL_REF
+ || SYMBOL_REF_DECL (XEXP (op0, 0)) != exp)
+ return NULL;
+ }
+ else
+ op0 = copy_rtx (op0);
+
+ if (GET_MODE (op0) == BLKmode)
+ {
+ gcc_assert (MEM_P (op0));
+ op0 = adjust_address_nv (op0, mode, 0);
+ return op0;
+ }
+
+ /* Fall through. */
+
+ adjust_mode:
+ case PAREN_EXPR:
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ {
+ enum machine_mode inner_mode = GET_MODE (op0);
+
+ if (mode == inner_mode)
+ return op0;
+
+ if (inner_mode == VOIDmode)
+ {
+ inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ if (mode == inner_mode)
+ return op0;
+ }
+
+ if (FLOAT_MODE_P (mode) && FLOAT_MODE_P (inner_mode))
+ {
+ if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (inner_mode))
+ op0 = simplify_gen_subreg (mode, op0, inner_mode, 0);
+ else if (GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (inner_mode))
+ op0 = simplify_gen_unary (FLOAT_TRUNCATE, mode, op0, inner_mode);
+ else
+ op0 = simplify_gen_unary (FLOAT_EXTEND, mode, op0, inner_mode);
+ }
+ else if (FLOAT_MODE_P (mode))
+ {
+ if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ op0 = simplify_gen_unary (UNSIGNED_FLOAT, mode, op0, inner_mode);
+ else
+ op0 = simplify_gen_unary (FLOAT, mode, op0, inner_mode);
+ }
+ else if (FLOAT_MODE_P (inner_mode))
+ {
+ if (unsignedp)
+ op0 = simplify_gen_unary (UNSIGNED_FIX, mode, op0, inner_mode);
+ else
+ op0 = simplify_gen_unary (FIX, mode, op0, inner_mode);
+ }
+ else if (CONSTANT_P (op0)
+ || GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (inner_mode))
+ op0 = simplify_gen_subreg (mode, op0, inner_mode,
+ subreg_lowpart_offset (mode,
+ inner_mode));
+ else if (unsignedp)
+ op0 = gen_rtx_ZERO_EXTEND (mode, op0);
+ else
+ op0 = gen_rtx_SIGN_EXTEND (mode, op0);
+
+ return op0;
+ }
+
+ case INDIRECT_REF:
+ case ALIGN_INDIRECT_REF:
+ case MISALIGNED_INDIRECT_REF:
+ op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
+ if (!op0)
+ return NULL;
+
+ if (POINTER_TYPE_P (TREE_TYPE (exp)))
+ as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
+ else
+ as = ADDR_SPACE_GENERIC;
+
+ address_mode = targetm.addr_space.address_mode (as);
+ pointer_mode = targetm.addr_space.pointer_mode (as);
+
+ gcc_assert (GET_MODE (op0) == address_mode
+ || GET_MODE (op0) == pointer_mode
+ || GET_CODE (op0) == CONST_INT
+ || GET_CODE (op0) == CONST_DOUBLE);
+
+ if (TREE_CODE (exp) == ALIGN_INDIRECT_REF)
+ {
+ int align = TYPE_ALIGN_UNIT (TREE_TYPE (exp));
+ op0 = gen_rtx_AND (address_mode, op0, GEN_INT (-align));
+ }
+
+ op0 = gen_rtx_MEM (mode, op0);
+
+ set_mem_attributes (op0, exp, 0);
+ set_mem_addr_space (op0, as);
+
+ return op0;
+
+ case TARGET_MEM_REF:
+ if (TMR_SYMBOL (exp) && !DECL_RTL_SET_P (TMR_SYMBOL (exp)))
+ return NULL;
+
+ op0 = expand_debug_expr
+ (tree_mem_ref_addr (build_pointer_type (TREE_TYPE (exp)),
+ exp));
+ if (!op0)
+ return NULL;
+
+ as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
+ address_mode = targetm.addr_space.address_mode (as);
+ pointer_mode = targetm.addr_space.pointer_mode (as);
+
+ gcc_assert (GET_MODE (op0) == address_mode
+ || GET_MODE (op0) == pointer_mode
+ || GET_CODE (op0) == CONST_INT
+ || GET_CODE (op0) == CONST_DOUBLE);
+
+ op0 = gen_rtx_MEM (mode, op0);
+
+ set_mem_attributes (op0, exp, 0);
+ set_mem_addr_space (op0, as);
+
+ return op0;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ {
+ enum machine_mode mode1;
+ HOST_WIDE_INT bitsize, bitpos;
+ tree offset;
+ int volatilep = 0;
+ tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
+ &mode1, &unsignedp, &volatilep, false);
+ rtx orig_op0;
+
+ if (bitsize == 0)
+ return NULL;
+
+ orig_op0 = op0 = expand_debug_expr (tem);
+
+ if (!op0)
+ return NULL;
+
+ if (offset)
+ {
+ enum machine_mode addrmode, offmode;
+
+ gcc_assert (MEM_P (op0));
+
+ op0 = XEXP (op0, 0);
+ addrmode = GET_MODE (op0);
+ if (addrmode == VOIDmode)
+ addrmode = Pmode;
+
+ op1 = expand_debug_expr (offset);
+ if (!op1)
+ return NULL;
+
+ offmode = GET_MODE (op1);
+ if (offmode == VOIDmode)
+ offmode = TYPE_MODE (TREE_TYPE (offset));
+
+ if (addrmode != offmode)
+ op1 = simplify_gen_subreg (addrmode, op1, offmode,
+ subreg_lowpart_offset (addrmode,
+ offmode));
+
+ /* Don't use offset_address here, we don't need a
+ recognizable address, and we don't want to generate
+ code. */
+ op0 = gen_rtx_MEM (mode, gen_rtx_PLUS (addrmode, op0, op1));
+ }
+
+ if (MEM_P (op0))
+ {
+ if (mode1 == VOIDmode)
+ /* Bitfield. */
+ mode1 = smallest_mode_for_size (bitsize, MODE_INT);
+ if (bitpos >= BITS_PER_UNIT)
+ {
+ op0 = adjust_address_nv (op0, mode1, bitpos / BITS_PER_UNIT);
+ bitpos %= BITS_PER_UNIT;
+ }
+ else if (bitpos < 0)
+ {
+ HOST_WIDE_INT units
+ = (-bitpos + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
+ op0 = adjust_address_nv (op0, mode1, units);
+ bitpos += units * BITS_PER_UNIT;
+ }
+ else if (bitpos == 0 && bitsize == GET_MODE_BITSIZE (mode))
+ op0 = adjust_address_nv (op0, mode, 0);
+ else if (GET_MODE (op0) != mode1)
+ op0 = adjust_address_nv (op0, mode1, 0);
+ else
+ op0 = copy_rtx (op0);
+ if (op0 == orig_op0)
+ op0 = shallow_copy_rtx (op0);
+ set_mem_attributes (op0, exp, 0);
+ }
+
+ if (bitpos == 0 && mode == GET_MODE (op0))
+ return op0;
+
+ if (bitpos < 0)
+ return NULL;
+
+ if ((bitpos % BITS_PER_UNIT) == 0
+ && bitsize == GET_MODE_BITSIZE (mode1))
+ {
+ enum machine_mode opmode = GET_MODE (op0);
+
+ gcc_assert (opmode != BLKmode);
+
+ if (opmode == VOIDmode)
+ opmode = mode1;
+
+ /* This condition may hold if we're expanding the address
+ right past the end of an array that turned out not to
+ be addressable (i.e., the address was only computed in
+ debug stmts). The gen_subreg below would rightfully
+ crash, and the address doesn't really exist, so just
+ drop it. */
+ if (bitpos >= GET_MODE_BITSIZE (opmode))
+ return NULL;
+
+ return simplify_gen_subreg (mode, op0, opmode,
+ bitpos / BITS_PER_UNIT);
+ }
+
+ return simplify_gen_ternary (SCALAR_INT_MODE_P (GET_MODE (op0))
+ && TYPE_UNSIGNED (TREE_TYPE (exp))
+ ? SIGN_EXTRACT
+ : ZERO_EXTRACT, mode,
+ GET_MODE (op0) != VOIDmode
+ ? GET_MODE (op0) : mode1,
+ op0, GEN_INT (bitsize), GEN_INT (bitpos));
+ }
+
+ case ABS_EXPR:
+ return gen_rtx_ABS (mode, op0);
+
+ case NEGATE_EXPR:
+ return gen_rtx_NEG (mode, op0);
+
+ case BIT_NOT_EXPR:
+ return gen_rtx_NOT (mode, op0);
+
+ case FLOAT_EXPR:
+ if (unsignedp)
+ return gen_rtx_UNSIGNED_FLOAT (mode, op0);
+ else
+ return gen_rtx_FLOAT (mode, op0);
+
+ case FIX_TRUNC_EXPR:
+ if (unsignedp)
+ return gen_rtx_UNSIGNED_FIX (mode, op0);
+ else
+ return gen_rtx_FIX (mode, op0);
+
+ case POINTER_PLUS_EXPR:
+ case PLUS_EXPR:
+ return gen_rtx_PLUS (mode, op0, op1);
+
+ case MINUS_EXPR:
+ return gen_rtx_MINUS (mode, op0, op1);
+
+ case MULT_EXPR:
+ return gen_rtx_MULT (mode, op0, op1);
+
+ case RDIV_EXPR:
+ case TRUNC_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if (unsignedp)
+ return gen_rtx_UDIV (mode, op0, op1);
+ else
+ return gen_rtx_DIV (mode, op0, op1);
+
+ case TRUNC_MOD_EXPR:
+ if (unsignedp)
+ return gen_rtx_UMOD (mode, op0, op1);
+ else
+ return gen_rtx_MOD (mode, op0, op1);
+
+ case FLOOR_DIV_EXPR:
+ if (unsignedp)
+ return gen_rtx_UDIV (mode, op0, op1);
+ else
+ {
+ rtx div = gen_rtx_DIV (mode, op0, op1);
+ rtx mod = gen_rtx_MOD (mode, op0, op1);
+ rtx adj = floor_sdiv_adjust (mode, mod, op1);
+ return gen_rtx_PLUS (mode, div, adj);
+ }
+
+ case FLOOR_MOD_EXPR:
+ if (unsignedp)
+ return gen_rtx_UMOD (mode, op0, op1);
+ else
+ {
+ rtx mod = gen_rtx_MOD (mode, op0, op1);
+ rtx adj = floor_sdiv_adjust (mode, mod, op1);
+ adj = gen_rtx_NEG (mode, gen_rtx_MULT (mode, adj, op1));
+ return gen_rtx_PLUS (mode, mod, adj);
+ }
+
+ case CEIL_DIV_EXPR:
+ if (unsignedp)
+ {
+ rtx div = gen_rtx_UDIV (mode, op0, op1);
+ rtx mod = gen_rtx_UMOD (mode, op0, op1);
+ rtx adj = ceil_udiv_adjust (mode, mod, op1);
+ return gen_rtx_PLUS (mode, div, adj);
+ }
+ else
+ {
+ rtx div = gen_rtx_DIV (mode, op0, op1);
+ rtx mod = gen_rtx_MOD (mode, op0, op1);
+ rtx adj = ceil_sdiv_adjust (mode, mod, op1);
+ return gen_rtx_PLUS (mode, div, adj);
+ }
+
+ case CEIL_MOD_EXPR:
+ if (unsignedp)
+ {
+ rtx mod = gen_rtx_UMOD (mode, op0, op1);
+ rtx adj = ceil_udiv_adjust (mode, mod, op1);
+ adj = gen_rtx_NEG (mode, gen_rtx_MULT (mode, adj, op1));
+ return gen_rtx_PLUS (mode, mod, adj);
+ }
+ else
+ {
+ rtx mod = gen_rtx_MOD (mode, op0, op1);
+ rtx adj = ceil_sdiv_adjust (mode, mod, op1);
+ adj = gen_rtx_NEG (mode, gen_rtx_MULT (mode, adj, op1));
+ return gen_rtx_PLUS (mode, mod, adj);
+ }
+
+ case ROUND_DIV_EXPR:
+ if (unsignedp)
+ {
+ rtx div = gen_rtx_UDIV (mode, op0, op1);
+ rtx mod = gen_rtx_UMOD (mode, op0, op1);
+ rtx adj = round_udiv_adjust (mode, mod, op1);
+ return gen_rtx_PLUS (mode, div, adj);
+ }
+ else
+ {
+ rtx div = gen_rtx_DIV (mode, op0, op1);
+ rtx mod = gen_rtx_MOD (mode, op0, op1);
+ rtx adj = round_sdiv_adjust (mode, mod, op1);
+ return gen_rtx_PLUS (mode, div, adj);
+ }
+
+ case ROUND_MOD_EXPR:
+ if (unsignedp)
+ {
+ rtx mod = gen_rtx_UMOD (mode, op0, op1);
+ rtx adj = round_udiv_adjust (mode, mod, op1);
+ adj = gen_rtx_NEG (mode, gen_rtx_MULT (mode, adj, op1));
+ return gen_rtx_PLUS (mode, mod, adj);
+ }
+ else
+ {
+ rtx mod = gen_rtx_MOD (mode, op0, op1);
+ rtx adj = round_sdiv_adjust (mode, mod, op1);
+ adj = gen_rtx_NEG (mode, gen_rtx_MULT (mode, adj, op1));
+ return gen_rtx_PLUS (mode, mod, adj);
+ }
+
+ case LSHIFT_EXPR:
+ return gen_rtx_ASHIFT (mode, op0, op1);
+
+ case RSHIFT_EXPR:
+ if (unsignedp)
+ return gen_rtx_LSHIFTRT (mode, op0, op1);
+ else
+ return gen_rtx_ASHIFTRT (mode, op0, op1);
+
+ case LROTATE_EXPR:
+ return gen_rtx_ROTATE (mode, op0, op1);
+
+ case RROTATE_EXPR:
+ return gen_rtx_ROTATERT (mode, op0, op1);
+
+ case MIN_EXPR:
+ if (unsignedp)
+ return gen_rtx_UMIN (mode, op0, op1);
+ else
+ return gen_rtx_SMIN (mode, op0, op1);
+
+ case MAX_EXPR:
+ if (unsignedp)
+ return gen_rtx_UMAX (mode, op0, op1);
+ else
+ return gen_rtx_SMAX (mode, op0, op1);
+
+ case BIT_AND_EXPR:
+ case TRUTH_AND_EXPR:
+ return gen_rtx_AND (mode, op0, op1);
+
+ case BIT_IOR_EXPR:
+ case TRUTH_OR_EXPR:
+ return gen_rtx_IOR (mode, op0, op1);
+
+ case BIT_XOR_EXPR:
+ case TRUTH_XOR_EXPR:
+ return gen_rtx_XOR (mode, op0, op1);
+
+ case TRUTH_ANDIF_EXPR:
+ return gen_rtx_IF_THEN_ELSE (mode, op0, op1, const0_rtx);
+
+ case TRUTH_ORIF_EXPR:
+ return gen_rtx_IF_THEN_ELSE (mode, op0, const_true_rtx, op1);
+
+ case TRUTH_NOT_EXPR:
+ return gen_rtx_EQ (mode, op0, const0_rtx);
+
+ case LT_EXPR:
+ if (unsignedp)
+ return gen_rtx_LTU (mode, op0, op1);
+ else
+ return gen_rtx_LT (mode, op0, op1);
+
+ case LE_EXPR:
+ if (unsignedp)
+ return gen_rtx_LEU (mode, op0, op1);
+ else
+ return gen_rtx_LE (mode, op0, op1);
+
+ case GT_EXPR:
+ if (unsignedp)
+ return gen_rtx_GTU (mode, op0, op1);
+ else
+ return gen_rtx_GT (mode, op0, op1);
+
+ case GE_EXPR:
+ if (unsignedp)
+ return gen_rtx_GEU (mode, op0, op1);
+ else
+ return gen_rtx_GE (mode, op0, op1);
+
+ case EQ_EXPR:
+ return gen_rtx_EQ (mode, op0, op1);
+
+ case NE_EXPR:
+ return gen_rtx_NE (mode, op0, op1);
+
+ case UNORDERED_EXPR:
+ return gen_rtx_UNORDERED (mode, op0, op1);
+
+ case ORDERED_EXPR:
+ return gen_rtx_ORDERED (mode, op0, op1);
+
+ case UNLT_EXPR:
+ return gen_rtx_UNLT (mode, op0, op1);
+
+ case UNLE_EXPR:
+ return gen_rtx_UNLE (mode, op0, op1);
+
+ case UNGT_EXPR:
+ return gen_rtx_UNGT (mode, op0, op1);
+
+ case UNGE_EXPR:
+ return gen_rtx_UNGE (mode, op0, op1);
+
+ case UNEQ_EXPR:
+ return gen_rtx_UNEQ (mode, op0, op1);
+
+ case LTGT_EXPR:
+ return gen_rtx_LTGT (mode, op0, op1);
+
+ case COND_EXPR:
+ return gen_rtx_IF_THEN_ELSE (mode, op0, op1, op2);
+
+ case COMPLEX_EXPR:
+ gcc_assert (COMPLEX_MODE_P (mode));
+ if (GET_MODE (op0) == VOIDmode)
+ op0 = gen_rtx_CONST (GET_MODE_INNER (mode), op0);
+ if (GET_MODE (op1) == VOIDmode)
+ op1 = gen_rtx_CONST (GET_MODE_INNER (mode), op1);
+ return gen_rtx_CONCAT (mode, op0, op1);
+
+ case CONJ_EXPR:
+ if (GET_CODE (op0) == CONCAT)
+ return gen_rtx_CONCAT (mode, XEXP (op0, 0),
+ gen_rtx_NEG (GET_MODE_INNER (mode),
+ XEXP (op0, 1)));
+ else
+ {
+ enum machine_mode imode = GET_MODE_INNER (mode);
+ rtx re, im;
+
+ if (MEM_P (op0))
+ {
+ re = adjust_address_nv (op0, imode, 0);
+ im = adjust_address_nv (op0, imode, GET_MODE_SIZE (imode));
+ }
+ else
+ {
+ enum machine_mode ifmode = int_mode_for_mode (mode);
+ enum machine_mode ihmode = int_mode_for_mode (imode);
+ rtx halfsize;
+ if (ifmode == BLKmode || ihmode == BLKmode)
+ return NULL;
+ halfsize = GEN_INT (GET_MODE_BITSIZE (ihmode));
+ re = op0;
+ if (mode != ifmode)
+ re = gen_rtx_SUBREG (ifmode, re, 0);
+ re = gen_rtx_ZERO_EXTRACT (ihmode, re, halfsize, const0_rtx);
+ if (imode != ihmode)
+ re = gen_rtx_SUBREG (imode, re, 0);
+ im = copy_rtx (op0);
+ if (mode != ifmode)
+ im = gen_rtx_SUBREG (ifmode, im, 0);
+ im = gen_rtx_ZERO_EXTRACT (ihmode, im, halfsize, halfsize);
+ if (imode != ihmode)
+ im = gen_rtx_SUBREG (imode, im, 0);
+ }
+ im = gen_rtx_NEG (imode, im);
+ return gen_rtx_CONCAT (mode, re, im);
+ }
+
+ case ADDR_EXPR:
+ op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
+ if (!op0 || !MEM_P (op0))
+ return NULL;
+
+ op0 = convert_debug_memory_address (mode, XEXP (op0, 0));
+
+ return op0;
+
+ case VECTOR_CST:
+ exp = build_constructor_from_list (TREE_TYPE (exp),
+ TREE_VECTOR_CST_ELTS (exp));
+ /* Fall through. */
+
+ case CONSTRUCTOR:
+ if (TREE_CODE (TREE_TYPE (exp)) == VECTOR_TYPE)
+ {
+ unsigned i;
+ tree val;
+
+ op0 = gen_rtx_CONCATN
+ (mode, rtvec_alloc (TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp))));
+
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), i, val)
+ {
+ op1 = expand_debug_expr (val);
+ if (!op1)
+ return NULL;
+ XVECEXP (op0, 0, i) = op1;
+ }
+
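+	  /* If the constructor supplied fewer elements than the vector
+	     has subparts, pad the remaining slots with zeros.  */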
+ if (i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)))
+ {
+ op1 = expand_debug_expr
+ (fold_convert (TREE_TYPE (TREE_TYPE (exp)), integer_zero_node));
+
+ if (!op1)
+ return NULL;
+
+ for (; i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)); i++)
+ XVECEXP (op0, 0, i) = op1;
+ }
+
+ return op0;
+ }
+ else
+ goto flag_unsupported;
+
+ case CALL_EXPR:
+ /* ??? Maybe handle some builtins? */
+ return NULL;
+
+ case SSA_NAME:
+ {
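+	/* An SSA name is located in the pseudo assigned to its
+	   partition; a name with no partition has no location.  */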
+ int part = var_to_partition (SA.map, exp);
+
+ if (part == NO_PARTITION)
+ return NULL;
+
+	gcc_assert (part >= 0 && (unsigned) part < SA.map->num_partitions);
+
+ op0 = SA.partition_to_pseudo[part];
+ goto adjust_mode;
+ }
+
+ case ERROR_MARK:
+ return NULL;
+
+ default:
+ flag_unsupported:
+#ifdef ENABLE_CHECKING
+ debug_tree (exp);
+ gcc_unreachable ();
+#else
+ return NULL;
+#endif
+ }
+}
+
+/* Expand the _LOCs in debug insns. We run this after expanding all
+ regular insns, so that any variables referenced in the function
+ will have their DECL_RTLs set. */
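+/* For illustration: a bind such as "# DEBUG x => a + 1" was emitted
+   during expansion as a DEBUG_INSN whose VAR_LOCATION still holds the
+   tree "a + 1"; here that tree is replaced by RTL along the lines of
+   (plus:SI (reg:SI 60) (const_int 1)), or by gen_rtx_UNKNOWN_VAR_LOC ()
+   when no RTL can be produced for it.  */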
+
+static void
+expand_debug_locations (void)
+{
+ rtx insn;
+ rtx last = get_last_insn ();
+ int save_strict_alias = flag_strict_aliasing;
+
+  /* New alias sets while setting up memory attributes cause
+     -fcompare-debug failures, even though they don't bring about any
+     codegen changes.  */
+ flag_strict_aliasing = 0;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (DEBUG_INSN_P (insn))
+ {
+	tree value = (tree) INSN_VAR_LOCATION_LOC (insn);
+ rtx val;
+ enum machine_mode mode;
+
+ if (value == NULL_TREE)
+ val = NULL_RTX;
+ else
+ {
+ val = expand_debug_expr (value);
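+	    /* Expanding the debug expression must not have emitted
+	       any insns of its own.  */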
+ gcc_assert (last == get_last_insn ());
+ }
+
+ if (!val)
+ val = gen_rtx_UNKNOWN_VAR_LOC ();
+ else
+ {
+ mode = GET_MODE (INSN_VAR_LOCATION (insn));
+
+ gcc_assert (mode == GET_MODE (val)
+ || (GET_MODE (val) == VOIDmode
+ && (CONST_INT_P (val)
+ || GET_CODE (val) == CONST_FIXED
+ || GET_CODE (val) == CONST_DOUBLE
+ || GET_CODE (val) == LABEL_REF)));
+ }
+
+ INSN_VAR_LOCATION_LOC (insn) = val;
+ }
+
+ flag_strict_aliasing = save_strict_alias;
+}
+
/* Expand basic block BB from GIMPLE trees to RTL. */
static basic_block
if (stmt)
{
- tree stmt_tree = gimple_to_tree (stmt);
- expand_expr_stmt (stmt_tree);
- release_stmt_tree (stmt, stmt_tree);
+ expand_gimple_stmt (stmt);
gsi_next (&gsi);
}
NOTE_BASIC_BLOCK (note) = bb;
- for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
- {
- /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */
- e->flags &= ~EDGE_EXECUTABLE;
-
- /* At the moment not all abnormal edges match the RTL representation.
- It is safe to remove them here as find_many_sub_basic_blocks will
- rediscover them. In the future we should get this fixed properly. */
- if (e->flags & EDGE_ABNORMAL)
- remove_edge (e);
- else
- ei_next (&ei);
- }
-
for (; !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
basic_block new_bb;
+ stmt = gsi_stmt (gsi);
+ currently_expanding_gimple_stmt = stmt;
+
/* Expand this statement, then evaluate the resulting RTL and
fixup the CFG accordingly. */
if (gimple_code (stmt) == GIMPLE_COND)
if (new_bb)
return new_bb;
}
+ else if (gimple_debug_bind_p (stmt))
+ {
+ location_t sloc = get_curr_insn_source_location ();
+ tree sblock = get_curr_insn_block ();
+ gimple_stmt_iterator nsi = gsi;
+
+ for (;;)
+ {
+ tree var = gimple_debug_bind_get_var (stmt);
+ tree value;
+ rtx val;
+ enum machine_mode mode;
+
+ if (gimple_debug_bind_has_value_p (stmt))
+ value = gimple_debug_bind_get_value (stmt);
+ else
+ value = NULL_TREE;
+
+ last = get_last_insn ();
+
+ set_curr_insn_source_location (gimple_location (stmt));
+ set_curr_insn_block (gimple_block (stmt));
+
+ if (DECL_P (var))
+ mode = DECL_MODE (var);
+ else
+ mode = TYPE_MODE (TREE_TYPE (var));
+
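+	    /* VALUE is still a tree here; smuggle it in where an RTX is
+	       expected, to be replaced with real RTL by
+	       expand_debug_locations once all regular insns have been
+	       expanded.  */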
+ val = gen_rtx_VAR_LOCATION
+	      (mode, var, (rtx) value, VAR_INIT_STATUS_INITIALIZED);
+
+ val = emit_debug_insn (val);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ /* We can't dump the insn with a TREE where an RTX
+ is expected. */
+ INSN_VAR_LOCATION_LOC (val) = const0_rtx;
+ maybe_dump_rtl_for_gimple_stmt (stmt, last);
+		INSN_VAR_LOCATION_LOC (val) = (rtx) value;
+ }
+
+ gsi = nsi;
+ gsi_next (&nsi);
+ if (gsi_end_p (nsi))
+ break;
+ stmt = gsi_stmt (nsi);
+ if (!gimple_debug_bind_p (stmt))
+ break;
+ }
+
+ set_curr_insn_source_location (sloc);
+ set_curr_insn_block (sblock);
+ }
else
{
if (is_gimple_call (stmt) && gimple_call_tail_p (stmt))
return new_bb;
}
}
- else if (gimple_code (stmt) != GIMPLE_CHANGE_DYNAMIC_TYPE)
+ else
{
- tree stmt_tree = gimple_to_tree (stmt);
- last = get_last_insn ();
- expand_expr_stmt (stmt_tree);
+ def_operand_p def_p;
+ def_p = SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF);
+
+ if (def_p != NULL)
+ {
+ /* Ignore this stmt if it is in the list of
+ replaceable expressions. */
+ if (SA.values
+ && bitmap_bit_p (SA.values,
+ SSA_NAME_VERSION (DEF_FROM_PTR (def_p))))
+ continue;
+ }
+ last = expand_gimple_stmt (stmt);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
- release_stmt_tree (stmt, stmt_tree);
}
}
}
+ currently_expanding_gimple_stmt = NULL;
+
/* Expand implicit goto and convert goto_locus. */
FOR_EACH_EDGE (e, ei, bb->succs)
{
}
}
+  /* Expanded RTL can create a jump as the last instruction of the block.
+     Later passes might assume it is a jump to the successor and break
+     edge insertion.  We need to insert a dummy move to prevent this.
+     See PR41440.  */
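+  /* The dummy is merely a self-move, e.g. (set (reg:SI 58) (reg:SI 58));
+     it exists only so the block does not end in a jump.  */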
+ if (single_succ_p (bb)
+ && (single_succ_edge (bb)->flags & EDGE_FALLTHRU)
+ && (last = get_last_insn ())
+ && JUMP_P (last))
+ {
+ rtx dummy = gen_reg_rtx (SImode);
+ emit_insn_after_noloc (gen_move_insn (dummy, dummy), last, NULL);
+ }
+
do_pending_stack_adjust ();
/* Find the block tail. The last insn in the block is the insn
if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
{
t = get_base_address (t);
- if (t && DECL_P (t))
+ if (t && DECL_P (t)
+ && DECL_MODE (t) != BLKmode)
TREE_ADDRESSABLE (t) = 1;
}
|| crtl->has_nonlocal_goto)
crtl->need_drap = true;
- gcc_assert (crtl->stack_alignment_needed
- <= crtl->stack_alignment_estimated);
+  /* Call update_stack_boundary here again to update the incoming stack
+     boundary.  It may set the incoming stack alignment to a different
+     value after RTL expansion.  TARGET_FUNCTION_OK_FOR_SIBCALL may
+     use the minimum incoming stack alignment to check if it is OK
+     to perform sibcall optimization, since sibcall optimization will
+     only align the outgoing stack to the incoming stack boundary.  */
+ if (targetm.calls.update_stack_boundary)
+ targetm.calls.update_stack_boundary ();
+
+ /* The incoming stack frame has to be aligned at least at
+ parm_stack_boundary. */
+ gcc_assert (crtl->parm_stack_boundary <= INCOMING_STACK_BOUNDARY);
/* Update crtl->stack_alignment_estimated and use it later to align
stack. We check PREFERRED_STACK_BOUNDARY if there may be non-call
if (preferred_stack_boundary > crtl->stack_alignment_needed)
crtl->stack_alignment_needed = preferred_stack_boundary;
+ gcc_assert (crtl->stack_alignment_needed
+ <= crtl->stack_alignment_estimated);
+
crtl->stack_realign_needed
= INCOMING_STACK_BOUNDARY < crtl->stack_alignment_estimated;
crtl->stack_realign_tried = crtl->stack_realign_needed;
sbitmap blocks;
edge_iterator ei;
edge e;
+ unsigned i;
+
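+  /* Leave SSA form: coalesce SSA names into partitions and compute
+     which single-use expressions TER may forward into their uses,
+     recording both in SA.  */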
+ rewrite_out_of_ssa (&SA);
+  SA.partition_to_pseudo = (rtx *) xcalloc (SA.map->num_partitions,
+					     sizeof (rtx));
/* Some backends want to know that we are expanding to RTL. */
currently_expanding_to_rtl = 1;
rtl_profile_for_bb (ENTRY_BLOCK_PTR);
insn_locators_alloc ();
- if (!DECL_BUILT_IN (current_function_decl))
+ if (!DECL_IS_BUILTIN (current_function_decl))
{
/* Eventually, all FEs should explicitly set function_start_locus. */
if (cfun->function_start_locus == UNKNOWN_LOCATION)
targetm.expand_to_rtl_hook ();
crtl->stack_alignment_needed = STACK_BOUNDARY;
crtl->max_used_stack_slot_alignment = STACK_BOUNDARY;
- crtl->stack_alignment_estimated = STACK_BOUNDARY;
+ crtl->stack_alignment_estimated = 0;
crtl->preferred_stack_boundary = STACK_BOUNDARY;
cfun->cfg->max_jumptable_ents = 0;
/* Set up parameters and prepare for return, for the function. */
expand_function_start (current_function_decl);
+ /* Now that we also have the parameter RTXs, copy them over to our
+ partitions. */
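+  /* For example, a partition holding the SSA names of a PARM_DECL
+     picks up the DECL_RTL that expand_function_start just assigned
+     to that PARM_DECL.  */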
+ for (i = 0; i < SA.map->num_partitions; i++)
+ {
+ tree var = SSA_NAME_VAR (partition_to_var (SA.map, i));
+
+ if (TREE_CODE (var) != VAR_DECL
+ && !SA.partition_to_pseudo[i])
+ SA.partition_to_pseudo[i] = DECL_RTL_IF_SET (var);
+ gcc_assert (SA.partition_to_pseudo[i]);
+
+      /* If this decl was marked as living in multiple places, reset
+	 its DECL_RTL to NULL now.  */
+ if (DECL_RTL_IF_SET (var) == pc_rtx)
+ SET_DECL_RTL (var, NULL);
+
+      /* Some RTL parts really want to look at DECL_RTL(x) when x
+	 was a decl marked in REG_ATTRS or MEM_ATTRS.  We could use
+	 SET_DECL_RTL here to make it available, but that would mean
+	 selecting one of the potentially many RTLs for one DECL.  Instead
+	 of doing that we simply reset the MEM_EXPR of the RTL in question,
+	 so nobody can get at it, and hence nobody can call DECL_RTL on it.  */
+ if (!DECL_RTL_SET_P (var))
+ {
+ if (MEM_P (SA.partition_to_pseudo[i]))
+ set_mem_expr (SA.partition_to_pseudo[i], NULL);
+ }
+ }
+
/* If this function is `main', emit a call to `__main'
to run global initializers, etc. */
if (DECL_NAME (current_function_decl)
if (crtl->stack_protect_guard)
stack_protect_prologue ();
- /* Update stack boundary if needed. */
- if (SUPPORTS_STACK_ALIGNMENT)
- {
- /* Call update_stack_boundary here to update incoming stack
- boundary before TARGET_FUNCTION_OK_FOR_SIBCALL is called.
- TARGET_FUNCTION_OK_FOR_SIBCALL needs to know the accurate
- incoming stack alignment to check if it is OK to perform
- sibcall optimization since sibcall optimization will only
- align the outgoing stack to incoming stack boundary. */
- if (targetm.calls.update_stack_boundary)
- targetm.calls.update_stack_boundary ();
-
- /* The incoming stack frame has to be aligned at least at
- parm_stack_boundary. */
- gcc_assert (crtl->parm_stack_boundary <= INCOMING_STACK_BOUNDARY);
- }
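+  /* Eliminate the remaining PHI nodes by inserting the equivalent
+     copies on the appropriate incoming edges.  */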
+ expand_phi_nodes (&SA);
/* Register rtl specific functions for cfg. */
rtl_register_cfg_hooks ();
init_block = construct_init_block ();
/* Clear EDGE_EXECUTABLE on the entry edge(s). It is cleaned from the
- remaining edges in expand_gimple_basic_block. */
+ remaining edges later. */
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
e->flags &= ~EDGE_EXECUTABLE;
FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR, next_bb)
bb = expand_gimple_basic_block (bb);
+ if (MAY_HAVE_DEBUG_INSNS)
+ expand_debug_locations ();
+
+ execute_free_datastructures ();
+ finish_out_of_ssa (&SA);
+
/* Expansion is used by optimization passes too, set maybe_hot_insn_p
conservatively to true until they are all profile aware. */
pointer_map_destroy (lab_rtx_for_bb);
set_curr_insn_block (DECL_INITIAL (current_function_decl));
insn_locators_finalize ();
- /* We're done expanding trees to RTL. */
- currently_expanding_to_rtl = 0;
-
- /* Convert tree EH labels to RTL EH labels and zap the tree EH table. */
- convert_from_eh_region_ranges ();
+ /* Zap the tree EH table. */
set_eh_throw_stmt_table (cfun, NULL);
rebuild_jump_labels (get_insns ());
- find_exception_handler_labels ();
+
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ {
+ edge e;
+ edge_iterator ei;
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+ {
+ if (e->insns.r)
+ commit_one_edge_insertion (e);
+ else
+ ei_next (&ei);
+ }
+ }
+
+ /* We're done expanding trees to RTL. */
+ currently_expanding_to_rtl = 0;
+
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
+ {
+ edge e;
+ edge_iterator ei;
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+ {
+ /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */
+ e->flags &= ~EDGE_EXECUTABLE;
+
+ /* At the moment not all abnormal edges match the RTL
+ representation. It is safe to remove them here as
+ find_many_sub_basic_blocks will rediscover them.
+ In the future we should get this fixed properly. */
+ if ((e->flags & EDGE_ABNORMAL)
+ && !(e->flags & EDGE_SIBCALL))
+ remove_edge (e);
+ else
+ ei_next (&ei);
+ }
+ }
blocks = sbitmap_alloc (last_basic_block);
sbitmap_ones (blocks);
find_many_sub_basic_blocks (blocks);
- purge_all_dead_edges ();
sbitmap_free (blocks);
+ purge_all_dead_edges ();
compact_blocks ();
NULL, /* next */
0, /* static_pass_number */
TV_EXPAND, /* tv_id */
- /* ??? If TER is enabled, we actually receive GENERIC. */
- PROP_gimple_leh | PROP_cfg, /* properties_required */
+ PROP_ssa | PROP_gimple_leh | PROP_cfg,/* properties_required */
PROP_rtl, /* properties_provided */
- PROP_trees, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func, /* todo_flags_finish */
+ PROP_ssa | PROP_trees, /* properties_destroyed */
+ TODO_verify_ssa | TODO_verify_flow
+ | TODO_verify_stmts, /* todo_flags_start */
+ TODO_dump_func
+ | TODO_ggc_collect /* todo_flags_finish */
}
};