/* A pass for lowering trees to RTL.
- Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
#include "except.h"
#include "flags.h"
#include "diagnostic.h"
+#include "tree-pretty-print.h"
+#include "gimple-pretty-print.h"
#include "toplev.h"
#include "debug.h"
#include "params.h"
#include "value-prof.h"
#include "target.h"
#include "ssaexpand.h"
-
+#include "bitmap.h"
+#include "sbitmap.h"
+#include "insn-attr.h" /* For INSN_SCHEDULING. */
/* This variable holds information helping the rewriting of SSA trees
into RTL. */
grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
- if (grhs_class == GIMPLE_BINARY_RHS)
+ if (grhs_class == GIMPLE_TERNARY_RHS)
+ t = build3 (gimple_assign_rhs_code (stmt),
+ TREE_TYPE (gimple_assign_lhs (stmt)),
+ gimple_assign_rhs1 (stmt),
+ gimple_assign_rhs2 (stmt),
+ gimple_assign_rhs3 (stmt));
+ else if (grhs_class == GIMPLE_BINARY_RHS)
t = build2 (gimple_assign_rhs_code (stmt),
TREE_TYPE (gimple_assign_lhs (stmt)),
gimple_assign_rhs1 (stmt),
{
t = gimple_assign_rhs1 (stmt);
/* Avoid modifying this tree in place below. */
- if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
- && gimple_location (stmt) != EXPR_LOCATION (t))
+ if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
+ && gimple_location (stmt) != EXPR_LOCATION (t))
+ || (gimple_block (stmt)
+ && currently_expanding_to_rtl
+ && EXPR_P (t)
+ && gimple_block (stmt) != TREE_BLOCK (t)))
t = copy_node (t);
}
else
if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t))
SET_EXPR_LOCATION (t, gimple_location (stmt));
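+ /* While expanding to RTL, also keep the expression's lexical block in
+ sync with the gimple statement, so scope and debug info stay correct. */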
+ if (gimple_block (stmt) && currently_expanding_to_rtl && EXPR_P (t))
+ TREE_BLOCK (t) = gimple_block (stmt);
return t;
}
static unsigned int
get_decl_align_unit (tree decl)
{
- unsigned int align;
-
- align = LOCAL_DECL_ALIGNMENT (decl);
-
- if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
- align = MAX_SUPPORTED_STACK_ALIGNMENT;
-
- if (SUPPORTS_STACK_ALIGNMENT)
- {
- if (crtl->stack_alignment_estimated < align)
- {
- gcc_assert(!crtl->stack_realign_processed);
- crtl->stack_alignment_estimated = align;
- }
- }
-
- /* stack_alignment_needed > PREFERRED_STACK_BOUNDARY is permitted.
- So here we only make sure stack_alignment_needed >= align. */
- if (crtl->stack_alignment_needed < align)
- crtl->stack_alignment_needed = align;
- if (crtl->max_used_stack_slot_alignment < align)
- crtl->max_used_stack_slot_alignment = align;
-
+ unsigned int align = LOCAL_DECL_ALIGNMENT (decl);
return align / BITS_PER_UNIT;
}
Return the frame offset. */
static HOST_WIDE_INT
-alloc_stack_frame_space (HOST_WIDE_INT size, HOST_WIDE_INT align)
+alloc_stack_frame_space (HOST_WIDE_INT size, unsigned HOST_WIDE_INT align)
{
HOST_WIDE_INT offset, new_frame_offset;
static void
add_stack_var (tree decl)
{
+ struct stack_var *v;
+
if (stack_vars_num >= stack_vars_alloc)
{
if (stack_vars_alloc)
stack_vars
= XRESIZEVEC (struct stack_var, stack_vars, stack_vars_alloc);
}
- stack_vars[stack_vars_num].decl = decl;
- stack_vars[stack_vars_num].offset = 0;
- stack_vars[stack_vars_num].size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1);
- stack_vars[stack_vars_num].alignb = get_decl_align_unit (SSAVAR (decl));
+ v = &stack_vars[stack_vars_num];
+
+ v->decl = decl;
+ v->offset = 0;
+ v->size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1);
+ /* Ensure that all variables have size, so that &a != &b for any two
+ variables that are simultaneously live. */
+ if (v->size == 0)
+ v->size = 1;
+ v->alignb = get_decl_align_unit (SSAVAR (decl));
/* All variables are initially in their own partition. */
- stack_vars[stack_vars_num].representative = stack_vars_num;
- stack_vars[stack_vars_num].next = EOC;
+ v->representative = stack_vars_num;
+ v->next = EOC;
/* All variables initially conflict with no other. */
- stack_vars[stack_vars_num].conflicts = NULL;
+ v->conflicts = NULL;
/* Ensure that this decl doesn't get put onto the list twice. */
set_rtl (decl, pc_rtx);
if (TREE_CODE (type) != RECORD_TYPE)
return false;
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL)
if (aggregate_contains_union_type (TREE_TYPE (field)))
return true;
}
/* A subroutine of partition_stack_vars. A comparison function for qsort,
- sorting an array of indices by the size and type of the object. */
+ sorting an array of indices by the properties of the object. */
static int
-stack_var_size_cmp (const void *a, const void *b)
+stack_var_cmp (const void *a, const void *b)
{
- HOST_WIDE_INT sa = stack_vars[*(const size_t *)a].size;
- HOST_WIDE_INT sb = stack_vars[*(const size_t *)b].size;
- tree decla, declb;
+ size_t ia = *(const size_t *)a;
+ size_t ib = *(const size_t *)b;
+ unsigned int aligna = stack_vars[ia].alignb;
+ unsigned int alignb = stack_vars[ib].alignb;
+ HOST_WIDE_INT sizea = stack_vars[ia].size;
+ HOST_WIDE_INT sizeb = stack_vars[ib].size;
+ tree decla = stack_vars[ia].decl;
+ tree declb = stack_vars[ib].decl;
+ bool largea, largeb;
unsigned int uida, uidb;
- if (sa < sb)
+ /* Primary compare on "large" alignment. Large comes first. */
+ largea = (aligna * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT);
+ largeb = (alignb * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT);
+ if (largea != largeb)
+ return (int)largeb - (int)largea;
+
+ /* Secondary compare on size, decreasing. */
+ if (sizea < sizeb)
+ return -1;
+ if (sizea > sizeb)
+ return 1;
+
+ /* Tertiary compare on true alignment, decreasing. */
+ if (aligna < alignb)
return -1;
- if (sa > sb)
+ if (aligna > alignb)
return 1;
- decla = stack_vars[*(const size_t *)a].decl;
- declb = stack_vars[*(const size_t *)b].decl;
- /* For stack variables of the same size use and id of the decls
- to make the sort stable. Two SSA names are compared by their
- version, SSA names come before non-SSA names, and two normal
- decls are compared by their DECL_UID. */
+
+ /* Final compare on ID for sort stability, increasing.
+ Two SSA names are compared by their version, SSA names come before
+ non-SSA names, and two normal decls are compared by their DECL_UID. */
if (TREE_CODE (decla) == SSA_NAME)
{
if (TREE_CODE (declb) == SSA_NAME)
else
uida = DECL_UID (decla), uidb = DECL_UID (declb);
if (uida < uidb)
- return -1;
- if (uida > uidb)
return 1;
+ if (uida > uidb)
+ return -1;
return 0;
}
for (j = i; j != EOC; j = stack_vars[j].next)
{
tree decl = stack_vars[j].decl;
- unsigned int uid = DECL_UID (decl);
+ unsigned int uid = DECL_PT_UID (decl);
/* We should never end up partitioning SSA names (though they
may end up on the stack). Neither should we allocate stack
- space to something that is unused and thus unreferenced. */
+ space to something that is unused and thus unreferenced, except
+ for -O0 where we are preserving even unreferenced variables. */
gcc_assert (DECL_P (decl)
- && referenced_var_lookup (uid));
+ && (!optimize
+ || referenced_var_lookup (DECL_UID (decl))));
bitmap_set_bit (part, uid);
*((bitmap *) pointer_map_insert (decls_to_partitions,
(void *)(size_t) uid)) = part;
/* Make the SSA name point to all partition members. */
pi = get_ptr_info (name);
- pt_solution_set (&pi->pt, part);
+ pt_solution_set (&pi->pt, part, false, false);
}
/* Make all points-to sets that contain one member of a partition
add_partitioned_vars_to_ptset (&cfun->gimple_df->escaped,
decls_to_partitions, visited, temp);
- add_partitioned_vars_to_ptset (&cfun->gimple_df->callused,
- decls_to_partitions, visited, temp);
pointer_set_destroy (visited);
pointer_map_destroy (decls_to_partitions);
if (n == 1)
return;
- qsort (stack_vars_sorted, n, sizeof (size_t), stack_var_size_cmp);
+ qsort (stack_vars_sorted, n, sizeof (size_t), stack_var_cmp);
for (si = 0; si < n; ++si)
{
size_t i = stack_vars_sorted[si];
HOST_WIDE_INT isize = stack_vars[i].size;
+ unsigned int ialign = stack_vars[i].alignb;
HOST_WIDE_INT offset = 0;
for (sj = si; sj-- > 0; )
if (stack_var_conflict_p (i, j))
continue;
+ /* Do not mix objects of "small" (supported) alignment
+ and "large" (unsupported) alignment. */
+ if ((ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
+ != (jalign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT))
+ continue;
+
/* Refine the remaining space check to include alignment. */
if (offset & (jalign - 1))
{
}
}
- if (optimize)
- update_alias_info_with_stack_vars ();
+ update_alias_info_with_stack_vars ();
}
/* A debugging aid for expand_used_vars. Dump the generated partitions. */
}
}
-/* Assign rtl to DECL at frame offset OFFSET. */
+/* Assign rtl to DECL at BASE + OFFSET. */
static void
-expand_one_stack_var_at (tree decl, HOST_WIDE_INT offset)
+expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
+ HOST_WIDE_INT offset)
{
- /* Alignment is unsigned. */
- unsigned HOST_WIDE_INT align;
+ unsigned align;
rtx x;
/* If this fails, we've overflowed the stack frame. Error nicely? */
gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
- x = plus_constant (virtual_stack_vars_rtx, offset);
+ x = plus_constant (base, offset);
x = gen_rtx_MEM (DECL_MODE (SSAVAR (decl)), x);
if (TREE_CODE (decl) != SSA_NAME)
/* Set alignment we actually gave this decl if it isn't an SSA name.
If it is we generate stack slots only accidentally so it isn't as
important, we'll simply use the alignment that is already set. */
- offset -= frame_phase;
+ if (base == virtual_stack_vars_rtx)
+ offset -= frame_phase;
align = offset & -offset;
align *= BITS_PER_UNIT;
- if (align == 0)
- align = STACK_BOUNDARY;
- else if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
- align = MAX_SUPPORTED_STACK_ALIGNMENT;
+ if (align == 0 || align > base_align)
+ align = base_align;
+
+ /* One would think that we could assert that we're not decreasing
+ alignment here, but (at least) the i386 port does exactly this
+ via the MINIMUM_ALIGNMENT hook. */
DECL_ALIGN (decl) = align;
DECL_USER_ALIGN (decl) = 0;
expand_stack_vars (bool (*pred) (tree))
{
size_t si, i, j, n = stack_vars_num;
+ HOST_WIDE_INT large_size = 0, large_alloc = 0;
+ rtx large_base = NULL;
+ unsigned large_align = 0;
+ tree decl;
+
+ /* Determine if there are any variables requiring "large" alignment.
+ Since these are dynamically allocated, we only process these if
+ no predicate is involved. */
+ large_align = stack_vars[stack_vars_sorted[0]].alignb * BITS_PER_UNIT;
+ if (pred == NULL && large_align > MAX_SUPPORTED_STACK_ALIGNMENT)
+ {
+ /* Find the total size of these variables. */
+ for (si = 0; si < n; ++si)
+ {
+ unsigned alignb;
+
+ i = stack_vars_sorted[si];
+ alignb = stack_vars[i].alignb;
+
+ /* Stop when we get to the first decl with "small" alignment. */
+ if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
+ break;
+
+ /* Skip variables that aren't partition representatives. */
+ if (stack_vars[i].representative != i)
+ continue;
+
+ /* Skip variables that have already had rtl assigned. See also
+ add_stack_var where we perpetrate this pc_rtx hack. */
+ decl = stack_vars[i].decl;
+ if ((TREE_CODE (decl) == SSA_NAME
+ ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)]
+ : DECL_RTL (decl)) != pc_rtx)
+ continue;
+
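+ /* Round the running total up to this variable's alignment before
+ adding its size. */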
+ large_size += alignb - 1;
+ large_size &= -(HOST_WIDE_INT)alignb;
+ large_size += stack_vars[i].size;
+ }
+
+ /* If there were any, allocate space. */
+ if (large_size > 0)
+ large_base = allocate_dynamic_stack_space (GEN_INT (large_size), 0,
+ large_align, true);
+ }
for (si = 0; si < n; ++si)
{
+ rtx base;
+ unsigned base_align, alignb;
HOST_WIDE_INT offset;
i = stack_vars_sorted[si];
/* Skip variables that have already had rtl assigned. See also
add_stack_var where we perpetrate this pc_rtx hack. */
- if ((TREE_CODE (stack_vars[i].decl) == SSA_NAME
- ? SA.partition_to_pseudo[var_to_partition (SA.map, stack_vars[i].decl)]
- : DECL_RTL (stack_vars[i].decl)) != pc_rtx)
+ decl = stack_vars[i].decl;
+ if ((TREE_CODE (decl) == SSA_NAME
+ ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)]
+ : DECL_RTL (decl)) != pc_rtx)
continue;
/* Check the predicate to see whether this variable should be
allocated in this pass. */
- if (pred && !pred (stack_vars[i].decl))
+ if (pred && !pred (decl))
continue;
- offset = alloc_stack_frame_space (stack_vars[i].size,
- stack_vars[i].alignb);
+ alignb = stack_vars[i].alignb;
+ if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
+ {
+ offset = alloc_stack_frame_space (stack_vars[i].size, alignb);
+ base = virtual_stack_vars_rtx;
+ base_align = crtl->max_used_stack_slot_alignment;
+ }
+ else
+ {
+ /* Large alignment is only processed in the last pass. */
+ if (pred)
+ continue;
+ gcc_assert (large_base != NULL);
+
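+ /* Carve this variable's slot out of the dynamically allocated block,
+ keeping the running offset suitably aligned. */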
+ large_alloc += alignb - 1;
+ large_alloc &= -(HOST_WIDE_INT)alignb;
+ offset = large_alloc;
+ large_alloc += stack_vars[i].size;
+
+ base = large_base;
+ base_align = large_align;
+ }
/* Create rtl for each variable based on their location within the
partition. */
{
gcc_assert (stack_vars[j].offset <= stack_vars[i].size);
expand_one_stack_var_at (stack_vars[j].decl,
+ base, base_align,
stack_vars[j].offset + offset);
}
}
+
+ gcc_assert (large_alloc == large_size);
}
/* Take into account all sizes of partitions and reset DECL_RTLs. */
static void
expand_one_stack_var (tree var)
{
- HOST_WIDE_INT size, offset, align;
+ HOST_WIDE_INT size, offset;
+ unsigned byte_align;
size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (var)), 1);
- align = get_decl_align_unit (SSAVAR (var));
- offset = alloc_stack_frame_space (size, align);
+ byte_align = get_decl_align_unit (SSAVAR (var));
+
+ /* We handle highly aligned variables in expand_stack_vars. */
+ gcc_assert (byte_align * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT);
- expand_one_stack_var_at (var, offset);
+ offset = alloc_stack_frame_space (size, byte_align);
+
+ expand_one_stack_var_at (var, virtual_stack_vars_rtx,
+ crtl->max_used_stack_slot_alignment, offset);
}
/* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL
if (flag_stack_protect)
return true;
+ /* We handle "large" alignment via dynamic allocation. We want to handle
+ this extra complication in only one place, so defer such variables. */
+ if (DECL_ALIGN (var) > MAX_SUPPORTED_STACK_ALIGNMENT)
+ return true;
+
/* Variables in the outermost scope automatically conflict with
every other variable. The only reason to want to defer them
at all is that, after sorting, we can more efficiently pack
static HOST_WIDE_INT
expand_one_var (tree var, bool toplevel, bool really_expand)
{
+ unsigned int align = BITS_PER_UNIT;
tree origvar = var;
+
var = SSAVAR (var);
- if (SUPPORTS_STACK_ALIGNMENT
- && TREE_TYPE (var) != error_mark_node
- && TREE_CODE (var) == VAR_DECL)
+ if (TREE_TYPE (var) != error_mark_node && TREE_CODE (var) == VAR_DECL)
{
- unsigned int align;
-
/* Because we don't know if VAR will be in register or on stack,
we conservatively assume it will be on stack even if VAR is
eventually put into register after RA pass. For non-automatic
align = MINIMUM_ALIGNMENT (TREE_TYPE (var),
TYPE_MODE (TREE_TYPE (var)),
TYPE_ALIGN (TREE_TYPE (var)));
+ else if (DECL_HAS_VALUE_EXPR_P (var)
+ || (DECL_RTL_SET_P (var) && MEM_P (DECL_RTL (var))))
+ /* Don't consider debug-only variables with DECL_HAS_VALUE_EXPR_P set
+ or variables which were assigned a stack slot already by
+ expand_one_stack_var_at - in the latter case DECL_ALIGN has been
+ adjusted to match the offset chosen for it. */
+ align = crtl->stack_alignment_estimated;
else
align = MINIMUM_ALIGNMENT (var, DECL_MODE (var), DECL_ALIGN (var));
- if (crtl->stack_alignment_estimated < align)
- {
- /* stack_alignment_estimated shouldn't change after stack
- realign decision made */
- gcc_assert(!crtl->stack_realign_processed);
- crtl->stack_alignment_estimated = align;
- }
+ /* If the variable alignment is very large, we'll dynamically allocate
+ it, which means the in-frame portion is just a pointer. */
+ if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
+ align = POINTER_SIZE;
+ }
+
+ if (SUPPORTS_STACK_ALIGNMENT
+ && crtl->stack_alignment_estimated < align)
+ {
+ /* stack_alignment_estimated shouldn't change after the stack
+ realign decision has been made. */
+ gcc_assert (!crtl->stack_realign_processed);
+ crtl->stack_alignment_estimated = align;
}
+ /* stack_alignment_needed > PREFERRED_STACK_BOUNDARY is permitted.
+ So here we only make sure stack_alignment_needed >= align. */
+ if (crtl->stack_alignment_needed < align)
+ crtl->stack_alignment_needed = align;
+ if (crtl->max_used_stack_slot_alignment < align)
+ crtl->max_used_stack_slot_alignment = align;
+
if (TREE_CODE (origvar) == SSA_NAME)
{
gcc_assert (TREE_CODE (var) != VAR_DECL
old_sv_num = toplevel ? 0 : stack_vars_num;
/* Expand all variables at this level. */
- for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t))
+ for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t))
if (TREE_USED (t))
expand_one_var (t, toplevel, true);
{
tree t;
- for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t))
+ for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t))
/* if (!TREE_STATIC (t) && !DECL_EXTERNAL (t)) */
TREE_USED (t) = 0;
HOST_WIDE_INT size = 0;
/* Expand all variables at this level. */
- for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t))
+ for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t))
if (TREE_USED (t))
size += expand_one_var (t, toplevel, false);
init_vars_expansion (void)
{
tree t;
+ unsigned ix;
/* Set TREE_USED on all variables in the local_decls. */
- for (t = cfun->local_decls; t; t = TREE_CHAIN (t))
- TREE_USED (TREE_VALUE (t)) = 1;
+ FOR_EACH_LOCAL_DECL (cfun, ix, t)
+ TREE_USED (t) = 1;
/* Clear TREE_USED on all variables associated with a block scope. */
clear_tree_used (DECL_INITIAL (current_function_decl));
stack_vars_alloc = stack_vars_num = 0;
}
-/* Make a fair guess for the size of the stack frame of the current
- function. This doesn't have to be exact, the result is only used
+/* Make a fair guess for the size of the stack frame of the decl
+ passed. This doesn't have to be exact; the result is only used
in the inline heuristics. So we don't want to run the full stack
var packing algorithm (which is quadratic in the number of stack
vars). Instead, we calculate the total size of all stack vars.
vars doesn't happen very often. */
HOST_WIDE_INT
-estimated_stack_frame_size (void)
+estimated_stack_frame_size (tree decl)
{
HOST_WIDE_INT size = 0;
size_t i;
- tree t, outer_block = DECL_INITIAL (current_function_decl);
+ tree var, outer_block = DECL_INITIAL (current_function_decl);
+ unsigned ix;
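+ /* DECL may be a different function from the one currently being
+ compiled, so temporarily switch into its context. */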
+ tree old_cur_fun_decl = current_function_decl;
+ current_function_decl = decl;
+ push_cfun (DECL_STRUCT_FUNCTION (decl));
init_vars_expansion ();
- for (t = cfun->local_decls; t; t = TREE_CHAIN (t))
+ FOR_EACH_LOCAL_DECL (cfun, ix, var)
{
- tree var = TREE_VALUE (t);
-
if (TREE_USED (var))
size += expand_one_var (var, true, false);
TREE_USED (var) = 1;
size += account_stack_vars ();
fini_vars_expansion ();
}
-
+ pop_cfun ();
+ current_function_decl = old_cur_fun_decl;
return size;
}
static void
expand_used_vars (void)
{
- tree t, next, outer_block = DECL_INITIAL (current_function_decl);
- tree maybe_local_decls = NULL_TREE;
+ tree var, outer_block = DECL_INITIAL (current_function_decl);
+ VEC(tree,heap) *maybe_local_decls = NULL;
unsigned i;
+ unsigned len;
/* Compute the phase of the stack frame for this function. */
{
/* At this point all variables on the local_decls with TREE_USED
set are not associated with any block scope. Lay them out. */
- t = cfun->local_decls;
- cfun->local_decls = NULL_TREE;
- for (; t; t = next)
+
+ len = VEC_length (tree, cfun->local_decls);
+ FOR_EACH_LOCAL_DECL (cfun, i, var)
{
- tree var = TREE_VALUE (t);
bool expand_now = false;
- next = TREE_CHAIN (t);
-
/* Expanded above already. */
if (is_gimple_reg (var))
{
/* Keep artificial non-ignored vars in cfun->local_decls
chain until instantiate_decls. */
if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT))
- {
- TREE_CHAIN (t) = cfun->local_decls;
- cfun->local_decls = t;
- continue;
- }
+ add_local_decl (cfun, var);
else if (rtl == NULL_RTX)
- {
- /* If rtl isn't set yet, which can happen e.g. with
- -fstack-protector, retry before returning from this
- function. */
- TREE_CHAIN (t) = maybe_local_decls;
- maybe_local_decls = t;
- continue;
- }
+ /* If rtl isn't set yet, which can happen e.g. with
+ -fstack-protector, retry before returning from this
+ function. */
+ VEC_safe_push (tree, heap, maybe_local_decls, var);
}
-
- ggc_free (t);
}
+ /* We duplicated some of the decls in CFUN->LOCAL_DECLS.
+
+     +-----------------+-----------------+
+     | ...processed... | ...duplicates...|
+     +-----------------+-----------------+
+                       ^
+                       +-- LEN points here.
+
+ We just want the duplicates, as those are the artificial
+ non-ignored vars that we want to keep until instantiate_decls.
+ Move them down and truncate the array. */
+ if (!VEC_empty (tree, cfun->local_decls))
+ VEC_block_remove (tree, cfun->local_decls, 0, len);
+
/* At this point, all variables within the block tree with TREE_USED
set are actually used by the optimized function. Lay them out. */
expand_used_vars_for_block (outer_block, true);
/* If there were any artificial non-ignored vars without rtl
found earlier, see if deferred stack allocation hasn't assigned
rtl to them. */
- for (t = maybe_local_decls; t; t = next)
+ FOR_EACH_VEC_ELT_REVERSE (tree, maybe_local_decls, i, var)
{
- tree var = TREE_VALUE (t);
rtx rtl = DECL_RTL_IF_SET (var);
- next = TREE_CHAIN (t);
-
/* Keep artificial non-ignored vars in cfun->local_decls
chain until instantiate_decls. */
if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT))
- {
- TREE_CHAIN (t) = cfun->local_decls;
- cfun->local_decls = t;
- continue;
- }
-
- ggc_free (t);
+ add_local_decl (cfun, var);
}
+ VEC_free (tree, heap, maybe_local_decls);
/* If the target requires that FRAME_OFFSET be aligned, do it. */
if (STACK_ALIGNMENT_NEEDED)
{
insn = PREV_INSN (insn);
if (JUMP_P (NEXT_INSN (insn)))
- delete_insn (NEXT_INSN (insn));
+ {
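+ /* If the jump is unconditional, it is followed by a barrier; delete
+ the barrier as well before removing the jump itself. */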
+ if (!any_condjump_p (NEXT_INSN (insn)))
+ {
+ gcc_assert (BARRIER_P (NEXT_INSN (NEXT_INSN (insn))));
+ delete_insn (NEXT_INSN (NEXT_INSN (insn)));
+ }
+ delete_insn (NEXT_INSN (insn));
+ }
}
}
}
ops.type = TREE_TYPE (lhs);
switch (get_gimple_rhs_class (gimple_expr_code (stmt)))
{
+ case GIMPLE_TERNARY_RHS:
+ ops.op2 = gimple_assign_rhs3 (stmt);
+ /* Fallthru */
case GIMPLE_BINARY_RHS:
ops.op1 = gimple_assign_rhs2 (stmt);
/* Fallthru */
enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
addr_space_t as;
- enum machine_mode address_mode;
switch (TREE_CODE_CLASS (TREE_CODE (exp)))
{
{
case COND_EXPR:
case DOT_PROD_EXPR:
+ case WIDEN_MULT_PLUS_EXPR:
+ case WIDEN_MULT_MINUS_EXPR:
+ case FMA_EXPR:
goto ternary;
case TRUTH_ANDIF_EXPR:
/* If op0 is not BLKmode, but BLKmode is, adjust_mode
below would ICE. While it is likely a FE bug,
try to be robust here. See PR43166. */
- || mode == BLKmode)
+ || mode == BLKmode
+ || (mode == VOIDmode && GET_MODE (op0) != VOIDmode))
{
gcc_assert (MEM_P (op0));
op0 = adjust_address_nv (op0, mode, 0);
op0 = simplify_gen_subreg (mode, op0, inner_mode,
subreg_lowpart_offset (mode,
inner_mode));
- else if (unsignedp)
+ else if (TREE_CODE_CLASS (TREE_CODE (exp)) == tcc_unary
+ ? TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ : unsignedp)
op0 = gen_rtx_ZERO_EXTEND (mode, op0);
else
op0 = gen_rtx_SIGN_EXTEND (mode, op0);
return op0;
}
+ case MEM_REF:
case INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case MISALIGNED_INDIRECT_REF:
op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
if (!op0)
return NULL;
- if (POINTER_TYPE_P (TREE_TYPE (exp)))
- {
- as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
- address_mode = targetm.addr_space.address_mode (as);
- }
- else
+ if (TREE_CODE (exp) == MEM_REF)
{
- as = ADDR_SPACE_GENERIC;
- address_mode = Pmode;
- }
+ op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
+ if (!op1 || !CONST_INT_P (op1))
+ return NULL;
- if (TREE_CODE (exp) == ALIGN_INDIRECT_REF)
- {
- int align = TYPE_ALIGN_UNIT (TREE_TYPE (exp));
- op0 = gen_rtx_AND (address_mode, op0, GEN_INT (-align));
+ op0 = plus_constant (op0, INTVAL (op1));
}
+ if (POINTER_TYPE_P (TREE_TYPE (exp)))
+ as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
+ else
+ as = ADDR_SPACE_GENERIC;
+
op0 = gen_rtx_MEM (mode, op0);
set_mem_attributes (op0, exp, 0);
return op0;
case TARGET_MEM_REF:
- if (TMR_SYMBOL (exp) && !DECL_RTL_SET_P (TMR_SYMBOL (exp)))
+ if (TREE_CODE (TMR_BASE (exp)) == ADDR_EXPR
+ && !DECL_RTL_SET_P (TREE_OPERAND (TMR_BASE (exp), 0)))
return NULL;
op0 = expand_debug_expr
{
enum machine_mode addrmode, offmode;
- gcc_assert (MEM_P (op0));
+ if (!MEM_P (op0))
+ return NULL;
op0 = XEXP (op0, 0);
addrmode = GET_MODE (op0);
if (bitpos < 0)
return NULL;
+ if (GET_MODE (op0) == BLKmode)
+ return NULL;
+
if ((bitpos % BITS_PER_UNIT) == 0
&& bitsize == GET_MODE_BITSIZE (mode1))
{
enum machine_mode opmode = GET_MODE (op0);
- gcc_assert (opmode != BLKmode);
-
if (opmode == VOIDmode)
- opmode = mode1;
+ opmode = TYPE_MODE (TREE_TYPE (tem));
/* This condition may hold if we're expanding the address
right past the end of an array that turned out not to
? SIGN_EXTRACT
: ZERO_EXTRACT, mode,
GET_MODE (op0) != VOIDmode
- ? GET_MODE (op0) : mode1,
+ ? GET_MODE (op0)
+ : TYPE_MODE (TREE_TYPE (tem)),
op0, GEN_INT (bitsize), GEN_INT (bitpos));
}
return gen_rtx_FIX (mode, op0);
case POINTER_PLUS_EXPR:
+ /* For the rare target where pointers are not the same size as
+ size_t, we need to check for mis-matched modes and correct
+ the addend. */
+ if (op0 && op1
+ && GET_MODE (op0) != VOIDmode && GET_MODE (op1) != VOIDmode
+ && GET_MODE (op0) != GET_MODE (op1))
+ {
+ if (GET_MODE_BITSIZE (GET_MODE (op0)) < GET_MODE_BITSIZE (GET_MODE (op1)))
+ op1 = gen_rtx_TRUNCATE (GET_MODE (op0), op1);
+ else
+ /* We always sign-extend, regardless of the signedness of
+ the operand, because the operand is always unsigned
+ here even if the original C expression is signed. */
+ op1 = gen_rtx_SIGN_EXTEND (GET_MODE (op0), op1);
+ }
+ /* Fall through. */
case PLUS_EXPR:
return gen_rtx_PLUS (mode, op0, op1);
case ADDR_EXPR:
op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
if (!op0 || !MEM_P (op0))
- return NULL;
+ {
+ if ((TREE_CODE (TREE_OPERAND (exp, 0)) == VAR_DECL
+ || TREE_CODE (TREE_OPERAND (exp, 0)) == PARM_DECL
+ || TREE_CODE (TREE_OPERAND (exp, 0)) == RESULT_DECL)
+ && !TREE_ADDRESSABLE (TREE_OPERAND (exp, 0)))
+ return gen_rtx_DEBUG_IMPLICIT_PTR (mode, TREE_OPERAND (exp, 0));
+
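+ /* The address of a component of a non-addressable decl can still be
+ represented if it reduces to a constant byte offset from the decl. */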
+ if (handled_component_p (TREE_OPERAND (exp, 0)))
+ {
+ HOST_WIDE_INT bitoffset, bitsize, maxsize;
+ tree decl
+ = get_ref_base_and_extent (TREE_OPERAND (exp, 0),
+ &bitoffset, &bitsize, &maxsize);
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL
+ || TREE_CODE (decl) == RESULT_DECL)
+ && !TREE_ADDRESSABLE (decl)
+ && (bitoffset % BITS_PER_UNIT) == 0
+ && bitsize > 0
+ && bitsize == maxsize)
+ return plus_constant (gen_rtx_DEBUG_IMPLICIT_PTR (mode, decl),
+ bitoffset / BITS_PER_UNIT);
+ }
+
+ return NULL;
+ }
op0 = convert_debug_memory_address (mode, XEXP (op0, 0));
if (i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)))
{
op1 = expand_debug_expr
- (fold_convert (TREE_TYPE (TREE_TYPE (exp)), integer_zero_node));
+ (build_zero_cst (TREE_TYPE (TREE_TYPE (exp))));
if (!op1)
return NULL;
return NULL;
case WIDEN_MULT_EXPR:
+ case WIDEN_MULT_PLUS_EXPR:
+ case WIDEN_MULT_MINUS_EXPR:
if (SCALAR_INT_MODE_P (GET_MODE (op0))
&& SCALAR_INT_MODE_P (mode))
{
+ enum machine_mode inner_mode = GET_MODE (op0);
if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))))
- op0 = gen_rtx_ZERO_EXTEND (mode, op0);
+ op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode);
else
- op0 = gen_rtx_SIGN_EXTEND (mode, op0);
+ op0 = simplify_gen_unary (SIGN_EXTEND, mode, op0, inner_mode);
if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1))))
- op1 = gen_rtx_ZERO_EXTEND (mode, op1);
+ op1 = simplify_gen_unary (ZERO_EXTEND, mode, op1, inner_mode);
else
- op1 = gen_rtx_SIGN_EXTEND (mode, op1);
- return gen_rtx_MULT (mode, op0, op1);
+ op1 = simplify_gen_unary (SIGN_EXTEND, mode, op1, inner_mode);
+ op0 = gen_rtx_MULT (mode, op0, op1);
+ if (TREE_CODE (exp) == WIDEN_MULT_EXPR)
+ return op0;
+ else if (TREE_CODE (exp) == WIDEN_MULT_PLUS_EXPR)
+ return gen_rtx_PLUS (mode, op0, op2);
+ else
+ return gen_rtx_MINUS (mode, op2, op0);
}
return NULL;
}
return NULL;
+ case FMA_EXPR:
+ return gen_rtx_FMA (mode, op0, op1, op2);
+
default:
flag_unsupported:
#ifdef ENABLE_CHECKING
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
- walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
+ if (!is_gimple_debug (stmt))
+ walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
}
}
stack. We check PREFERRED_STACK_BOUNDARY if there may be non-call
exceptions since callgraph doesn't collect incoming stack alignment
in this case. */
- if (flag_non_call_exceptions
+ if (cfun->can_throw_non_call_exceptions
&& PREFERRED_STACK_BOUNDARY > crtl->preferred_stack_boundary)
preferred_stack_boundary = PREFERRED_STACK_BOUNDARY;
else
sbitmap blocks;
edge_iterator ei;
edge e;
+ rtx var_seq;
unsigned i;
+ timevar_push (TV_OUT_OF_SSA);
rewrite_out_of_ssa (&SA);
+ timevar_pop (TV_OUT_OF_SSA);
SA.partition_to_pseudo = (rtx *)xcalloc (SA.map->num_partitions,
sizeof (rtx));
else
set_curr_insn_source_location (cfun->function_start_locus);
}
+ else
+ set_curr_insn_source_location (UNKNOWN_LOCATION);
set_curr_insn_block (DECL_INITIAL (current_function_decl));
prologue_locator = curr_insn_locator ();
+#ifdef INSN_SCHEDULING
+ init_sched_attrs ();
+#endif
+
/* Make sure first insn is a note even if we don't want linenums.
This makes sure the first insn will never be deleted.
Also, final expects a note to appear there. */
crtl->preferred_stack_boundary = STACK_BOUNDARY;
cfun->cfg->max_jumptable_ents = 0;
+ /* Resolve the function section. Some targets, like ARM EABI, rely on knowledge
+ of the function section at expansion time to predict the distance of calls. */
+ resolve_unique_section (current_function_decl, 0, flag_function_sections);
/* Expand the variables recorded during gimple lowering. */
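+ /* Collect any insns emitted while laying out variables (such as the
+ dynamic allocation of over-aligned locals) in a separate sequence so
+ they can be emitted at the proper place further below. */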
+ timevar_push (TV_VAR_EXPAND);
+ start_sequence ();
+
expand_used_vars ();
+ var_seq = get_insns ();
+ end_sequence ();
+ timevar_pop (TV_VAR_EXPAND);
+
/* Honor stack protection warnings. */
if (warn_stack_protect)
{
if (cfun->calls_alloca)
warning (OPT_Wstack_protector,
- "not protecting local variables: variable length buffer");
+ "stack protector not protecting local variables: "
+ "variable length buffer");
if (has_short_buffer && !crtl->stack_protect_guard)
warning (OPT_Wstack_protector,
- "not protecting function: no buffer at least %d bytes long",
+ "stack protector not protecting function: "
+ "all local arrays are less than %d bytes long",
(int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
}
/* Set up parameters and prepare for return, for the function. */
expand_function_start (current_function_decl);
+ /* If we emitted any instructions for setting up the variables,
+ emit them before the FUNCTION_START note. */
+ if (var_seq)
+ {
+ emit_insn_before (var_seq, parm_birth_insn);
+
+ /* In expand_function_end we'll insert the alloca save/restore
+ before parm_birth_insn. We've just inserted an alloca call.
+ Adjust the pointer to match. */
+ parm_birth_insn = var_seq;
+ }
+
/* Now that we also have the parameter RTXs, copy them over to our
partitions. */
for (i = 0; i < SA.map->num_partitions; i++)
expand_debug_locations ();
execute_free_datastructures ();
+ timevar_push (TV_OUT_OF_SSA);
finish_out_of_ssa (&SA);
+ timevar_pop (TV_OUT_OF_SSA);
+ timevar_push (TV_POST_EXPAND);
/* We are no longer in SSA form. */
cfun->gimple_df->in_ssa_p = false;
for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
if (e->insns.r)
- commit_one_edge_insertion (e);
+ {
+ /* Avoid putting insns before parm_birth_insn. */
+ if (e->src == ENTRY_BLOCK_PTR
+ && single_succ_p (ENTRY_BLOCK_PTR)
+ && parm_birth_insn)
+ {
+ rtx insns = e->insns.r;
+ e->insns.r = NULL_RTX;
+ emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
+ }
+ else
+ commit_one_edge_insertion (e);
+ }
else
ei_next (&ei);
}
the common parent easily. */
set_block_levels (DECL_INITIAL (cfun->decl), 0);
default_rtl_profile ();
+ timevar_pop (TV_POST_EXPAND);
return 0;
}