/* Expands front end tree to back end RTL for GCC.
Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
- 2010 Free Software Foundation, Inc.
+ 2010, 2011 Free Software Foundation, Inc.
This file is part of GCC.
prologue_insn_hash = NULL;
epilogue_insn_hash = NULL;
- if (crtl->emit.regno_pointer_align)
- free (crtl->emit.regno_pointer_align);
+ free (crtl->emit.regno_pointer_align);
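/* free accepts a null pointer as a no-op per the C standard, so the
   guard removed above was redundant. */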
memset (crtl, 0, sizeof (struct rtl_data));
f->eh = NULL;
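/* For reference, a sketch of the new interface (the exact values and
   declaring header are assumptions, not shown in this patch): the KIND
   argument below is a mask of bit flags along the lines of

     #define ASLK_REDUCE_ALIGN 1
     #define ASLK_RECORD_PAD 2

   with the flag names matching their uses in the code that follows. */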
-2 means use BITS_PER_UNIT,
positive specifies alignment boundary in bits.
- If REDUCE_ALIGNMENT_OK is true, it is OK to reduce alignment.
+ KIND has the ASLK_REDUCE_ALIGN bit set if it is OK to reduce the
+ requested alignment, and the ASLK_RECORD_PAD bit set if we should
+ remember any extra space we allocated for alignment purposes. When
+ we are called from assign_stack_temp_for_type, ASLK_RECORD_PAD is
+ not set, so that we do not track the same stack slot in two
+ independent lists.
We do not round to stack_boundary here. */
rtx
assign_stack_local_1 (enum machine_mode mode, HOST_WIDE_INT size,
- int align,
- bool reduce_alignment_ok ATTRIBUTE_UNUSED)
+ int align, int kind)
{
rtx x, addr;
int bigend_correction = 0;
/* It is OK to reduce the alignment as long as the
requested size is 0 or the estimated stack
alignment >= mode alignment. */
- gcc_assert (reduce_alignment_ok
+ gcc_assert ((kind & ASLK_REDUCE_ALIGN)
|| size == 0
|| (crtl->stack_alignment_estimated
>= GET_MODE_ALIGNMENT (mode)));
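/* The search below is a first-fit walk over the gaps recorded in
   crtl->frame_space_list: take the first gap the slot fits in and give
   any leading or trailing remainder back to the list. It is now
   guarded by ASLK_RECORD_PAD so that a slot handed out through
   assign_stack_temp_for_type is never tracked in two lists at once. */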
if (mode != BLKmode || size != 0)
{
- struct frame_space **psp;
-
- for (psp = &crtl->frame_space_list; *psp; psp = &(*psp)->next)
+ if (kind & ASLK_RECORD_PAD)
{
- struct frame_space *space = *psp;
- if (!try_fit_stack_local (space->start, space->length, size,
- alignment, &slot_offset))
- continue;
- *psp = space->next;
- if (slot_offset > space->start)
- add_frame_space (space->start, slot_offset);
- if (slot_offset + size < space->start + space->length)
- add_frame_space (slot_offset + size,
- space->start + space->length);
- goto found_space;
+ struct frame_space **psp;
+
+ for (psp = &crtl->frame_space_list; *psp; psp = &(*psp)->next)
+ {
+ struct frame_space *space = *psp;
+ if (!try_fit_stack_local (space->start, space->length, size,
+ alignment, &slot_offset))
+ continue;
+ *psp = space->next;
+ if (slot_offset > space->start)
+ add_frame_space (space->start, slot_offset);
+ if (slot_offset + size < space->start + space->length)
+ add_frame_space (slot_offset + size,
+ space->start + space->length);
+ goto found_space;
+ }
}
}
else if (!STACK_ALIGNMENT_NEEDED)
frame_offset -= size;
try_fit_stack_local (frame_offset, size, size, alignment, &slot_offset);
- if (slot_offset > frame_offset)
- add_frame_space (frame_offset, slot_offset);
- if (slot_offset + size < old_frame_offset)
- add_frame_space (slot_offset + size, old_frame_offset);
+ if (kind & ASLK_RECORD_PAD)
+ {
+ if (slot_offset > frame_offset)
+ add_frame_space (frame_offset, slot_offset);
+ if (slot_offset + size < old_frame_offset)
+ add_frame_space (slot_offset + size, old_frame_offset);
+ }
}
else
{
frame_offset += size;
try_fit_stack_local (old_frame_offset, size, size, alignment, &slot_offset);
- if (slot_offset > old_frame_offset)
- add_frame_space (old_frame_offset, slot_offset);
- if (slot_offset + size < frame_offset)
- add_frame_space (slot_offset + size, frame_offset);
+ if (kind & ASLK_RECORD_PAD)
+ {
+ if (slot_offset > old_frame_offset)
+ add_frame_space (old_frame_offset, slot_offset);
+ if (slot_offset + size < frame_offset)
+ add_frame_space (slot_offset + size, frame_offset);
+ }
}
found_space:
rtx
assign_stack_local (enum machine_mode mode, HOST_WIDE_INT size, int align)
{
- return assign_stack_local_1 (mode, size, align, false);
+ return assign_stack_local_1 (mode, size, align, ASLK_RECORD_PAD);
}
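/* Passing ASLK_RECORD_PAD but not ASLK_REDUCE_ALIGN here preserves the
   old behavior of this entry point (reduce_alignment_ok == false). */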
\f
and round it now. We also make sure ALIGNMENT is at least
BIGGEST_ALIGNMENT. */
gcc_assert (mode != BLKmode || align == BIGGEST_ALIGNMENT);
- p->slot = assign_stack_local (mode,
- (mode == BLKmode
- ? CEIL_ROUND (size, (int) align / BITS_PER_UNIT)
- : size),
- align);
+ p->slot = assign_stack_local_1 (mode,
+ (mode == BLKmode
+ ? CEIL_ROUND (size,
+ (int) align
+ / BITS_PER_UNIT)
+ : size),
+ align, 0);
p->align = align;
if (type != 0)
{
MEM_VOLATILE_P (slot) = TYPE_VOLATILE (type);
- MEM_SET_IN_STRUCT_P (slot, (AGGREGATE_TYPE_P (type)
- || TREE_CODE (type) == COMPLEX_TYPE));
+ gcc_checking_assert (!MEM_SCALAR_P (slot) && !MEM_IN_STRUCT_P (slot));
+ if (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
+ MEM_IN_STRUCT_P (slot) = 1;
+ else
+ MEM_SCALAR_P (slot) = 1;
}
MEM_NOTRAP_P (slot) = 1;
static int
safe_insn_predicate (int code, int operand, rtx x)
{
- const struct insn_operand_data *op_data;
-
- if (code < 0)
- return true;
-
- op_data = &insn_data[code].operand[operand];
- if (op_data->predicate == NULL)
- return true;
-
- return op_data->predicate (x, op_data->mode);
+ return code < 0 || insn_operand_matches ((enum insn_code) code, operand, x);
}
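/* insn_operand_matches folds in the null-predicate check that used to
   live here. A minimal sketch of the assumed semantics, mirroring the
   deleted code (the helper itself lives elsewhere):

     bool
     insn_operand_matches (enum insn_code icode, unsigned int opno, rtx x)
     {
       const struct insn_operand_data *op_data
         = &insn_data[(int) icode].operand[opno];
       return op_data->predicate == NULL
              || op_data->predicate (x, op_data->mode);
     }
*/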
/* A subroutine of instantiate_virtual_regs. Instantiate any virtual
if (! EXPR_P (t))
{
*walk_subtrees = 0;
- if (DECL_P (t) && DECL_RTL_SET_P (t))
- instantiate_decl_rtl (DECL_RTL (t));
+ if (DECL_P (t))
+ {
+ if (DECL_RTL_SET_P (t))
+ instantiate_decl_rtl (DECL_RTL (t));
+ if (TREE_CODE (t) == PARM_DECL && DECL_NAMELESS (t)
+ && DECL_INCOMING_RTL (t))
+ instantiate_decl_rtl (DECL_INCOMING_RTL (t));
+ if ((TREE_CODE (t) == VAR_DECL
+ || TREE_CODE (t) == RESULT_DECL)
+ && DECL_HAS_VALUE_EXPR_P (t))
+ {
+ tree v = DECL_VALUE_EXPR (t);
+ walk_tree (&v, instantiate_expr, NULL, NULL);
+ }
+ }
}
return NULL;
}
}
}
+ if ((decl = DECL_RESULT (fndecl))
+ && TREE_CODE (decl) == RESULT_DECL)
+ {
+ if (DECL_RTL_SET_P (decl))
+ instantiate_decl_rtl (DECL_RTL (decl));
+ if (DECL_HAS_VALUE_EXPR_P (decl))
+ {
+ tree v = DECL_VALUE_EXPR (decl);
+ walk_tree (&v, instantiate_expr, NULL, NULL);
+ }
+ }
+
/* Now process all variables defined in the function or its subblocks. */
instantiate_decls_1 (DECL_INITIAL (fndecl));
/* See allocate_dynamic_stack_space for the rationale. */
#ifdef SETJMP_VIA_SAVE_AREA
- if (flag_stack_usage && cfun->calls_setjmp)
+ if (flag_stack_usage_info && cfun->calls_setjmp)
{
int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
dynamic_offset = (dynamic_offset + align - 1) / align * align;
int by = (UNITS_PER_WORD - size) * BITS_PER_UNIT;
rtx reg = gen_rtx_REG (word_mode, REGNO (entry_parm));
- x = expand_shift (LSHIFT_EXPR, word_mode, reg,
- build_int_cst (NULL_TREE, by),
- NULL_RTX, 1);
+ x = expand_shift (LSHIFT_EXPR, word_mode, reg, by, NULL_RTX, 1);
tem = change_address (mem, word_mode, 0);
emit_move_insn (tem, x);
}
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
HARD_REG_SET *pset = (HARD_REG_SET *)data;
- if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
- {
- int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
- while (nregs-- > 0)
- SET_HARD_REG_BIT (*pset, REGNO (x) + nregs);
- }
+ if (REG_P (x) && HARD_REGISTER_P (x))
+ add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
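/* add_to_hard_reg_set subsumes the deleted loop: it sets the bit for
   REGNO and for each additional hard register that a value of the
   given mode occupies, as determined by hard_regno_nregs. */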
/* A subroutine of assign_parms. Allocate a pseudo to hold the current
op0 = parmreg;
op1 = validated_mem;
if (icode != CODE_FOR_nothing
- && insn_data[icode].operand[0].predicate (op0, promoted_nominal_mode)
- && insn_data[icode].operand[1].predicate (op1, data->passed_mode))
+ && insn_operand_matches (icode, 0, op0)
+ && insn_operand_matches (icode, 1, op1))
{
enum rtx_code code = unsignedp ? ZERO_EXTEND : SIGN_EXTEND;
rtx insn, insns;
}
/* Record permanently how this parm was passed. */
- set_decl_incoming_rtl (parm, data.entry_parm, data.passed_pointer);
+ if (data.passed_pointer)
+ {
+ rtx incoming_rtl
+ = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (data.passed_type)),
+ data.entry_parm);
+ set_decl_incoming_rtl (parm, incoming_rtl, true);
+ }
+ else
+ set_decl_incoming_rtl (parm, data.entry_parm, false);
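/* For a parameter passed by invisible reference, the recorded incoming
   RTL is now a MEM of the pointed-to type through the entry pointer
   rather than the bare pointer itself, presumably so that consumers of
   DECL_INCOMING_RTL (e.g. debug info) see the parameter's value
   location. */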
/* Update info on where next arg arrives in registers. */
targetm.calls.function_arg_advance (&all.args_so_far, data.promoted_mode,
t = built_in_decls[BUILT_IN_ALLOCA];
t = build_call_expr (t, 1, DECL_SIZE_UNIT (parm));
/* The call has been built for a variable-sized object. */
- ALLOCA_FOR_VAR_P (t) = 1;
+ CALL_ALLOCA_FOR_VAR_P (t) = 1;
t = fold_convert (ptr_type, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);
gimplify_and_add (t, &stmts);
return prev;
}
+/* Concatenate two chains of blocks (chained through BLOCK_CHAIN)
+ by modifying the last node in chain 1 to point to chain 2. */
+
+tree
+block_chainon (tree op1, tree op2)
+{
+ tree t1;
+
+ if (!op1)
+ return op2;
+ if (!op2)
+ return op1;
+
+ for (t1 = op1; BLOCK_CHAIN (t1); t1 = BLOCK_CHAIN (t1))
+ continue;
+ BLOCK_CHAIN (t1) = op2;
+
+#ifdef ENABLE_TREE_CHECKING
+ {
+ tree t2;
+ for (t2 = op2; t2; t2 = BLOCK_CHAIN (t2))
+ gcc_assert (t2 != t1);
+ }
+#endif
+
+ return op1;
+}
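/* Usage sketch (b1 and b2 are hypothetical BLOCK chains):

     tree merged = block_chainon (b1, b2);

   merged is b1 with the BLOCK_CHAIN of its last block pointing at b2;
   if either argument is NULL, the other chain is returned unchanged. */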
+
/* Count the subblocks of the list starting with BLOCK. If VECTOR is
non-NULL, list them all into VECTOR, in a depth-first preorder
traversal of the block tree. Also clear TREE_ASM_WRITTEN in all
return funcdef_no++;
}
+/* Return the current value of funcdef_no. */
+int
+get_last_funcdef_no (void)
+{
+ return funcdef_no;
+}
+
/* Allocate a function structure for FNDECL and set its contents
to the defaults. Set cfun to the newly-allocated object.
Some of the helper functions invoked during initialization assume
init_expr ();
default_rtl_profile ();
- if (flag_stack_usage)
+ if (flag_stack_usage_info)
{
cfun->su = ggc_alloc_cleared_stack_usage ();
cfun->su->static_stack_size = -1;
else
allocate_struct_function (subr, false);
prepare_function_start ();
+ decide_function_section (subr);
/* Warn if this value is an aggregate type,
regardless of which calling convention we are using for it. */
#endif
}
- /* After the display initializations is where the stack checking
- probe should go. */
- if(flag_stack_check)
+ /* If we are doing generic stack checking, the probe should go here. */
+ if (flag_stack_check == GENERIC_STACK_CHECK)
stack_check_probe_note = emit_note (NOTE_INSN_DELETED);
/* Make sure there is a line number after the function entry setup code. */
if (! EXIT_IGNORE_STACK
&& cfun->calls_alloca)
{
- rtx tem = 0;
+ rtx tem = 0, seq;

- emit_stack_save (SAVE_FUNCTION, &tem, parm_birth_insn);
- emit_stack_restore (SAVE_FUNCTION, tem, NULL_RTX);
+ start_sequence ();
+ emit_stack_save (SAVE_FUNCTION, &tem);
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn_before (seq, parm_birth_insn);
+
+ emit_stack_restore (SAVE_FUNCTION, tem);
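/* emit_stack_save no longer takes an insertion point, so the save is
   built in a sequence and placed before parm_birth_insn by hand. The
   general pattern, used in several places in this patch:

     start_sequence ();
     ...emit insns...
     seq = get_insns ();
     end_sequence ();
     emit_insn_before (seq, insertion_point);
*/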
}
/* ??? This should no longer be necessary since stupid is no longer with
}
#ifdef HAVE_return
+/* Insert use of return register before the end of BB. */
+
+static void
+emit_use_return_register_into_block (basic_block bb)
+{
+ rtx seq;
+ start_sequence ();
+ use_return_register ();
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn_before (seq, BB_END (bb));
+}
+
/* Insert gen_return at the end of block BB. This also means updating
block_for_insn appropriately. */
{
bool inserted;
rtx seq ATTRIBUTE_UNUSED, epilogue_end ATTRIBUTE_UNUSED;
- edge entry_edge ATTRIBUTE_UNUSED;
- edge e;
+ edge entry_edge, e;
edge_iterator ei;
rtl_profile_for_bb (ENTRY_BLOCK_PTR);
record_insns (seq, NULL, &prologue_insn_hash);
set_insn_locators (seq, prologue_locator);
- /* This relies on the fact that committing the edge insertion
- will look for basic blocks within the inserted instructions,
- which in turn relies on the fact that we are not in CFG
- layout mode here. */
insert_insn_on_edge (seq, entry_edge);
inserted = true;
#endif
with a simple return instruction. */
if (simplejump_p (jump))
{
+ /* The use of the return register might be present in the exit
+ fallthru block. Either:
+ - removing the use is safe, and we should remove the use in
+ the exit fallthru block, or
+ - removing the use is not safe, and we should add it here.
+ For now, we conservatively choose the latter. Either choice
+ helps crossjumping. */
+ emit_use_return_register_into_block (bb);
+
emit_return_into_block (bb);
delete_insn (jump);
}
continue;
}
+ /* See comment in the simplejump_p case above. */
+ emit_use_return_register_into_block (bb);
+
/* If this block has only one successor, it both jumps
and falls through to the fallthru block, so we can't
delete the edge. */
start_sequence ();
epilogue_end = emit_note (NOTE_INSN_EPILOGUE_BEG);
seq = gen_epilogue ();
- emit_jump_insn (seq);
+ if (seq)
+ emit_jump_insn (seq);
/* Retain a map of the epilogue insns. */
record_insns (seq, NULL, &epilogue_insn_hash);
cur_bb->aux = cur_bb->next_bb;
cfg_layout_finalize ();
}
+
epilogue_done:
default_rtl_profile ();
if (inserted)
{
+ sbitmap blocks;
+
commit_edge_insertions ();
+ /* Look for basic blocks within the prologue insns. */
+ blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (blocks);
+ SET_BIT (blocks, entry_edge->dest->index);
+ find_many_sub_basic_blocks (blocks);
+ sbitmap_free (blocks);
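/* This explicit pass replaces the former reliance on
   commit_edge_insertions discovering basic blocks within the inserted
   prologue (see the comment deleted earlier): any control flow the
   prologue contains is split into sub basic blocks here. */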
+
/* The epilogue insns we inserted may cause the exit edge to no longer
be fallthru. */
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
thread_prologue_and_epilogue_insns ();
/* The stack usage info is finalized during prologue expansion. */
- if (flag_stack_usage)
+ if (flag_stack_usage_info)
output_stack_usage ();
return 0;