#include "target.h"
#include "cfglayout.h"
#include "tree-gimple.h"
+#include "tree-pass.h"
+#include "predict.h"
#ifndef LOCAL_ALIGNMENT
#define LOCAL_ALIGNMENT(TYPE, ALIGNMENT) ALIGNMENT
static HOST_WIDE_INT
get_func_frame_size (struct function *f)
{
-#ifdef FRAME_GROWS_DOWNWARD
- return -f->x_frame_offset;
-#else
- return f->x_frame_offset;
-#endif
+ if (FRAME_GROWS_DOWNWARD)
+ return -f->x_frame_offset;
+ else
+ return f->x_frame_offset;
}
/* Return size needed for stack frame based on slots so far allocated.
else
alignment = align / BITS_PER_UNIT;
-#ifdef FRAME_GROWS_DOWNWARD
- function->x_frame_offset -= size;
-#endif
+ if (FRAME_GROWS_DOWNWARD)
+ function->x_frame_offset -= size;
/* Ignore alignment we can't do with expected alignment of the boundary. */
if (alignment * BITS_PER_UNIT > PREFERRED_STACK_BOUNDARY)
division with a negative dividend isn't as well defined as we might
like. So we instead assume that ALIGNMENT is a power of two and
use logical operations which are unambiguous. */
-#ifdef FRAME_GROWS_DOWNWARD
- function->x_frame_offset
- = (FLOOR_ROUND (function->x_frame_offset - frame_phase,
- (unsigned HOST_WIDE_INT) alignment)
- + frame_phase);
-#else
- function->x_frame_offset
- = (CEIL_ROUND (function->x_frame_offset - frame_phase,
- (unsigned HOST_WIDE_INT) alignment)
- + frame_phase);
-#endif
+ if (FRAME_GROWS_DOWNWARD)
+ function->x_frame_offset
+ = (FLOOR_ROUND (function->x_frame_offset - frame_phase,
+ (unsigned HOST_WIDE_INT) alignment)
+ + frame_phase);
+ else
+ function->x_frame_offset
+ = (CEIL_ROUND (function->x_frame_offset - frame_phase,
+ (unsigned HOST_WIDE_INT) alignment)
+ + frame_phase);
}
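To make the comment's point about negative dividends concrete: when ALIGNMENT is a power of two, the mask forms round reliably toward minus or plus infinity, whereas truncating division rounds toward zero. A minimal standalone sketch, not part of the patch (the two macros are re-declared locally to match their mask-based definitions):

    /* Illustrative only: power-of-two rounding with masks.  Truncating
       division rounds toward zero, which under-allocates for a
       downward-growing frame.  */
    #include <stdio.h>

    #define FLOOR_ROUND(x, a) ((x) & ~((a) - 1))             /* toward -inf */
    #define CEIL_ROUND(x, a)  (((x) + (a) - 1) & ~((a) - 1)) /* toward +inf */

    int
    main (void)
    {
      long offset = -13;   /* e.g. a downward-growing frame offset */
      long align = 8;

      printf ("floor: %ld\n", FLOOR_ROUND (offset, align));  /* -16 */
      printf ("ceil:  %ld\n", CEIL_ROUND (offset, align));   /* -8  */
      /* Truncating division gives -8 where the downward case needs -16.  */
      printf ("div:   %ld\n", offset / align * align);
      return 0;
    }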
/* On a big-endian machine, if we are allocating more space than we will use,
use the least significant bytes of those that are allocated. */
- if (BYTES_BIG_ENDIAN && mode != BLKmode)
+ if (BYTES_BIG_ENDIAN && mode != BLKmode && GET_MODE_SIZE (mode) < size)
bigend_correction = size - GET_MODE_SIZE (mode);
/* If we have already instantiated virtual registers, return the actual
(function->x_frame_offset + bigend_correction,
Pmode));
-#ifndef FRAME_GROWS_DOWNWARD
- function->x_frame_offset += size;
-#endif
+ if (!FRAME_GROWS_DOWNWARD)
+ function->x_frame_offset += size;
x = gen_rtx_MEM (mode, addr);
+ MEM_NOTRAP_P (x) = 1;
function->x_stack_slot_list
= gen_rtx_EXPR_LIST (VOIDmode, x, function->x_stack_slot_list);
+ /* Try to detect frame size overflows on native platforms. */
+#if BITS_PER_WORD >= 32
+ if ((FRAME_GROWS_DOWNWARD
+ ? (unsigned HOST_WIDE_INT) -function->x_frame_offset
+ : (unsigned HOST_WIDE_INT) function->x_frame_offset)
+ > ((unsigned HOST_WIDE_INT) 1 << (BITS_PER_WORD - 1))
+ /* Leave room for the fixed part of the frame. */
+ - 64 * UNITS_PER_WORD)
+ {
+ error ("%Jtotal size of local objects too large", function->decl);
+ /* Avoid duplicate error messages as much as possible. */
+ function->x_frame_offset = 0;
+ }
+#endif
+
return x;
}
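The new guard reduces to comparing the accumulated offset magnitude against half the word-addressable range, less 64 words of headroom for the fixed frame. A standalone sketch of that arithmetic, assuming a 32-bit word (BITS_PER_WORD and UNITS_PER_WORD are hard-coded here for illustration only):

    /* Sketch of the overflow guard above, assuming a 32-bit word.  The
       limit is 2^31 bytes minus 64 words of headroom for the fixed part
       of the frame (saved registers, return address, etc.).  */
    #include <stdio.h>

    #define BITS_PER_WORD  32   /* assumed for this example */
    #define UNITS_PER_WORD 4    /* assumed for this example */

    static int
    frame_offset_overflows (long long frame_offset, int frame_grows_downward)
    {
      unsigned long long size = frame_grows_downward
                                ? (unsigned long long) -frame_offset
                                : (unsigned long long) frame_offset;
      unsigned long long limit = ((unsigned long long) 1 << (BITS_PER_WORD - 1))
                                 - 64 * UNITS_PER_WORD;
      return size > limit;
    }

    int
    main (void)
    {
      printf ("%d\n", frame_offset_overflows (-0x7FFFFFF0LL, 1)); /* 1: too big */
      printf ("%d\n", frame_offset_overflows (-4096, 1));         /* 0: fine */
      return 0;
    }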
/* Try to find an available, already-allocated temporary of the proper
mode which meets the size and alignment requirements. Choose the
- smallest one with the closest alignment. */
- for (p = avail_temp_slots; p; p = p->next)
+ smallest one with the closest alignment.
+
+ If assign_stack_temp is called outside of the tree->rtl expansion,
+ we cannot reuse the stack slots (that may still refer to
+ VIRTUAL_STACK_VARS_REGNUM). */
+ if (!virtuals_instantiated)
{
- if (p->align >= align && p->size >= size && GET_MODE (p->slot) == mode
- && objects_must_conflict_p (p->type, type)
- && (best_p == 0 || best_p->size > p->size
- || (best_p->size == p->size && best_p->align > p->align)))
+ for (p = avail_temp_slots; p; p = p->next)
{
- if (p->align == align && p->size == size)
+ if (p->align >= align && p->size >= size
+ && GET_MODE (p->slot) == mode
+ && objects_must_conflict_p (p->type, type)
+ && (best_p == 0 || best_p->size > p->size
+ || (best_p->size == p->size && best_p->align > p->align)))
{
- selected = p;
- cut_slot_from_list (selected, &avail_temp_slots);
- best_p = 0;
- break;
+ if (p->align == align && p->size == size)
+ {
+ selected = p;
+ cut_slot_from_list (selected, &avail_temp_slots);
+ best_p = 0;
+ break;
+ }
+ best_p = p;
}
- best_p = p;
}
}
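The loop above is a best-fit search: among available slots that are large enough and at least as aligned as requested, it keeps the smallest, breaking size ties by the lowest alignment, and stops immediately on an exact match. A reduced, self-contained sketch of the same policy (hypothetical slot type; the mode and aliasing checks are omitted):

    /* Best-fit slot selection as in the loop above, reduced to size and
       alignment only.  Exact matches end the search immediately.  */
    #include <stddef.h>

    struct slot { size_t size; size_t align; };

    static struct slot *
    best_fit (struct slot *slots, size_t n, size_t size, size_t align)
    {
      struct slot *best = NULL;
      for (size_t i = 0; i < n; i++)
        {
          struct slot *p = &slots[i];
          if (p->align >= align && p->size >= size
              && (best == NULL || best->size > p->size
                  || (best->size == p->size && best->align > p->align)))
            {
              if (p->align == align && p->size == size)
                return p;           /* exact match: take it now */
              best = p;
            }
        }
      return best;
    }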
p->size = best_p->size - rounded_size;
p->base_offset = best_p->base_offset + rounded_size;
p->full_size = best_p->full_size - rounded_size;
- p->slot = gen_rtx_MEM (BLKmode,
- plus_constant (XEXP (best_p->slot, 0),
- rounded_size));
+ p->slot = adjust_address_nv (best_p->slot, BLKmode, rounded_size);
p->align = best_p->align;
p->address = 0;
p->type = best_p->type;
can be either above or below this stack slot depending on which
way the frame grows. We include the extra space if and only if it
is above this slot. */
-#ifdef FRAME_GROWS_DOWNWARD
- p->size = frame_offset_old - frame_offset;
-#else
- p->size = size;
-#endif
+ if (FRAME_GROWS_DOWNWARD)
+ p->size = frame_offset_old - frame_offset;
+ else
+ p->size = size;
/* Now define the fields used by combine_temp_slots. */
-#ifdef FRAME_GROWS_DOWNWARD
- p->base_offset = frame_offset;
- p->full_size = frame_offset_old - frame_offset;
-#else
- p->base_offset = frame_offset_old;
- p->full_size = frame_offset - frame_offset_old;
-#endif
+ if (FRAME_GROWS_DOWNWARD)
+ {
+ p->base_offset = frame_offset;
+ p->full_size = frame_offset_old - frame_offset;
+ }
+ else
+ {
+ p->base_offset = frame_offset_old;
+ p->full_size = frame_offset - frame_offset_old;
+ }
p->address = 0;
selected = p;
MEM_VOLATILE_P (slot) = TYPE_VOLATILE (type);
MEM_SET_IN_STRUCT_P (slot, AGGREGATE_TYPE_P (type));
}
+ MEM_NOTRAP_P (slot) = 1;
return slot;
}
if (decl && size == -1
&& TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST)
{
- error ("%Jsize of variable %qD is too large", decl, decl);
+ error ("size of variable %q+D is too large", decl);
size = 1;
}
#endif
#endif
-/* On most machines, the CFA coincides with the first incoming parm. */
-
-#ifndef ARG_POINTER_CFA_OFFSET
-#define ARG_POINTER_CFA_OFFSET(FNDECL) FIRST_PARM_OFFSET (FNDECL)
-#endif
-
\f
/* Given a piece of RTX and a pointer to a HOST_WIDE_INT, if the RTX
is a virtual register, return the equivalent hard register and set the
else if (x == virtual_outgoing_args_rtx)
new = stack_pointer_rtx, offset = out_arg_offset;
else if (x == virtual_cfa_rtx)
- new = arg_pointer_rtx, offset = cfa_offset;
+ {
+#ifdef FRAME_POINTER_CFA_OFFSET
+ new = frame_pointer_rtx;
+#else
+ new = arg_pointer_rtx;
+#endif
+ offset = cfa_offset;
+ }
else
return NULL_RTX;
for_each_rtx (&XEXP (x, 0), instantiate_virtual_regs_in_rtx, NULL);
}
+/* Helper for instantiate_decls called via walk_tree: Process all decls
+ in the given DECL_VALUE_EXPR. */
+
+static tree
+instantiate_expr (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
+{
+ tree t = *tp;
+ if (! EXPR_P (t))
+ {
+ *walk_subtrees = 0;
+ if (DECL_P (t) && DECL_RTL_SET_P (t))
+ instantiate_decl (DECL_RTL (t));
+ }
+ return NULL;
+}
+
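The callback relies on the walk_tree contract: returning NULL continues the walk, and clearing *walk_subtrees prunes everything below the node just visited. A runnable analogue with a hypothetical node type and driver (not GCC's walk_tree; names here are stand-ins):

    /* Sketch of the prune-via-*walk_subtrees contract used above.  */
    #include <stdio.h>
    #include <stddef.h>

    struct node { const char *name; int is_decl; struct node *kids[2]; };

    typedef struct node *(*walk_fn) (struct node **, int *, void *);

    static struct node *
    walk (struct node **np, walk_fn fn, void *data)
    {
      int walk_subtrees = 1;
      struct node *ret = fn (np, &walk_subtrees, data);
      if (ret || !walk_subtrees || !*np)
        return ret;
      for (size_t i = 0; i < 2; i++)
        if ((*np)->kids[i])
          if ((ret = walk (&(*np)->kids[i], fn, data)) != NULL)
            return ret;
      return NULL;
    }

    /* Analogue of instantiate_expr: act on decls, prune below them.  */
    static struct node *
    visit (struct node **np, int *walk_subtrees, void *data)
    {
      (void) data;
      if ((*np)->is_decl)
        {
          *walk_subtrees = 0;        /* do not walk into the decl */
          printf ("decl: %s\n", (*np)->name);
        }
      return NULL;                   /* NULL means keep walking */
    }

    int
    main (void)
    {
      struct node leaf = { "x", 1, { NULL, NULL } };
      struct node expr = { "+", 0, { &leaf, NULL } };
      struct node *root = &expr;
      walk (&root, visit, NULL);
      return 0;
    }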
/* Subroutine of instantiate_decls: Process all decls in the given
BLOCK node and all its subblocks. */
tree t;
for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
- if (DECL_RTL_SET_P (t))
- instantiate_decl (DECL_RTL (t));
+ {
+ if (DECL_RTL_SET_P (t))
+ instantiate_decl (DECL_RTL (t));
+ if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
+ {
+ tree v = DECL_VALUE_EXPR (t);
+ walk_tree (&v, instantiate_expr, NULL, NULL);
+ }
+ }
/* Process all subblocks. */
for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
{
instantiate_decl (DECL_RTL (decl));
instantiate_decl (DECL_INCOMING_RTL (decl));
+ if (DECL_HAS_VALUE_EXPR_P (decl))
+ {
+ tree v = DECL_VALUE_EXPR (decl);
+ walk_tree (&v, instantiate_expr, NULL, NULL);
+ }
}
/* Now process all variables defined in the function or its subblocks. */
/* Pass through the INSNS of function FNDECL and convert virtual register
references to hard register references. */
-void
+static void
instantiate_virtual_regs (void)
{
rtx insn;
var_offset = STARTING_FRAME_OFFSET;
dynamic_offset = STACK_DYNAMIC_OFFSET (current_function_decl);
out_arg_offset = STACK_POINTER_OFFSET;
+#ifdef FRAME_POINTER_CFA_OFFSET
+ cfa_offset = FRAME_POINTER_CFA_OFFSET (current_function_decl);
+#else
cfa_offset = ARG_POINTER_CFA_OFFSET (current_function_decl);
+#endif
/* Initialize recognition, indicating that volatile is OK. */
init_recog ();
frame_pointer_rtx. */
virtuals_instantiated = 1;
}
+
+struct tree_opt_pass pass_instantiate_virtual_regs =
+{
+ "vregs", /* name */
+ NULL, /* gate */
+ instantiate_virtual_regs, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ 0, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func, /* todo_flags_finish */
+ 0 /* letter */
+};
+
\f
/* Return 1 if EXP is an aggregate type (or a value with aggregate type).
This means a type for which function calls must pass an address to the
return 1;
/* Make sure we have suitable call-clobbered regs to return
the value in; if not, we must return it in memory. */
- reg = hard_function_value (type, 0, 0);
+ reg = hard_function_value (type, 0, fntype, 0);
/* If we have something other than a REG (e.g. a PARALLEL), then assume
it is OK. */
/* If the parm is to be passed as a transparent union, use the type of
the first field for the tests below. We have already verified that
the modes are the same. */
- if (DECL_TRANSPARENT_UNION (parm)
- || (TREE_CODE (passed_type) == UNION_TYPE
- && TYPE_TRANSPARENT_UNION (passed_type)))
+ if (TREE_CODE (passed_type) == UNION_TYPE
+ && TYPE_TRANSPARENT_UNION (passed_type))
passed_type = TREE_TYPE (TYPE_FIELDS (passed_type));
/* See if this arg was passed by invisible reference. */
&& data->nominal_mode != data->passed_mode)
stack_parm = NULL;
+ /* If stack protection is in effect for this function, don't leave any
+ pointers in their passed stack slots. */
+ else if (cfun->stack_protect_guard
+ && (flag_stack_protect == 2
+ || data->passed_pointer
+ || POINTER_TYPE_P (data->nominal_type)))
+ stack_parm = NULL;
+
data->stack_parm = stack_parm;
}
/* Store the parm in a pseudoregister during the function, but we may
need to do it in a wider mode. */
+ /* This is not really promoting for a call. However we need to be
+ consistent with assign_parm_find_data_types and expand_expr_real_1. */
promoted_nominal_mode
- = promote_mode (data->nominal_type, data->nominal_mode, &unsignedp, 0);
+ = promote_mode (data->nominal_type, data->nominal_mode, &unsignedp, 1);
parmreg = gen_reg_rtx (promoted_nominal_mode);
{
struct assign_parm_data_all all;
tree fnargs, parm;
- rtx internal_arg_pointer;
-
- /* If the reg that the virtual arg pointer will be translated into is
- not a fixed reg or is the stack pointer, make a copy of the virtual
- arg pointer, and address parms via the copy. The frame pointer is
- considered fixed even though it is not marked as such.
- The second time through, simply use ap to avoid generating rtx. */
-
- if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
- || ! (fixed_regs[ARG_POINTER_REGNUM]
- || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM)))
- internal_arg_pointer = copy_to_reg (virtual_incoming_args_rtx);
- else
- internal_arg_pointer = virtual_incoming_args_rtx;
- current_function_internal_arg_pointer = internal_arg_pointer;
+ current_function_internal_arg_pointer
+ = targetm.calls.internal_arg_pointer ();
assign_parms_initialize_all (&all);
fnargs = assign_parms_augmented_arg_list (&all);
REG_PARM_STACK_SPACE (fndecl));
#endif
- current_function_args_size
- = ((current_function_args_size + STACK_BYTES - 1)
- / STACK_BYTES) * STACK_BYTES;
+ current_function_args_size = CEIL_ROUND (current_function_args_size,
+ PARM_BOUNDARY / BITS_PER_UNIT);
#ifdef ARGS_GROW_DOWNWARD
current_function_arg_offset_rtx
{
rtx real_decl_rtl;
-#ifdef FUNCTION_OUTGOING_VALUE
- real_decl_rtl = FUNCTION_OUTGOING_VALUE (TREE_TYPE (decl_result),
- fndecl);
-#else
- real_decl_rtl = FUNCTION_VALUE (TREE_TYPE (decl_result),
- fndecl);
-#endif
+ real_decl_rtl = targetm.calls.function_value (TREE_TYPE (decl_result),
+ fndecl, true);
REG_FUNCTION_VALUE_P (real_decl_rtl) = 1;
/* The delay slot scheduler assumes that current_function_return_rtx
holds the hard register containing the return value, not a
{
tree sizetree;
enum direction where_pad;
- int boundary;
+ unsigned int boundary;
int reg_parm_stack_space = 0;
int part_size_in_regs;
locate->where_pad = where_pad;
locate->boundary = boundary;
+ /* Remember if the outgoing parameter requires extra alignment on the
+ calling function side. */
+ if (boundary > PREFERRED_STACK_BOUNDARY)
+ boundary = PREFERRED_STACK_BOUNDARY;
+ if (cfun->stack_alignment_needed < boundary)
+ cfun->stack_alignment_needed = boundary;
+
#ifdef ARGS_GROW_DOWNWARD
locate->slot_offset.constant = -initial_offset_ptr->constant;
if (initial_offset_ptr->var)
HOST_WIDE_INT sp_offset = STACK_POINTER_OFFSET;
#ifdef SPARC_STACK_BOUNDARY_HACK
- /* The sparc port has a bug. It sometimes claims a STACK_BOUNDARY
- higher than the real alignment of %sp. However, when it does this,
- the alignment of %sp+STACK_POINTER_OFFSET will be STACK_BOUNDARY.
- This is a temporary hack while the sparc port is fixed. */
+ /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
+ the real alignment of %sp. However, when it does this, the
+ alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
if (SPARC_STACK_BOUNDARY_HACK)
sp_offset = 0;
#endif
&& DECL_RTL_SET_P (decl)
&& REG_P (DECL_RTL (decl))
&& regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
- warning (0, "%Jvariable %qD might be clobbered by %<longjmp%>"
+ warning (0, "variable %q+D might be clobbered by %<longjmp%>"
" or %<vfork%>",
- decl, decl);
+ decl);
}
for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
if (DECL_RTL (decl) != 0
&& REG_P (DECL_RTL (decl))
&& regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
- warning (0, "%Jargument %qD might be clobbered by %<longjmp%> or %<vfork%>",
- decl, decl);
+ warning (0, "argument %q+D might be clobbered by %<longjmp%> or %<vfork%>",
+ decl);
}
\f
tree *block_vector;
*n_blocks_p = all_blocks (block, NULL);
- block_vector = xmalloc (*n_blocks_p * sizeof (tree));
+ block_vector = XNEWVEC (tree, *n_blocks_p);
all_blocks (block, block_vector);
return block_vector;
gcc_assert (VEC_length (int, sibcall_epilogue) == 0);
}
+struct tree_opt_pass pass_init_function =
+{
+ NULL, /* name */
+ NULL, /* gate */
+ init_function_for_compilation, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ 0, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+ 0 /* letter */
+};
+
void
expand_main_function (void)
{
-#ifdef FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
- if (FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN)
- {
- int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
- rtx tmp, seq;
+#if (defined(INVOKE__main) \
+ || (!defined(HAS_INIT_SECTION) \
+ && !defined(INIT_SECTION_ASM_OP) \
+ && !defined(INIT_ARRAY_SECTION_ASM_OP)))
+ emit_library_call (init_one_libfunc (NAME__MAIN), LCT_NORMAL, VOIDmode, 0);
+#endif
+}
+\f
+/* Expand code to initialize the stack_protect_guard. This is invoked at
+ the beginning of a function to be protected. */
- start_sequence ();
- /* Forcibly align the stack. */
-#ifdef STACK_GROWS_DOWNWARD
- tmp = expand_simple_binop (Pmode, AND, stack_pointer_rtx, GEN_INT(-align),
- stack_pointer_rtx, 1, OPTAB_WIDEN);
-#else
- tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
- GEN_INT (align - 1), NULL_RTX, 1, OPTAB_WIDEN);
- tmp = expand_simple_binop (Pmode, AND, tmp, GEN_INT (-align),
- stack_pointer_rtx, 1, OPTAB_WIDEN);
+#ifndef HAVE_stack_protect_set
+# define HAVE_stack_protect_set 0
+# define gen_stack_protect_set(x,y) (gcc_unreachable (), NULL_RTX)
#endif
- if (tmp != stack_pointer_rtx)
- emit_move_insn (stack_pointer_rtx, tmp);
- /* Enlist allocate_dynamic_stack_space to pick up the pieces. */
- tmp = force_reg (Pmode, const0_rtx);
- allocate_dynamic_stack_space (tmp, NULL_RTX, BIGGEST_ALIGNMENT);
- seq = get_insns ();
- end_sequence ();
+void
+stack_protect_prologue (void)
+{
+ tree guard_decl = targetm.stack_protect_guard ();
+ rtx x, y;
+
+ /* Avoid expand_expr here, because we don't want guard_decl pulled
+ into registers unless absolutely necessary. And we know that
+ cfun->stack_protect_guard is a local stack slot, so this skips
+ all the fluff. */
+ x = validize_mem (DECL_RTL (cfun->stack_protect_guard));
+ y = validize_mem (DECL_RTL (guard_decl));
+
+ /* Allow the target to copy from Y to X without leaking Y into a
+ register. */
+ if (HAVE_stack_protect_set)
+ {
+ rtx insn = gen_stack_protect_set (x, y);
+ if (insn)
+ {
+ emit_insn (insn);
+ return;
+ }
+ }
- for (tmp = get_last_insn (); tmp; tmp = PREV_INSN (tmp))
- if (NOTE_P (tmp) && NOTE_LINE_NUMBER (tmp) == NOTE_INSN_FUNCTION_BEG)
- break;
+ /* Otherwise do a straight move. */
+ emit_move_insn (x, y);
+}
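stack_protect_prologue follows the usual optional-pattern idiom: HAVE_stack_protect_set and gen_stack_protect_set come from the generated insn tables when the target's machine description provides the named pattern, and the expander falls back to a plain move otherwise. A self-contained analogue of that control shape (every name below is a stand-in, not a GCC interface):

    /* Sketch of "optional named pattern with generic fallback".
       HAVE_SPECIAL/gen_special stand in for HAVE_stack_protect_set and
       gen_stack_protect_set; the printf is the emit_move_insn fallback.  */
    #include <stdio.h>

    #define HAVE_SPECIAL 0              /* target did not define the pattern */

    static int
    gen_special (int dst, int src)
    {
      (void) dst; (void) src;
      return 0;                         /* pattern may also decline at run time */
    }

    static void
    copy_guard (int dst, int src)
    {
      if (HAVE_SPECIAL)
        {
          if (gen_special (dst, src))
            return;                     /* target handled the copy */
        }
      printf ("generic copy r%d <- r%d\n", dst, src);   /* fallback path */
    }

    int
    main (void)
    {
      copy_guard (1, 2);
      return 0;
    }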
+
+/* Expand code to verify the stack_protect_guard. This is invoked at
+ the end of a function to be protected. */
+
+#ifndef HAVE_stack_protect_test
+# define HAVE_stack_protect_test 0
+# define gen_stack_protect_test(x, y, z) (gcc_unreachable (), NULL_RTX)
+#endif
+
+void
+stack_protect_epilogue (void)
+{
+ tree guard_decl = targetm.stack_protect_guard ();
+ rtx label = gen_label_rtx ();
+ rtx x, y, tmp;
+
+ /* Avoid expand_expr here, because we don't want guard_decl pulled
+ into registers unless absolutely necessary. And we know that
+ cfun->stack_protect_guard is a local stack slot, so this skips
+ all the fluff. */
+ x = validize_mem (DECL_RTL (cfun->stack_protect_guard));
+ y = validize_mem (DECL_RTL (guard_decl));
+
+ /* Allow the target to compare Y with X without leaking either into
+ a register. */
+ switch (HAVE_stack_protect_test != 0)
+ {
+ case 1:
+ tmp = gen_stack_protect_test (x, y, label);
if (tmp)
- emit_insn_before (seq, tmp);
- else
- emit_insn (seq);
+ {
+ emit_insn (tmp);
+ break;
+ }
+ /* FALLTHRU */
+
+ default:
+ emit_cmp_and_jump_insns (x, y, EQ, NULL_RTX, ptr_mode, 1, label);
+ break;
}
-#endif
-#if (defined(INVOKE__main) \
- || (!defined(HAS_INIT_SECTION) \
- && !defined(INIT_SECTION_ASM_OP) \
- && !defined(INIT_ARRAY_SECTION_ASM_OP)))
- emit_library_call (init_one_libfunc (NAME__MAIN), LCT_NORMAL, VOIDmode, 0);
-#endif
+ /* The noreturn predictor has been moved to the tree level. The rtl-level
+ predictors estimate this branch about 20%, which isn't enough to get
+ things moved out of line. Since this is the only extant case of adding
+ a noreturn function at the rtl level, it doesn't seem worth doing anything
+ except adding the prediction by hand. */
+ tmp = get_last_insn ();
+ if (JUMP_P (tmp))
+ predict_insn_def (tmp, PRED_NORETURN, TAKEN);
+
+ expand_expr_stmt (targetm.stack_protect_fail ());
+ emit_label (label);
}
\f
/* Start the RTL for a new function, and set variables used for
/* In order to figure out what mode to use for the pseudo, we
figure out what the mode of the eventual return register will
actually be, and use that. */
- rtx hard_reg = hard_function_value (return_type, subr, 1);
+ rtx hard_reg = hard_function_value (return_type, subr, 0, 1);
/* Structures that are returned in registers are not
aggregate_value_p, so we may see a PARALLEL or a REG. */
decl; decl = TREE_CHAIN (decl))
if (!TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL
&& DECL_NAME (decl) && !DECL_ARTIFICIAL (decl))
- warning (0, "%Junused parameter %qD", decl, decl);
+ warning (OPT_Wunused_parameter, "unused parameter %q+D", decl);
}
static GTY(()) rtx initial_trampoline;
clear_pending_stack_adjust ();
do_pending_stack_adjust ();
- /* @@@ This is a kludge. We want to ensure that instructions that
- may trap are not moved into the epilogue by scheduling, because
- we don't always emit unwind information for the epilogue.
- However, not all machine descriptions define a blockage insn, so
- emit an ASM_INPUT to act as one. */
- if (flag_non_call_exceptions)
- emit_insn (gen_rtx_ASM_INPUT (VOIDmode, ""));
-
/* Mark the end of the function body.
If control reaches this insn, the function can drop through
without returning a value. */
/* Output the label for the actual return from the function. */
emit_label (return_label);
- /* Let except.c know where it should emit the call to unregister
- the function context for sjlj exceptions. */
- if (flag_exceptions && USING_SJLJ_EXCEPTIONS)
- sjlj_emit_function_exit_after (get_last_insn ());
+ if (USING_SJLJ_EXCEPTIONS)
+ {
+ /* Let except.c know where it should emit the call to unregister
+ the function context for sjlj exceptions. */
+ if (flag_exceptions)
+ sjlj_emit_function_exit_after (get_last_insn ());
+ }
+ else
+ {
+ /* @@@ This is a kludge. We want to ensure that instructions that
+ may trap are not moved into the epilogue by scheduling, because
+ we don't always emit unwind information for the epilogue.
+ However, not all machine descriptions define a blockage insn, so
+ emit an ASM_INPUT to act as one. */
+ if (flag_non_call_exceptions)
+ emit_insn (gen_rtx_ASM_INPUT (VOIDmode, ""));
+ }
+
+ /* If this is an implementation of throw, do what's necessary to
+ communicate between __builtin_eh_return and the epilogue. */
+ expand_eh_return ();
/* If scalar return value was computed in a pseudo-reg, or was a named
return value that got dumped to the stack, copy that to the hard
TREE_TYPE (decl_result),
int_size_in_bytes (TREE_TYPE (decl_result)));
}
+ /* In the case of complex integer modes smaller than a word, we'll
+ need to generate some non-trivial bitfield insertions. Do that
+ on a pseudo and not the hard register. */
+ else if (GET_CODE (decl_rtl) == CONCAT
+ && GET_MODE_CLASS (GET_MODE (decl_rtl)) == MODE_COMPLEX_INT
+ && GET_MODE_BITSIZE (GET_MODE (decl_rtl)) <= BITS_PER_WORD)
+ {
+ int old_generating_concat_p;
+ rtx tmp;
+
+ old_generating_concat_p = generating_concat_p;
+ generating_concat_p = 0;
+ tmp = gen_reg_rtx (GET_MODE (decl_rtl));
+ generating_concat_p = old_generating_concat_p;
+
+ emit_move_insn (tmp, decl_rtl);
+ emit_move_insn (real_decl_rtl, tmp);
+ }
else
emit_move_insn (real_decl_rtl, decl_rtl);
}
else
value_address = XEXP (value_address, 0);
-#ifdef FUNCTION_OUTGOING_VALUE
- outgoing = FUNCTION_OUTGOING_VALUE (build_pointer_type (type),
- current_function_decl);
-#else
- outgoing = FUNCTION_VALUE (build_pointer_type (type),
- current_function_decl);
-#endif
+ outgoing = targetm.calls.function_value (build_pointer_type (type),
+ current_function_decl, true);
/* Mark this as a function return value so integrate will delete the
assignment and USE below when inlining this function. */
current_function_return_rtx = outgoing;
}
- /* If this is an implementation of throw, do what's necessary to
- communicate between __builtin_eh_return and the epilogue. */
- expand_eh_return ();
-
/* Emit the actual code to clobber return register. */
{
rtx seq;
/* Output the label for the naked return from the function. */
emit_label (naked_return_label);
+ /* If stack protection is enabled for this function, check the guard. */
+ if (cfun->stack_protect_guard)
+ stack_protect_epilogue ();
+
/* If we had calls to alloca, and this machine needs
an accurate stack pointer to exit the function,
insert some code to save and restore the stack pointer. */
info.sp_offset));
retaddr = gen_rtx_MEM (Pmode, retaddr);
+ MEM_NOTRAP_P (retaddr) = 1;
/* If there is a pending load to the equivalent register for SP
and we reference that register, we must load our address into
fixup_fallthru_exit_predecessor. */
cfg_layout_initialize (0);
FOR_EACH_BB (cur_bb)
- if (cur_bb->index >= 0 && cur_bb->next_bb->index >= 0)
- cur_bb->rbi->next = cur_bb->next_bb;
+ if (cur_bb->index >= NUM_FIXED_BLOCKS
+ && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
+ cur_bb->aux = cur_bb->next_bb;
cfg_layout_finalize ();
}
epilogue_done:
if (!block)
return;
+ if (!cfun->ib_boundaries_block)
+ return;
+
last_block = VARRAY_TOP_TREE (cfun->ib_boundaries_block);
VARRAY_POP (cfun->ib_boundaries_block);
n = get_max_uid ();
{
return lang_hooks.decl_printable_name (cfun->decl, 2);
}
+\f
+
+static void
+rest_of_handle_check_leaf_regs (void)
+{
+#ifdef LEAF_REGISTERS
+ current_function_uses_only_leaf_regs
+ = optimize > 0 && only_leaf_regs_used () && leaf_function_p ();
+#endif
+}
+
+struct tree_opt_pass pass_leaf_regs =
+{
+ NULL, /* name */
+ NULL, /* gate */
+ rest_of_handle_check_leaf_regs, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ 0, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+ 0 /* letter */
+};
+
#include "gt-function.h"