/* Expands front end tree to back end RTL for GCC.
Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
- 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
This file is part of GCC.
/* The currently compiled function. */
struct function *cfun = 0;
-/* These arrays record the INSN_UIDs of the prologue and epilogue insns. */
-static VEC(int,heap) *prologue;
-static VEC(int,heap) *epilogue;
-
-/* Array of INSN_UIDs to hold the INSN_UIDs for each sibcall epilogue
- in this function. */
-static VEC(int,heap) *sibcall_epilogue;
+/* These hashes record the prologue and epilogue insns. */
+static GTY((if_marked ("ggc_marked_p"), param_is (struct rtx_def)))
+ htab_t prologue_insn_hash;
+static GTY((if_marked ("ggc_marked_p"), param_is (struct rtx_def)))
+ htab_t epilogue_insn_hash;
\f
-/* In order to evaluate some expressions, such as function calls returning
- structures in memory, we need to temporarily allocate stack locations.
- We record each allocated temporary in the following structure.
- Associated with each temporary slot is a nesting level. When we pop up
- one level, all temporaries associated with the previous level are freed.
- Normally, all temporaries are freed after the execution of the statement
- in which they were created. However, if we are inside a ({...}) grouping,
- the result may be in a temporary and hence must be preserved. If the
- result could be in a temporary, we preserve it if we can determine which
- one it is in. If we cannot determine which temporary may contain the
- result, all temporaries are preserved. A temporary is preserved by
- pretending it was allocated at the previous nesting level.
-
- Automatic variables are also assigned temporary slots, at the nesting
- level where they are defined. They are marked a "kept" so that
- free_temp_slots will not free them. */
+htab_t types_used_by_vars_hash = NULL;
+tree types_used_by_cur_var_decl = NULL;
-struct temp_slot GTY(())
-{
- /* Points to next temporary slot. */
- struct temp_slot *next;
- /* Points to previous temporary slot. */
- struct temp_slot *prev;
-
- /* The rtx to used to reference the slot. */
- rtx slot;
- /* The rtx used to represent the address if not the address of the
- slot above. May be an EXPR_LIST if multiple addresses exist. */
- rtx address;
- /* The alignment (in bits) of the slot. */
- unsigned int align;
- /* The size, in units, of the slot. */
- HOST_WIDE_INT size;
- /* The type of the object in the slot, or zero if it doesn't correspond
- to a type. We use this to determine whether a slot can be reused.
- It can be reused if objects of the type of the new slot will always
- conflict with objects of the type of the old slot. */
- tree type;
- /* Nonzero if this temporary is currently in use. */
- char in_use;
- /* Nonzero if this temporary has its address taken. */
- char addr_taken;
- /* Nesting level at which this slot is being used. */
- int level;
- /* Nonzero if this should survive a call to free_temp_slots. */
- int keep;
- /* The offset of the slot from the frame_pointer, including extra space
- for alignment. This info is for combine_temp_slots. */
- HOST_WIDE_INT base_offset;
- /* The size of the slot, including extra space for alignment. This
- info is for combine_temp_slots. */
- HOST_WIDE_INT full_size;
-};
-\f
/* Forward declarations. */
static struct temp_slot *find_temp_slot_from_address (rtx);
extern tree debug_find_var_in_block_tree (tree, tree);
/* We always define `record_insns' even if it's not used so that we
can always export `prologue_epilogue_contains'. */
-static void record_insns (rtx, VEC(int,heap) **) ATTRIBUTE_UNUSED;
-static int contains (const_rtx, VEC(int,heap) **);
+static void record_insns (rtx, rtx, htab_t *) ATTRIBUTE_UNUSED;
+static bool contains (const_rtx, htab_t);
#ifdef HAVE_return
static void emit_return_into_block (basic_block);
#endif
void
free_after_compilation (struct function *f)
{
- VEC_free (int, heap, prologue);
- VEC_free (int, heap, epilogue);
- VEC_free (int, heap, sibcall_epilogue);
+ prologue_insn_hash = NULL;
+ epilogue_insn_hash = NULL;
+
if (crtl->emit.regno_pointer_align)
free (crtl->emit.regno_pointer_align);
/* Leave room for the fixed part of the frame. */
- 64 * UNITS_PER_WORD)
{
- error ("%Jtotal size of local objects too large", func);
+ error_at (DECL_SOURCE_LOCATION (func),
+ "total size of local objects too large");
return TRUE;
}
if (crtl->stack_alignment_needed < alignment_in_bits)
crtl->stack_alignment_needed = alignment_in_bits;
- if (crtl->max_used_stack_slot_alignment < crtl->stack_alignment_needed)
- crtl->max_used_stack_slot_alignment = crtl->stack_alignment_needed;
+ if (crtl->max_used_stack_slot_alignment < alignment_in_bits)
+ crtl->max_used_stack_slot_alignment = alignment_in_bits;
/* Calculate how many bytes the start of local variables is off from
stack alignment. */
return assign_stack_local_1 (mode, size, align, false);
}
\f
+\f
+/* In order to evaluate some expressions, such as function calls returning
+ structures in memory, we need to temporarily allocate stack locations.
+ We record each allocated temporary in the following structure.
+
+ Associated with each temporary slot is a nesting level. When we pop up
+ one level, all temporaries associated with the previous level are freed.
+ Normally, all temporaries are freed after the execution of the statement
+ in which they were created. However, if we are inside a ({...}) grouping,
+ the result may be in a temporary and hence must be preserved. If the
+ result could be in a temporary, we preserve it if we can determine which
+ one it is in. If we cannot determine which temporary may contain the
+ result, all temporaries are preserved. A temporary is preserved by
+ pretending it was allocated at the previous nesting level.
+
+ Automatic variables are also assigned temporary slots, at the nesting
+ level where they are defined. They are marked as "kept" so that
+ free_temp_slots will not free them. */
+
+struct GTY(()) temp_slot {
+ /* Points to next temporary slot. */
+ struct temp_slot *next;
+ /* Points to previous temporary slot. */
+ struct temp_slot *prev;
+ /* The rtx used to reference the slot. */
+ rtx slot;
+ /* The size, in units, of the slot. */
+ HOST_WIDE_INT size;
+ /* The type of the object in the slot, or zero if it doesn't correspond
+ to a type. We use this to determine whether a slot can be reused.
+ It can be reused if objects of the type of the new slot will always
+ conflict with objects of the type of the old slot. */
+ tree type;
+ /* The alignment (in bits) of the slot. */
+ unsigned int align;
+ /* Nonzero if this temporary is currently in use. */
+ char in_use;
+ /* Nonzero if this temporary has its address taken. */
+ char addr_taken;
+ /* Nesting level at which this slot is being used. */
+ int level;
+ /* Nonzero if this should survive a call to free_temp_slots. */
+ int keep;
+ /* The offset of the slot from the frame_pointer, including extra space
+ for alignment. This info is for combine_temp_slots. */
+ HOST_WIDE_INT base_offset;
+ /* The size of the slot, including extra space for alignment. This
+ info is for combine_temp_slots. */
+ HOST_WIDE_INT full_size;
+};
+
+/* A table of addresses that represent a stack slot. The table is a mapping
+ from address RTXen to a temp slot. */
+static GTY((param_is(struct temp_slot_address_entry))) htab_t temp_slot_address_table;
+
+/* Entry for the above hash table. */
+struct GTY(()) temp_slot_address_entry {
+ /* Hash of ADDRESS, cached by temp_slot_address_compute_hash. */
+ hashval_t hash;
+ /* An address rtx known to refer to TEMP_SLOT. */
+ rtx address;
+ /* The temp slot that ADDRESS refers to. */
+ struct temp_slot *temp_slot;
+};
+
/* Removes temporary slot TEMP from LIST. */
static void
temp->in_use = 0;
temp->level = -1;
}
+
+/* Compute the hash value for an address -> temp slot mapping.
+ The value is cached on the mapping entry. */
+static hashval_t
+temp_slot_address_compute_hash (struct temp_slot_address_entry *t)
+{
+ /* hash_rtx requires a do_not_record flag; its result is ignored here. */
+ int do_not_record = 0;
+ return hash_rtx (t->address, GET_MODE (t->address),
+ &do_not_record, NULL, false);
+}
+
+/* Return the hash value for an address -> temp slot mapping.
+ htab hash callback; simply returns the hash cached on the entry. */
+static hashval_t
+temp_slot_address_hash (const void *p)
+{
+ const struct temp_slot_address_entry *t;
+ t = (const struct temp_slot_address_entry *) p;
+ return t->hash;
+}
+
+/* Compare two address -> temp slot mapping entries.
+ htab equality callback; the addresses are compared structurally. */
+static int
+temp_slot_address_eq (const void *p1, const void *p2)
+{
+ const struct temp_slot_address_entry *t1, *t2;
+ t1 = (const struct temp_slot_address_entry *) p1;
+ t2 = (const struct temp_slot_address_entry *) p2;
+ return exp_equiv_p (t1->address, t2->address, 0, true);
+}
+
+/* Add ADDRESS as an alias of TEMP_SLOT to the address -> temp slot mapping. */
+static void
+insert_temp_slot_address (rtx address, struct temp_slot *temp_slot)
+{
+ void **slot;
+ struct temp_slot_address_entry *t = GGC_NEW (struct temp_slot_address_entry);
+ t->address = address;
+ t->temp_slot = temp_slot;
+ t->hash = temp_slot_address_compute_hash (t);
+ slot = htab_find_slot_with_hash (temp_slot_address_table, t, t->hash, INSERT);
+ /* Any existing entry for an equivalent address is simply replaced. */
+ *slot = t;
+}
+
+/* Remove an address -> temp slot mapping entry if the temp slot is
+ not in use anymore. Callback for remove_unused_temp_slot_addresses. */
+static int
+remove_unused_temp_slot_addresses_1 (void **slot, void *data ATTRIBUTE_UNUSED)
+{
+ const struct temp_slot_address_entry *t;
+ t = (const struct temp_slot_address_entry *) *slot;
+ if (! t->temp_slot->in_use)
+ *slot = NULL;
+ /* Nonzero tells htab_traverse to keep scanning the table. */
+ return 1;
+}
+
+/* Remove all mappings of addresses to unused temp slots. Entries whose
+ temp slot is no longer in use are cleared in place by
+ remove_unused_temp_slot_addresses_1. */
+static void
+remove_unused_temp_slot_addresses (void)
+{
+ htab_traverse (temp_slot_address_table,
+ remove_unused_temp_slot_addresses_1,
+ NULL);
+}
+
+/* Find the temp slot corresponding to the object at address X.
+ Return NULL if X is not known to reference a temp slot. */
+
+static struct temp_slot *
+find_temp_slot_from_address (rtx x)
+{
+ struct temp_slot *p;
+ struct temp_slot_address_entry tmp, *t;
+
+ /* First try the easy way:
+ See if X exists in the address -> temp slot mapping. */
+ tmp.address = x;
+ tmp.temp_slot = NULL;
+ tmp.hash = temp_slot_address_compute_hash (&tmp);
+ t = (struct temp_slot_address_entry *)
+ htab_find_with_hash (temp_slot_address_table, &tmp, tmp.hash);
+ if (t)
+ return t->temp_slot;
+
+ /* If we have a sum involving a register, see if it points to a temp
+ slot. Recurse on whichever operand is a REG. */
+ if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
+ && (p = find_temp_slot_from_address (XEXP (x, 0))) != 0)
+ return p;
+ else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 1))
+ && (p = find_temp_slot_from_address (XEXP (x, 1))) != 0)
+ return p;
+
+ /* Last resort: Address is a virtual stack var address. Scan every
+ slot at every level for one whose frame range covers the offset. */
+ if (GET_CODE (x) == PLUS
+ && XEXP (x, 0) == virtual_stack_vars_rtx
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ int i;
+ for (i = max_slot_level (); i >= 0; i--)
+ for (p = *temp_slots_at_level (i); p; p = p->next)
+ {
+ if (INTVAL (XEXP (x, 1)) >= p->base_offset
+ && INTVAL (XEXP (x, 1)) < p->base_offset + p->full_size)
+ return p;
+ }
+ }
+
+ return NULL;
+}
\f
/* Allocate a temporary stack slot and record it for possible later
reuse.
p->full_size = best_p->full_size - rounded_size;
p->slot = adjust_address_nv (best_p->slot, BLKmode, rounded_size);
p->align = best_p->align;
- p->address = 0;
p->type = best_p->type;
insert_slot_to_list (p, &avail_temp_slots);
p->base_offset = frame_offset_old;
p->full_size = frame_offset - frame_offset_old;
}
- p->address = 0;
selected = p;
}
pp = temp_slots_at_level (p->level);
insert_slot_to_list (p, pp);
+ insert_temp_slot_address (XEXP (p->slot, 0), p);
/* Create a new MEM rtx to avoid clobbering MEM flags of old slots. */
slot = gen_rtx_MEM (mode, XEXP (p->slot, 0));
#ifdef PROMOTE_MODE
if (! dont_promote)
- mode = promote_mode (type, mode, &unsignedp, 0);
+ mode = promote_mode (type, mode, &unsignedp);
#endif
return gen_reg_rtx (mode);
}
}
\f
-/* Find the temp slot corresponding to the object at address X. */
-
-static struct temp_slot *
-find_temp_slot_from_address (rtx x)
-{
- struct temp_slot *p;
- rtx next;
- int i;
-
- for (i = max_slot_level (); i >= 0; i--)
- for (p = *temp_slots_at_level (i); p; p = p->next)
- {
- if (XEXP (p->slot, 0) == x
- || p->address == x
- || (GET_CODE (x) == PLUS
- && XEXP (x, 0) == virtual_stack_vars_rtx
- && GET_CODE (XEXP (x, 1)) == CONST_INT
- && INTVAL (XEXP (x, 1)) >= p->base_offset
- && INTVAL (XEXP (x, 1)) < p->base_offset + p->full_size))
- return p;
-
- else if (p->address != 0 && GET_CODE (p->address) == EXPR_LIST)
- for (next = p->address; next; next = XEXP (next, 1))
- if (XEXP (next, 0) == x)
- return p;
- }
-
- /* If we have a sum involving a register, see if it points to a temp
- slot. */
- if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
- && (p = find_temp_slot_from_address (XEXP (x, 0))) != 0)
- return p;
- else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 1))
- && (p = find_temp_slot_from_address (XEXP (x, 1))) != 0)
- return p;
-
- return 0;
-}
-
/* Indicate that NEW_RTX is an alternate way of referring to the temp
slot that previously was known by OLD_RTX. */
}
/* Otherwise add an alias for the temp's address. */
- else if (p->address == 0)
- p->address = new_rtx;
- else
- {
- if (GET_CODE (p->address) != EXPR_LIST)
- p->address = gen_rtx_EXPR_LIST (VOIDmode, p->address, NULL_RTX);
-
- p->address = gen_rtx_EXPR_LIST (VOIDmode, new_rtx, p->address);
- }
+ insert_temp_slot_address (new_rtx, p);
}
/* If X could be a reference to a temporary slot, mark the fact that its
make_slot_available (p);
}
+ remove_unused_temp_slot_addresses ();
combine_temp_slots ();
}
make_slot_available (p);
}
+ remove_unused_temp_slot_addresses ();
combine_temp_slots ();
temp_slot_level--;
avail_temp_slots = 0;
used_temp_slots = 0;
temp_slot_level = 0;
+
+ /* Set up the table to map addresses to temp slots. */
+ if (! temp_slot_address_table)
+ temp_slot_address_table = htab_create_ggc (32,
+ temp_slot_address_hash,
+ temp_slot_address_eq,
+ NULL);
+ else
+ htab_empty (temp_slot_address_table);
}
\f
/* These routines are responsible for converting virtual register references
&& recog_data.n_operands >= 3
&& recog_data.operand_loc[1] == &XEXP (SET_SRC (set), 0)
&& recog_data.operand_loc[2] == &XEXP (SET_SRC (set), 1)
- && GET_CODE (recog_data.operand[2]) == CONST_INT
+ && CONST_INT_P (recog_data.operand[2])
&& (new_rtx = instantiate_new_reg (recog_data.operand[1], &offset)))
{
offset += INTVAL (recog_data.operand[2]);
|| GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
|| GET_CODE (PATTERN (insn)) == ASM_INPUT)
continue;
-
- instantiate_virtual_regs_in_insn (insn);
+ else if (DEBUG_INSN_P (insn))
+ for_each_rtx (&INSN_VAR_LOCATION (insn),
+ instantiate_virtual_regs_in_rtx, NULL);
+ else
+ instantiate_virtual_regs_in_insn (insn);
if (INSN_DELETED_P (insn))
continue;
for_each_rtx (®_NOTES (insn), instantiate_virtual_regs_in_rtx, NULL);
/* Instantiate any virtual registers in CALL_INSN_FUNCTION_USAGE. */
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
for_each_rtx (&CALL_INSN_FUNCTION_USAGE (insn),
instantiate_virtual_regs_in_rtx, NULL);
}
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
+ TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
{
case CALL_EXPR:
fndecl = get_callee_fndecl (fntype);
- fntype = fndecl ? TREE_TYPE (fndecl) : 0;
+ fntype = (fndecl
+ ? TREE_TYPE (fndecl)
+ : TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (fntype))));
break;
case FUNCTION_DECL:
fndecl = fntype;
if (DECL_IGNORED_P (decl))
return true;
- return (optimize || DECL_REGISTER (decl));
+ if (optimize)
+ return true;
+
+ if (!DECL_REGISTER (decl))
+ return false;
+
+ switch (TREE_CODE (TREE_TYPE (decl)))
+ {
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ /* When not optimizing, disregard register keyword for variables with
+ types containing methods, otherwise the methods won't be callable
+ from the debugger. */
+ if (TYPE_METHODS (TREE_TYPE (decl)))
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
}
/* Return true if TYPE should be passed by invisible reference. */
layout_decl (p, 0);
/* Build a second synthetic decl. */
- decl = build_decl (PARM_DECL, NULL_TREE, subtype);
+ decl = build_decl (EXPR_LOCATION (p),
+ PARM_DECL, NULL_TREE, subtype);
DECL_ARG_TYPE (decl) = DECL_ARG_TYPE (p);
DECL_ARTIFICIAL (decl) = addressable;
DECL_IGNORED_P (decl) = addressable;
tree type = build_pointer_type (TREE_TYPE (fntype));
tree decl;
- decl = build_decl (PARM_DECL, NULL_TREE, type);
+ decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
+ PARM_DECL, NULL_TREE, type);
DECL_ARG_TYPE (decl) = type;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
{
tree nominal_type, passed_type;
enum machine_mode nominal_mode, passed_mode, promoted_mode;
+ int unsignedp;
memset (data, 0, sizeof (*data));
}
/* Find mode as it is passed by the ABI. */
- promoted_mode = passed_mode;
- if (targetm.calls.promote_function_args (TREE_TYPE (current_function_decl)))
- {
- int unsignedp = TYPE_UNSIGNED (passed_type);
- promoted_mode = promote_mode (passed_type, promoted_mode,
- &unsignedp, 1);
- }
+ unsignedp = TYPE_UNSIGNED (passed_type);
+ promoted_mode = promote_function_mode (passed_type, passed_mode, &unsignedp,
+ TREE_TYPE (current_function_decl), 0);
egress:
data->nominal_type = nominal_type;
stack_parm = gen_rtx_PLUS (Pmode, stack_parm, offset_rtx);
stack_parm = gen_rtx_MEM (data->promoted_mode, stack_parm);
- set_mem_attributes (stack_parm, parm, 1);
- /* set_mem_attributes could set MEM_SIZE to the passed mode's size,
- while promoted mode's size is needed. */
- if (data->promoted_mode != BLKmode
- && data->promoted_mode != DECL_MODE (parm))
- set_mem_size (stack_parm, GEN_INT (GET_MODE_SIZE (data->promoted_mode)));
+ if (!data->passed_pointer)
+ {
+ set_mem_attributes (stack_parm, parm, 1);
+ /* set_mem_attributes could set MEM_SIZE to the passed mode's size,
+ while promoted mode's size is needed. */
+ if (data->promoted_mode != BLKmode
+ && data->promoted_mode != DECL_MODE (parm))
+ {
+ set_mem_size (stack_parm,
+ GEN_INT (GET_MODE_SIZE (data->promoted_mode)));
+ if (MEM_EXPR (stack_parm) && MEM_OFFSET (stack_parm))
+ {
+ int offset = subreg_lowpart_offset (DECL_MODE (parm),
+ data->promoted_mode);
+ if (offset)
+ set_mem_offset (stack_parm,
+ plus_constant (MEM_OFFSET (stack_parm),
+ -offset));
+ }
+ }
+ }
boundary = data->locate.boundary;
align = BITS_PER_UNIT;
up with a guess at the alignment based on OFFSET_RTX. */
if (data->locate.where_pad != downward || data->entry_parm)
align = boundary;
- else if (GET_CODE (offset_rtx) == CONST_INT)
+ else if (CONST_INT_P (offset_rtx))
{
align = INTVAL (offset_rtx) * BITS_PER_UNIT | boundary;
align = align & -align;
if (GET_CODE (entry_parm) == PARALLEL && GET_MODE (entry_parm) != BLKmode)
{
rtx parmreg = gen_reg_rtx (GET_MODE (entry_parm));
- emit_group_store (parmreg, entry_parm, NULL_TREE,
+ emit_group_store (parmreg, entry_parm, data->passed_type,
GET_MODE_SIZE (GET_MODE (entry_parm)));
entry_parm = parmreg;
}
bool did_conversion = false;
/* Store the parm in a pseudoregister during the function, but we may
- need to do it in a wider mode. */
-
- /* This is not really promoting for a call. However we need to be
- consistent with assign_parm_find_data_types and expand_expr_real_1. */
+ need to do it in a wider mode. Using 2 here makes the result
+ consistent with promote_decl_mode and thus expand_expr_real_1. */
promoted_nominal_mode
- = promote_mode (data->nominal_type, data->nominal_mode, &unsignedp, 1);
+ = promote_function_mode (data->nominal_type, data->nominal_mode, &unsignedp,
+ TREE_TYPE (current_function_decl), 2);
parmreg = gen_reg_rtx (promoted_nominal_mode);
assign_parm_remove_parallels (data);
- /* Copy the value into the register. */
+ /* Copy the value into the register, thus bridging between
+ assign_parm_find_data_types and expand_expr_real_1. */
if (data->nominal_mode != data->passed_mode
|| promoted_nominal_mode != data->promoted_mode)
{
TYPE_UNSIGNED (TREE_TYPE (parm)));
if (data->stack_parm)
- /* ??? This may need a big-endian conversion on sparc64. */
- data->stack_parm
- = adjust_address (data->stack_parm, data->nominal_mode, 0);
+ {
+ int offset = subreg_lowpart_offset (data->nominal_mode,
+ GET_MODE (data->stack_parm));
+ /* ??? This may need a big-endian conversion on sparc64. */
+ data->stack_parm
+ = adjust_address (data->stack_parm, data->nominal_mode, 0);
+ if (offset && MEM_OFFSET (data->stack_parm))
+ set_mem_offset (data->stack_parm,
+ plus_constant (MEM_OFFSET (data->stack_parm),
+ offset));
+ }
}
if (data->entry_parm != data->stack_parm)
if (data->stack_parm == 0)
{
+ int align = STACK_SLOT_ALIGNMENT (data->passed_type,
+ GET_MODE (data->entry_parm),
+ TYPE_ALIGN (data->passed_type));
data->stack_parm
= assign_stack_local (GET_MODE (data->entry_parm),
GET_MODE_SIZE (GET_MODE (data->entry_parm)),
- TYPE_ALIGN (data->passed_type));
+ align);
set_mem_attributes (data->stack_parm, parm, 1);
}
{
rtx rmem, imem;
HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (parm));
+ int align = STACK_SLOT_ALIGNMENT (TREE_TYPE (parm),
+ DECL_MODE (parm),
+ TYPE_ALIGN (TREE_TYPE (parm)));
/* split_complex_arg put the real and imag parts in
pseudos. Move them to memory. */
- tmp = assign_stack_local (DECL_MODE (parm), size,
- TYPE_ALIGN (TREE_TYPE (parm)));
+ tmp = assign_stack_local (DECL_MODE (parm), size, align);
set_mem_attributes (tmp, parm, 1);
rmem = adjust_address_nv (tmp, inner, 0);
imem = adjust_address_nv (tmp, inner, GET_MODE_SIZE (inner));
{
unsigned int align = FUNCTION_ARG_BOUNDARY (data.promoted_mode,
data.passed_type);
+ align = MINIMUM_ALIGNMENT (data.passed_type, data.promoted_mode,
+ align);
if (TYPE_ALIGN (data.nominal_type) > align)
- align = TYPE_ALIGN (data.passed_type);
+ align = MINIMUM_ALIGNMENT (data.nominal_type,
+ TYPE_MODE (data.nominal_type),
+ TYPE_ALIGN (data.nominal_type));
if (crtl->stack_alignment_estimated < align)
{
gcc_assert (!crtl->stack_realign_processed);
= (all.stack_args_size.var == 0 ? GEN_INT (-all.stack_args_size.constant)
: expand_expr (size_diffop (all.stack_args_size.var,
size_int (-all.stack_args_size.constant)),
- NULL_RTX, VOIDmode, 0));
+ NULL_RTX, VOIDmode, EXPAND_NORMAL));
#else
crtl->args.arg_offset_rtx = ARGS_SIZE_RTX (all.stack_args_size);
#endif
calling function side. */
if (crtl->stack_alignment_needed < boundary)
crtl->stack_alignment_needed = boundary;
- if (crtl->max_used_stack_slot_alignment < crtl->stack_alignment_needed)
- crtl->max_used_stack_slot_alignment = crtl->stack_alignment_needed;
if (crtl->preferred_stack_boundary < boundary)
crtl->preferred_stack_boundary = boundary;
OVERRIDE_ABI_FORMAT (fndecl);
#endif
+ invoke_set_current_function_hook (fndecl);
+
if (fndecl != NULL_TREE)
{
DECL_STRUCT_FUNCTION (fndecl) = cfun;
cfun->va_list_gpr_size = VA_LIST_MAX_GPR_SIZE;
cfun->va_list_fpr_size = VA_LIST_MAX_FPR_SIZE;
}
-
- invoke_set_current_function_hook (fndecl);
}
/* This is like allocate_struct_function, but pushes a new cfun for FNDECL
prepare_function_start (void)
{
gcc_assert (!crtl->emit.x_last_insn);
+ init_temp_slots ();
init_emit ();
init_varasm_status ();
init_expr ();
warning (OPT_Waggregate_return, "function returns an aggregate");
}
-/* Make sure all values used by the optimization passes have sane
- defaults. */
+/* Make sure all values used by the optimization passes have sane defaults. */
unsigned int
init_function_for_compilation (void)
{
reg_renumber = 0;
-
- /* No prologue/epilogue insns yet. Make sure that these vectors are
- empty. */
- gcc_assert (VEC_length (int, prologue) == 0);
- gcc_assert (VEC_length (int, epilogue) == 0);
- gcc_assert (VEC_length (int, sibcall_epilogue) == 0);
return 0;
}
{
{
RTL_PASS,
- NULL, /* name */
+ "*init_function", /* name */
NULL, /* gate */
init_function_for_compilation, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
+ TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
tree guard_decl = targetm.stack_protect_guard ();
rtx x, y;
- /* Avoid expand_expr here, because we don't want guard_decl pulled
- into registers unless absolutely necessary. And we know that
- crtl->stack_protect_guard is a local stack slot, so this skips
- all the fluff. */
- x = validize_mem (DECL_RTL (crtl->stack_protect_guard));
- y = validize_mem (DECL_RTL (guard_decl));
+ x = expand_normal (crtl->stack_protect_guard);
+ y = expand_normal (guard_decl);
/* Allow the target to copy from Y to X without leaking Y into a
register. */
rtx label = gen_label_rtx ();
rtx x, y, tmp;
- /* Avoid expand_expr here, because we don't want guard_decl pulled
- into registers unless absolutely necessary. And we know that
- crtl->stack_protect_guard is a local stack slot, so this skips
- all the fluff. */
- x = validize_mem (DECL_RTL (crtl->stack_protect_guard));
- y = validize_mem (DECL_RTL (guard_decl));
+ x = expand_normal (crtl->stack_protect_guard);
+ y = expand_normal (guard_decl);
/* Allow the target to compare Y with X without leaking either into
a register. */
if (cfun->static_chain_decl)
{
tree parm = cfun->static_chain_decl;
- rtx local = gen_reg_rtx (Pmode);
+ rtx local, chain, insn;
+
+ local = gen_reg_rtx (Pmode);
+ chain = targetm.calls.static_chain (current_function_decl, true);
- set_decl_incoming_rtl (parm, static_chain_incoming_rtx, false);
+ set_decl_incoming_rtl (parm, chain, false);
SET_DECL_RTL (parm, local);
mark_reg_pointer (local, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (parm))));
- emit_move_insn (local, static_chain_incoming_rtx);
+ insn = emit_move_insn (local, chain);
+
+ /* Mark the register as eliminable, similar to parameters. */
+ if (MEM_P (chain)
+ && reg_mentioned_p (arg_pointer_rtx, XEXP (chain, 0)))
+ set_unique_reg_note (insn, REG_EQUIV, chain);
}
/* If the function receives a non-local goto, then store the
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (CALL_P (insn))
{
+ rtx max_frame_size = GEN_INT (STACK_CHECK_MAX_FRAME_SIZE);
start_sequence ();
- probe_stack_range (STACK_OLD_CHECK_PROTECT,
- GEN_INT (STACK_CHECK_MAX_FRAME_SIZE));
+ if (STACK_CHECK_MOVING_SP)
+ anti_adjust_stack_and_probe (max_frame_size, true);
+ else
+ probe_stack_range (STACK_OLD_CHECK_PROTECT, max_frame_size);
seq = get_insns ();
end_sequence ();
emit_insn_before (seq, stack_check_probe_note);
else if (GET_MODE (real_decl_rtl) != GET_MODE (decl_rtl))
{
int unsignedp = TYPE_UNSIGNED (TREE_TYPE (decl_result));
-
- if (targetm.calls.promote_function_return (TREE_TYPE (current_function_decl)))
- promote_mode (TREE_TYPE (decl_result), GET_MODE (decl_rtl),
- &unsignedp, 1);
+ promote_function_mode (TREE_TYPE (decl_result),
+ GET_MODE (decl_rtl), &unsignedp,
+ TREE_TYPE (current_function_decl), 1);
convert_move (real_decl_rtl, decl_rtl, unsignedp);
}
start_sequence ();
clobber_return_register ();
- expand_naked_return ();
seq = get_insns ();
end_sequence ();
}
/* Output the label for the naked return from the function. */
- emit_label (naked_return_label);
+ if (naked_return_label)
+ emit_label (naked_return_label);
/* @@@ This is a kludge. We want to ensure that instructions that
may trap are not moved into the epilogue by scheduling, because
return ret;
}
\f
-/* Extend a vector that records the INSN_UIDs of INSNS
- (a list of one or more insns). */
+/* Add a list of INSNS to the hash HASHP, possibly allocating HASHP
+ for the first time. Insns are recorded from INSNS up to, but not
+ including, END; a NULL END records the whole chain. */
static void
-record_insns (rtx insns, VEC(int,heap) **vecp)
+record_insns (rtx insns, rtx end, htab_t *hashp)
{
rtx tmp;
+ htab_t hash = *hashp;
+
+ if (hash == NULL)
+ *hashp = hash
+ = htab_create_ggc (17, htab_hash_pointer, htab_eq_pointer, NULL);
- for (tmp = insns; tmp != NULL_RTX; tmp = NEXT_INSN (tmp))
- VEC_safe_push (int, heap, *vecp, INSN_UID (tmp));
+ for (tmp = insns; tmp != end; tmp = NEXT_INSN (tmp))
+ {
+ void **slot = htab_find_slot (hash, tmp, INSERT);
+ /* Each insn may be recorded at most once. */
+ gcc_assert (*slot == NULL);
+ *slot = tmp;
+ }
+}
+
+/* INSN has been duplicated as COPY, as part of duping a basic block.
+ If INSN is an epilogue insn, then record COPY as epilogue as well. */
+
+void
+maybe_copy_epilogue_insn (rtx insn, rtx copy)
+{
+ void **slot;
+
+ /* Nothing to do unless INSN was recorded as an epilogue insn. */
+ if (epilogue_insn_hash == NULL
+ || htab_find (epilogue_insn_hash, insn) == NULL)
+ return;
+
+ slot = htab_find_slot (epilogue_insn_hash, copy, INSERT);
+ /* COPY is a fresh duplicate; it must not already be in the hash. */
+ gcc_assert (*slot == NULL);
+ *slot = copy;
+}
/* Set the locator of the insn chain starting at INSN to LOC. */
}
}
-/* Determine how many INSN_UIDs in VEC are part of INSN. Because we can
- be running after reorg, SEQUENCE rtl is possible. */
+/* Determine if any INSNs in HASH are, or are part of, INSN. Because
+ we can be running after reorg, SEQUENCE rtl is possible. A NULL
+ HASH means nothing has been recorded, so return false. */
-static int
-contains (const_rtx insn, VEC(int,heap) **vec)
+static bool
+contains (const_rtx insn, htab_t hash)
{
- int i, j;
+ if (hash == NULL)
+ return false;
- if (NONJUMP_INSN_P (insn)
- && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
{
- int count = 0;
+ int i;
for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
- for (j = VEC_length (int, *vec) - 1; j >= 0; --j)
- if (INSN_UID (XVECEXP (PATTERN (insn), 0, i))
- == VEC_index (int, *vec, j))
- count++;
- return count;
- }
- else
- {
- for (j = VEC_length (int, *vec) - 1; j >= 0; --j)
- if (INSN_UID (insn) == VEC_index (int, *vec, j))
- return 1;
+ if (htab_find (hash, XVECEXP (PATTERN (insn), 0, i)))
+ return true;
+ return false;
}
- return 0;
+
+ return htab_find (hash, insn) != NULL;
}
+/* Return nonzero if INSN is recorded as, or as part of, a prologue
+ or epilogue insn. */
int
prologue_epilogue_contains (const_rtx insn)
{
- if (contains (insn, &prologue))
+ if (contains (insn, prologue_insn_hash))
return 1;
- if (contains (insn, &epilogue))
+ if (contains (insn, epilogue_insn_hash))
return 1;
return 0;
}
-int
-sibcall_epilogue_contains (const_rtx insn)
-{
- if (sibcall_epilogue)
- return contains (insn, &sibcall_epilogue);
- return 0;
-}
-
#ifdef HAVE_return
/* Insert gen_return at the end of block BB. This also means updating
block_for_insn appropriately. */
emit_use (hard_frame_pointer_rtx);
/* Retain a map of the prologue insns. */
- record_insns (seq, &prologue);
+ record_insns (seq, NULL, &prologue_insn_hash);
emit_note (NOTE_INSN_PROLOGUE_END);
#ifndef PROFILE_BEFORE_PROLOGUE
}
}
#endif
+
+ /* A small fib -- epilogue is not yet completed, but we wish to re-use
+ this marker for the splits of EH_RETURN patterns, and nothing else
+ uses the flag in the meantime. */
+ epilogue_completed = 1;
+
+#ifdef HAVE_eh_return
+ /* Find non-fallthru edges that end with EH_RETURN instructions. On
+ some targets, these get split to a special version of the epilogue
+ code. In order to be able to properly annotate these with unwind
+ info, try to split them now. If we get a valid split, drop an
+ EPILOGUE_BEG note and mark the insns as epilogue insns. */
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ {
+ rtx prev, last, trial;
+
+ if (e->flags & EDGE_FALLTHRU)
+ continue;
+ last = BB_END (e->src);
+ if (!eh_returnjump_p (last))
+ continue;
+
+ prev = PREV_INSN (last);
+ trial = try_split (PATTERN (last), last, 1);
+ if (trial == last)
+ continue;
+
+ record_insns (NEXT_INSN (prev), NEXT_INSN (trial), &epilogue_insn_hash);
+ emit_note_after (NOTE_INSN_EPILOGUE_BEG, prev);
+ }
+#endif
+
/* Find the edge that falls through to EXIT. Other edges may exist
due to RETURN instructions, but those don't need epilogues.
There really shouldn't be a mixture -- either all should have
emit_jump_insn (seq);
/* Retain a map of the epilogue insns. */
- record_insns (seq, &epilogue);
+ record_insns (seq, NULL, &epilogue_insn_hash);
set_insn_locators (seq, epilogue_locator);
seq = get_insns ();
}
start_sequence ();
+ emit_note (NOTE_INSN_EPILOGUE_BEG);
emit_insn (gen_sibcall_epilogue ());
seq = get_insns ();
end_sequence ();
/* Retain a map of the epilogue insns. Used in life analysis to
avoid getting rid of sibcall epilogue insns. Do this before we
actually emit the sequence. */
- record_insns (seq, &sibcall_epilogue);
+ record_insns (seq, NULL, &epilogue_insn_hash);
set_insn_locators (seq, epilogue_locator);
emit_insn_before (seq, insn);
df_update_entry_exit_and_calls ();
}
-/* Reposition the prologue-end and epilogue-begin notes after instruction
- scheduling and delayed branch scheduling. */
+/* Reposition the prologue-end and epilogue-begin notes after
+ instruction scheduling. */
void
reposition_prologue_and_epilogue_notes (void)
{
-#if defined (HAVE_prologue) || defined (HAVE_epilogue)
- rtx insn, last, note;
- int len;
-
- if ((len = VEC_length (int, prologue)) > 0)
+#if defined (HAVE_prologue) || defined (HAVE_epilogue) \
+ || defined (HAVE_sibcall_epilogue)
+ /* Since the hash table is created on demand, the fact that it is
+ non-null is a signal that it is non-empty. */
+ if (prologue_insn_hash != NULL)
{
- last = 0, note = 0;
-
- /* Scan from the beginning until we reach the last prologue insn.
- We apparently can't depend on basic_block_{head,end} after
- reorg has run. */
+ size_t len = htab_elements (prologue_insn_hash);
+ rtx insn, last = NULL, note = NULL;
+
+ /* Scan from the beginning until we reach the last prologue insn. */
+ /* ??? While we do have the CFG intact, there are two problems:
+ (1) The prologue can contain loops (typically probing the stack),
+ which means that the end of the prologue isn't in the first bb.
+ (2) Sometimes the PROLOGUE_END note gets pushed into the next bb. */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
if (NOTE_P (insn))
if (NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END)
note = insn;
}
- else if (contains (insn, &prologue))
+ else if (contains (insn, prologue_insn_hash))
{
last = insn;
if (--len == 0)
if (last)
{
- /* Find the prologue-end note if we haven't already, and
- move it to just after the last prologue insn. */
- if (note == 0)
+ if (note == NULL)
{
- for (note = last; (note = NEXT_INSN (note));)
- if (NOTE_P (note)
- && NOTE_KIND (note) == NOTE_INSN_PROLOGUE_END)
- break;
+ /* Scan forward looking for the PROLOGUE_END note. It should
+ be right at the beginning of the block, possibly with other
+ insn notes that got moved there. */
+ for (note = NEXT_INSN (last); ; note = NEXT_INSN (note))
+ {
+ if (NOTE_P (note)
+ && NOTE_KIND (note) == NOTE_INSN_PROLOGUE_END)
+ break;
+ }
}
/* Avoid placing note between CODE_LABEL and BASIC_BLOCK note. */
}
}
- if ((len = VEC_length (int, epilogue)) > 0)
+ if (epilogue_insn_hash != NULL)
{
- last = 0, note = 0;
+ edge_iterator ei;
+ edge e;
- /* Scan from the end until we reach the first epilogue insn.
- We apparently can't depend on basic_block_{head,end} after
- reorg has run. */
- for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
{
- if (NOTE_P (insn))
- {
- if (NOTE_KIND (insn) == NOTE_INSN_EPILOGUE_BEG)
- note = insn;
- }
- else if (contains (insn, &epilogue))
+ rtx insn, first = NULL, note = NULL;
+ basic_block bb = e->src;
+
+ /* Scan from the beginning until we reach the first epilogue insn. */
+ FOR_BB_INSNS (bb, insn)
{
- last = insn;
- if (--len == 0)
- break;
+ if (NOTE_P (insn))
+ {
+ if (NOTE_KIND (insn) == NOTE_INSN_EPILOGUE_BEG)
+ {
+ note = insn;
+ if (first != NULL)
+ break;
+ }
+ }
+ else if (first == NULL && contains (insn, epilogue_insn_hash))
+ {
+ first = insn;
+ if (note != NULL)
+ break;
+ }
}
- }
- if (last)
- {
- /* Find the epilogue-begin note if we haven't already, and
- move it to just before the first epilogue insn. */
- if (note == 0)
+ if (note)
{
- for (note = insn; (note = PREV_INSN (note));)
- if (NOTE_P (note)
- && NOTE_KIND (note) == NOTE_INSN_EPILOGUE_BEG)
- break;
+ /* If the function has a single basic block, and no real
+ epilogue insns (e.g. sibcall with no cleanup), the
+ epilogue note can get scheduled before the prologue
+ note. If we have frame related prologue insns, having
+ them scanned during the epilogue will result in a crash.
+ In this case re-order the epilogue note to just before
+ the last insn in the block. */
+ if (first == NULL)
+ first = BB_END (bb);
+
+ if (PREV_INSN (first) != note)
+ reorder_insns (note, note, PREV_INSN (first));
}
-
- if (PREV_INSN (last) != note)
- reorder_insns (note, note, PREV_INSN (last));
}
}
#endif /* HAVE_prologue or HAVE_epilogue */
const char *
current_function_name (void)
{
+ if (cfun == NULL)
+ return "<none>";
return lang_hooks.decl_printable_name (cfun->decl, 2);
}
-
-/* Returns the raw (mangled) name of the current function. */
-const char *
-current_function_assembler_name (void)
-{
- return IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (cfun->decl));
-}
\f
static unsigned int
}
/* Insert a TYPE into the used types hash table of CFUN. */
+
static void
used_types_insert_helper (tree type, struct function *func)
{
t = TREE_TYPE (t);
t = TYPE_MAIN_VARIANT (t);
if (debug_info_level > DINFO_LEVEL_NONE)
- used_types_insert_helper (t, cfun);
+ {
+ if (cfun)
+ used_types_insert_helper (t, cfun);
+ else
+	/* This might be a type referenced by a global variable.
+	   Record the type so that we can later decide whether to emit
+	   its debug information. */
+	types_used_by_cur_var_decl =
+	  tree_cons (t, NULL, types_used_by_cur_var_decl);
+
+ }
+}
+
+/* Helper to hash a struct types_used_by_vars_entry. */
+
+static hashval_t
+hash_types_used_by_vars_entry (const struct types_used_by_vars_entry *entry)
+{
+ gcc_assert (entry && entry->var_decl && entry->type);
+
+ return iterative_hash_object (entry->type,
+ iterative_hash_object (entry->var_decl, 0));
+}
+
+/* Hash function of the types_used_by_vars_entry hash table. */
+
+hashval_t
+types_used_by_vars_do_hash (const void *x)
+{
+ const struct types_used_by_vars_entry *entry =
+ (const struct types_used_by_vars_entry *) x;
+
+ return hash_types_used_by_vars_entry (entry);
+}
+
+/* Equality function of the types_used_by_vars_entry hash table. */
+
+int
+types_used_by_vars_eq (const void *x1, const void *x2)
+{
+ const struct types_used_by_vars_entry *e1 =
+ (const struct types_used_by_vars_entry *) x1;
+ const struct types_used_by_vars_entry *e2 =
+ (const struct types_used_by_vars_entry *)x2;
+
+ return (e1->var_decl == e2->var_decl && e1->type == e2->type);
+}
+
+/* Inserts an entry into the types_used_by_vars_hash hash table. */
+
+void
+types_used_by_var_decl_insert (tree type, tree var_decl)
+{
+ if (type != NULL && var_decl != NULL)
+ {
+ void **slot;
+ struct types_used_by_vars_entry e;
+ e.var_decl = var_decl;
+ e.type = type;
+ if (types_used_by_vars_hash == NULL)
+ types_used_by_vars_hash =
+ htab_create_ggc (37, types_used_by_vars_do_hash,
+ types_used_by_vars_eq, NULL);
+ slot = htab_find_slot_with_hash (types_used_by_vars_hash, &e,
+ hash_types_used_by_vars_entry (&e), INSERT);
+ if (*slot == NULL)
+ {
+ struct types_used_by_vars_entry *entry;
+ entry = (struct types_used_by_vars_entry*) ggc_alloc
+ (sizeof (struct types_used_by_vars_entry));
+ entry->type = type;
+ entry->var_decl = var_decl;
+ *slot = entry;
+ }
+ }
}
struct rtl_opt_pass pass_leaf_regs =
{
{
RTL_PASS,
- NULL, /* name */
+ "*leaf_regs", /* name */
NULL, /* gate */
rest_of_handle_check_leaf_regs, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
+ TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
char *end;
int match, j;
+ if (*constraint == '%')
+ constraint++;
+
match = strtoul (constraint, &end, 10);
if (end == constraint)
continue;
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
+ TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */