/* Subroutines for insn-output.c for Tensilica's Xtensa architecture.
- Copyright 2001,2002,2003 Free Software Foundation, Inc.
+ Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
This file is part of GCC.
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
+#include "tree-gimple.h"
+
/* Enumeration for all of the relational tests, so that we can build
arrays indexed by the test type, and not worry about the order
struct machine_function GTY(())
{
int accesses_prev_frame;
- bool incoming_a7_copied;
+ bool need_a7_copy;
+ bool vararg_a7;
+ rtx set_frame_ptr_insn;
};
/* Vector, indexed by hard register number, which contains 1 for a
static rtx fixup_subreg_mem (rtx);
static enum machine_mode xtensa_find_mode_for_size (unsigned);
static struct machine_function * xtensa_init_machine_status (void);
+static bool xtensa_return_in_msb (tree);
static void printx (FILE *, signed int);
static void xtensa_function_epilogue (FILE *, HOST_WIDE_INT);
+static rtx xtensa_builtin_saveregs (void);
static unsigned int xtensa_multibss_section_type_flags (tree, const char *,
int) ATTRIBUTE_UNUSED;
static void xtensa_select_rtx_section (enum machine_mode, rtx,
unsigned HOST_WIDE_INT);
static bool xtensa_rtx_costs (rtx, int, int, int *);
+static tree xtensa_build_builtin_va_list (void);
+static bool xtensa_return_in_memory (tree, tree);
+static tree xtensa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
-static int current_function_arg_words;
static const int reg_nonleaf_alloc_order[FIRST_PSEUDO_REGISTER] =
REG_ALLOC_ORDER;
\f
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST xtensa_build_builtin_va_list
+
+#undef TARGET_PROMOTE_FUNCTION_ARGS
+#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
+#undef TARGET_PROMOTE_FUNCTION_RETURN
+#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY xtensa_return_in_memory
+#undef TARGET_SPLIT_COMPLEX_ARG
+#define TARGET_SPLIT_COMPLEX_ARG hook_bool_tree_true
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+
+#undef TARGET_EXPAND_BUILTIN_SAVEREGS
+#define TARGET_EXPAND_BUILTIN_SAVEREGS xtensa_builtin_saveregs
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR xtensa_gimplify_va_arg_expr
+
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB xtensa_return_in_msb
+
+#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
+#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE hook_int_void_1
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
{
tree callee, callee_sec, caller_sec;
- if (GET_CODE (op) != SYMBOL_REF || !SYMBOL_REF_LOCAL_P (op))
+ if (GET_CODE (op) != SYMBOL_REF
+ || !SYMBOL_REF_LOCAL_P (op) || SYMBOL_REF_EXTERNAL_P (op))
return FALSE;
/* Don't attempt a direct call if the callee is known to be in
case SImode:
if (TARGET_CONST16)
return CONSTANT_P (op);
- /* fall through */
+ /* Fall through. */
case HImode:
case QImode:
- /* Accept CONSTANT_P_RTX, since it will be gone by CSE1 and
- result in 0/1. */
- if (GET_CODE (op) == CONSTANT_P_RTX)
- return TRUE;
-
if (GET_CODE (op) == CONST_INT && xtensa_simm12b (INTVAL (op)))
return TRUE;
break;
{
rtx offset;
- /* only handle (PLUS (SYM, OFFSET)) form */
+ /* Only handle (PLUS (SYM, OFFSET)) form. */
addr = XEXP (addr, 0);
if (GET_CODE (addr) != PLUS)
return FALSE;
- /* make sure the address is word aligned */
+ /* Make sure the address is word aligned. */
offset = XEXP (addr, 1);
if ((GET_CODE (offset) != CONST_INT)
|| ((INTVAL (offset) & 3) != 0))
rtx temp = gen_reg_rtx (SImode);
rtx shift = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (GET_MODE (src)));
- /* generate paradoxical subregs as needed so that the modes match */
+ /* Generate paradoxical subregs as needed so that the modes match. */
src = simplify_gen_subreg (SImode, src, GET_MODE (src), 0);
dst = simplify_gen_subreg (SImode, dst, GET_MODE (dst), 0);
cmp1 = temp;
}
- return gen_rtx (p_info->test_code, VOIDmode, cmp0, cmp1);
+ return gen_rtx_fmt_ee (p_info->test_code, VOIDmode, cmp0, cmp1);
}
case LT: reverse_regs = 0; invert = 0; gen_fn = gen_slt_sf; break;
case GE: reverse_regs = 1; invert = 0; gen_fn = gen_sle_sf; break;
default:
- fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
+ fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));
reverse_regs = 0; invert = 0; gen_fn = 0; /* avoid compiler warnings */
}
brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
emit_insn (gen_fn (brtmp, cmp0, cmp1));
- return gen_rtx (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
+ return gen_rtx_fmt_ee (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
}
{
case CMP_DF:
default:
- fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
+ fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));
case CMP_SI:
invert = FALSE;
case CMP_SF:
if (!TARGET_HARD_FLOAT)
- fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
+ fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));
invert = FALSE;
cmp = gen_float_relational (test_code, cmp0, cmp1);
break;
code = GE;
op1 = const0_rtx;
}
- cmp = gen_rtx (code, VOIDmode, cc0_rtx, const0_rtx);
+ cmp = gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx);
if (boolean_operator (cmp, VOIDmode))
{
- /* swap the operands to make const0 second */
+ /* Swap the operands to make const0 second. */
if (op0 == const0_rtx)
{
op0 = op1;
op1 = const0_rtx;
}
- /* if not comparing against zero, emit a comparison (subtract) */
+ /* If not comparing against zero, emit a comparison (subtract). */
if (op1 != const0_rtx)
{
op0 = expand_binop (SImode, sub_optab, op0, op1,
}
else if (branch_operator (cmp, VOIDmode))
{
- /* swap the operands to make const0 second */
+ /* Swap the operands to make const0 second. */
if (op0 == const0_rtx)
{
op0 = op1;
else
return 0;
- return gen_rtx (code, VOIDmode, op0, op1);
+ return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
if (TARGET_HARD_FLOAT && (branch_type == CMP_SF))
xtensa_emit_move_sequence (rtx *operands, enum machine_mode mode)
{
if (CONSTANT_P (operands[1])
- && GET_CODE (operands[1]) != CONSTANT_P_RTX
&& (GET_CODE (operands[1]) != CONST_INT
|| !xtensa_simm12b (INTVAL (operands[1]))))
{
}
}
- if (!(reload_in_progress | reload_completed))
- {
- if (!xtensa_valid_move (mode, operands))
- operands[1] = force_reg (mode, operands[1]);
+ if (!(reload_in_progress | reload_completed)
+ && !xtensa_valid_move (mode, operands))
+ operands[1] = force_reg (mode, operands[1]);
- if (xtensa_copy_incoming_a7 (operands, mode))
- return 1;
- }
+ operands[1] = xtensa_copy_incoming_a7 (operands[1]);
/* During reload we don't want to emit (subreg:X (mem:Y)) since that
instruction won't be recognized after reload, so we remove the
}
-/* Check if this move is copying an incoming argument in a7. If so,
- emit the move, followed by the special "set_frame_ptr"
- unspec_volatile insn, at the very beginning of the function. This
- is necessary because the register allocator will ignore conflicts
- with a7 and may assign some other pseudo to a7. If that pseudo was
- assigned prior to this move, it would clobber the incoming argument
- in a7. By copying the argument out of a7 as the very first thing,
- and then immediately following that with an unspec_volatile to keep
- the scheduler away, we should avoid any problems. */
+/* Check if an incoming argument in a7 is expected to be used soon and
+ if OPND is a register or register pair that includes a7. If so,
+ create a new pseudo and copy a7 into that pseudo at the very
+ beginning of the function, followed by the special "set_frame_ptr"
+ unspec_volatile insn. The return value is either the original
+ operand, if it is not a7, or the new pseudo containing a copy of
+ the incoming argument. This is necessary because the register
+ allocator will ignore conflicts with a7 and may either assign some
+ other pseudo to a7 or use a7 as the hard_frame_pointer, clobbering
+ the incoming argument in a7. By copying the argument out of a7 as
+ the very first thing, and then immediately following that with an
+ unspec_volatile to keep the scheduler away, we should avoid any
+ problems. Putting the set_frame_ptr insn at the beginning, with
+ only the a7 copy before it, also makes it easier for the prologue
+ expander to initialize the frame pointer after the a7 copy and to
+ fix up the a7 copy to use the stack pointer instead of the frame
+ pointer. */
-bool
-xtensa_copy_incoming_a7 (rtx *operands, enum machine_mode mode)
+rtx
+xtensa_copy_incoming_a7 (rtx opnd)
{
- if (a7_overlap_mentioned_p (operands[1])
- && !cfun->machine->incoming_a7_copied)
+ rtx entry_insns = 0;
+ rtx reg, tmp;
+ enum machine_mode mode;
+
+ if (!cfun->machine->need_a7_copy)
+ return opnd;
+
+ /* This function should never be called again once a7 has been copied. */
+ if (cfun->machine->set_frame_ptr_insn)
+ abort ();
+
+ mode = GET_MODE (opnd);
+
+ /* The operand using a7 may come in a later instruction, so just return
+ the original operand if it doesn't use a7. */
+ reg = opnd;
+ if (GET_CODE (reg) == SUBREG)
{
- rtx mov;
- switch (mode)
- {
- case DFmode:
- mov = gen_movdf_internal (operands[0], operands[1]);
- break;
- case SFmode:
- mov = gen_movsf_internal (operands[0], operands[1]);
- break;
- case DImode:
- mov = gen_movdi_internal (operands[0], operands[1]);
- break;
- case SImode:
- mov = gen_movsi_internal (operands[0], operands[1]);
- break;
- case HImode:
- mov = gen_movhi_internal (operands[0], operands[1]);
- break;
- case QImode:
- mov = gen_movqi_internal (operands[0], operands[1]);
- break;
- default:
- abort ();
- }
+ if (SUBREG_BYTE (reg) != 0)
+ abort ();
+ reg = SUBREG_REG (reg);
+ }
+ if (GET_CODE (reg) != REG
+ || REGNO (reg) > A7_REG
+ || REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) <= A7_REG)
+ return opnd;
- /* Insert the instructions before any other argument copies.
- (The set_frame_ptr insn comes _after_ the move, so push it
- out first.) */
- push_topmost_sequence ();
- emit_insn_after (gen_set_frame_ptr (), get_insns ());
- emit_insn_after (mov, get_insns ());
- pop_topmost_sequence ();
+ /* 1-word args will always be in a7; 2-word args in a6/a7. */
+ if (REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) - 1 != A7_REG)
+ abort ();
- /* Ideally the incoming argument in a7 would only be copied
- once, since propagating a7 into the body of a function
- will almost certainly lead to errors. However, there is
- at least one harmless case (in GCSE) where the original
- copy from a7 is changed to copy into a new pseudo. Thus,
- we use a flag to only do this special treatment for the
- first copy of a7. */
+ cfun->machine->need_a7_copy = false;
- cfun->machine->incoming_a7_copied = true;
+ /* Copy a7 to a new pseudo at the function entry. Use gen_raw_REG to
+ create the REG for a7 so that hard_frame_pointer_rtx is not used. */
- return 1;
+ push_to_sequence (entry_insns);
+ tmp = gen_reg_rtx (mode);
+
+ switch (mode)
+ {
+ case DFmode:
+ case DImode:
+ emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 0),
+ gen_rtx_REG (SImode, A7_REG - 1)));
+ emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 4),
+ gen_raw_REG (SImode, A7_REG)));
+ break;
+ case SFmode:
+ emit_insn (gen_movsf_internal (tmp, gen_raw_REG (mode, A7_REG)));
+ break;
+ case SImode:
+ emit_insn (gen_movsi_internal (tmp, gen_raw_REG (mode, A7_REG)));
+ break;
+ case HImode:
+ emit_insn (gen_movhi_internal (tmp, gen_raw_REG (mode, A7_REG)));
+ break;
+ case QImode:
+ emit_insn (gen_movqi_internal (tmp, gen_raw_REG (mode, A7_REG)));
+ break;
+ default:
+ abort ();
}
- return 0;
+ cfun->machine->set_frame_ptr_insn = emit_insn (gen_set_frame_ptr ());
+ entry_insns = get_insns ();
+ end_sequence ();
+
+ if (cfun->machine->vararg_a7)
+ {
+      /* This is called from within builtin_saveregs, so we're already
+	 inside a start_sequence that will be placed at the start of
+	 the function.  */
+ emit_insn (entry_insns);
+ }
+ else
+ {
+ /* Put entry_insns after the NOTE that starts the function. If
+ this is inside a start_sequence, make the outer-level insn
+ chain current, so the code is placed at the start of the
+ function. */
+ push_topmost_sequence ();
+ emit_insn_after (entry_insns, get_insns ());
+ pop_topmost_sequence ();
+ }
+
+ return tmp;
}
int align = XINT (operands[3], 0);
int num_pieces, move_ratio;
- /* If this is not a fixed size move, just call memcpy */
+ /* If this is not a fixed size move, just call memcpy. */
if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
return 0;
- /* Anything to move? */
+ /* Anything to move? */
if (bytes <= 0)
return 1;
if (align > MOVE_MAX)
align = MOVE_MAX;
- /* decide whether to expand inline based on the optimization level */
+ /* Decide whether to expand inline based on the optimization level. */
move_ratio = 4;
if (optimize > 2)
move_ratio = LARGEST_MOVE_RATIO;
- num_pieces = (bytes / align) + (bytes % align); /* close enough anyway */
+ num_pieces = (bytes / align) + (bytes % align); /* Close enough anyway. */
if (num_pieces >= move_ratio)
return 0;
- /* make sure the memory addresses are valid */
+ /* Make sure the memory addresses are valid. */
operands[0] = validize_mem (dest);
operands[1] = validize_mem (src);
- emit_insn (gen_movstrsi_internal (operands[0], operands[1],
+ emit_insn (gen_movmemsi_internal (operands[0], operands[1],
operands[2], operands[3]));
return 1;
}
-/* Emit a sequence of instructions to implement a block move, trying
- to hide load delay slots as much as possible. Load N values into
- temporary registers, store those N values, and repeat until the
- complete block has been moved. N=delay_slots+1 */
+/* Emit a sequence of instructions to implement a block move, trying
+ to hide load delay slots as much as possible. Load N values into
+ temporary registers, store those N values, and repeat until the
+ complete block has been moved. N=delay_slots+1. */
struct meminsnbuf
{
if (bytes < item_size)
{
- /* find a smaller item_size which we can load & store */
+ /* Find a smaller item_size which we can load & store. */
item_size = bytes;
mode = xtensa_find_mode_for_size (item_size);
item_size = GET_MODE_SIZE (mode);
stname = xtensa_st_opcodes[(int) mode];
}
- /* record the load instruction opcode and operands */
+ /* Record the load instruction opcode and operands. */
addr = plus_constant (from_addr, offset);
mem = gen_rtx_MEM (mode, addr);
if (! memory_address_p (mode, addr))
ldinsns[n].operands[1] = mem;
sprintf (ldinsns[n].template, "%s\t%%0, %%1", ldname);
- /* record the store instruction opcode and operands */
+ /* Record the store instruction opcode and operands. */
addr = plus_constant (to_addr, offset);
mem = gen_rtx_MEM (mode, addr);
if (! memory_address_p (mode, addr))
bytes -= item_size;
}
- /* now output the loads followed by the stores */
+ /* Now output the loads followed by the stores. */
for (n = 0; n < chunk_size; n++)
output_asm_insn (ldinsns[n].template, ldinsns[n].operands);
for (n = 0; n < chunk_size; n++)
{
mode = VOIDmode;
- /* find mode closest to but not bigger than item_size */
+ /* Find mode closest to but not bigger than item_size. */
for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
if (GET_MODE_SIZE (tmode) <= item_size)
&& xtensa_st_opcodes[(int) mode])
break;
- /* cannot load & store this mode; try something smaller */
+ /* Cannot load & store this mode; try something smaller. */
item_size -= 1;
}
rtx goto_handler = operands[1];
rtx containing_fp = operands[3];
- /* generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
- is too big to generate in-line */
+ /* Generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
+ is too big to generate in-line. */
if (GET_CODE (containing_fp) != REG)
containing_fp = force_reg (Pmode, containing_fp);
a comment showing where the end of the loop is. However, if there is a
label or a branch at the end of the loop then we need to place a nop
there. If the loop ends with a label we need the nop so that branches
- targetting that label will target the nop (and thus remain in the loop),
- instead of targetting the instruction after the loop (and thus exiting
+ targeting that label will target the nop (and thus remain in the loop),
+ instead of targeting the instruction after the loop (and thus exiting
the loop). If the loop ends with a branch, we need the nop in case the
- branch is targetting a location inside the loop. When the branch
+ branch is targeting a location inside the loop. When the branch
executes it will cause the loop count to be decremented even if it is
taken (because it is the last instruction in the loop), so we need to
nop after the branch to prevent the loop count from being decremented
}
-/* Return the stabs register number to use for 'regno'. */
+/* Return the debugger register number to use for 'regno'. */
int
xtensa_dbx_register_number (int regno)
else if (FP_REG_P (regno))
{
regno -= FP_REG_FIRST;
- /* The current numbering convention is that TIE registers are
- numbered in libcc order beginning with 256. We can't guarantee
- that the FP registers will come first, so the following is just
- a guess. It seems like we should make a special case for FP
- registers and give them fixed numbers < 256. */
- first = 256;
+ first = 48;
}
else if (ACC_REG_P (regno))
{
- first = 0;
- regno = -1;
+ first = 0x200; /* Start of Xtensa special registers. */
+ regno = 16; /* ACCLO is special register 16. */
}
/* When optimizing, we sometimes get asked about pseudo-registers
/* Initialize CUMULATIVE_ARGS for a function. */
void
-init_cumulative_args (CUMULATIVE_ARGS *cum,
- tree fntype ATTRIBUTE_UNUSED,
- rtx libname ATTRIBUTE_UNUSED)
+init_cumulative_args (CUMULATIVE_ARGS *cum, int incoming)
{
cum->arg_words = 0;
+ cum->incoming = incoming;
}
? (int) GET_MODE_SIZE (mode)
: int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
- if ((*arg_words + words > max) && (*arg_words < max))
+ if (*arg_words < max
+ && (targetm.calls.must_pass_in_stack (mode, type)
+ || *arg_words + words > max))
*arg_words = max;
*arg_words += words;
int regbase, words, max;
int *arg_words;
int regno;
- enum machine_mode result_mode;
arg_words = &cum->arg_words;
regbase = (incoming_p ? GP_ARG_FIRST : GP_OUTGOING_ARG_FIRST);
: int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
if (type && (TYPE_ALIGN (type) > BITS_PER_WORD))
- *arg_words += (*arg_words & 1);
+ {
+ int align = TYPE_ALIGN (type) / BITS_PER_WORD;
+ *arg_words = (*arg_words + align - 1) & -align;
+ }
if (*arg_words + words > max)
return (rtx)0;
regno = regbase + *arg_words;
- result_mode = (mode == BLKmode ? TYPE_MODE (type) : mode);
- /* We need to make sure that references to a7 are represented with
- rtx that is not equal to hard_frame_pointer_rtx. For BLKmode and
- modes bigger than 2 words (because we only have patterns for
- modes of 2 words or smaller), we can't control the expansion
- unless we explicitly list the individual registers in a PARALLEL. */
+ if (cum->incoming && regno <= A7_REG && regno + words > A7_REG)
+ cfun->machine->need_a7_copy = true;
- if ((mode == BLKmode || words > 2)
- && regno < A7_REG
- && regno + words > A7_REG)
- {
- rtx result;
- int n;
+ return gen_rtx_REG (mode, regno);
+}
- result = gen_rtx_PARALLEL (result_mode, rtvec_alloc (words));
- for (n = 0; n < words; n++)
- {
- XVECEXP (result, 0, n) =
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_raw_REG (SImode, regno + n),
- GEN_INT (n * UNITS_PER_WORD));
- }
- return result;
- }
- return gen_raw_REG (result_mode, regno);
+static bool
+xtensa_return_in_msb (tree valtype)
+{
+ return (TARGET_BIG_ENDIAN
+ && AGGREGATE_TYPE_P (valtype)
+ && int_size_in_bytes (valtype) >= UNITS_PER_WORD);
}
if (!TARGET_BOOLEANS && TARGET_HARD_FLOAT)
error ("boolean registers required for the floating-point option");
- /* set up the tables of ld/st opcode names for block moves */
+ /* Set up the tables of ld/st opcode names for block moves. */
xtensa_ld_opcodes[(int) SImode] = "l32i";
xtensa_ld_opcodes[(int) HImode] = "l16ui";
xtensa_ld_opcodes[(int) QImode] = "l8ui";
compute_frame_size (int size)
{
/* Add space for the incoming static chain value. */
- if (current_function_needs_context)
+ if (cfun->static_chain_decl != NULL)
size += (1 * UNITS_PER_WORD);
xtensa_current_frame_size =
if (frame_pointer_needed)
{
- rtx first, insn, set_frame_ptr_insn = 0;
-
- push_topmost_sequence ();
- first = get_insns ();
- pop_topmost_sequence ();
-
- /* Search all instructions, looking for the insn that sets up the
- frame pointer. This search will fail if the function does not
- have an incoming argument in $a7, but in that case, we can just
- set up the frame pointer at the very beginning of the
- function. */
-
- for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (cfun->machine->set_frame_ptr_insn)
{
- rtx pat;
+ rtx first, insn;
- if (!INSN_P (insn))
- continue;
+ push_topmost_sequence ();
+ first = get_insns ();
+ pop_topmost_sequence ();
- pat = PATTERN (insn);
- if (GET_CODE (pat) == SET
- && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
- && (XINT (SET_SRC (pat), 1) == UNSPECV_SET_FP))
- {
- set_frame_ptr_insn = insn;
- break;
- }
- }
-
- if (set_frame_ptr_insn)
- {
/* For all instructions prior to set_frame_ptr_insn, replace
hard_frame_pointer references with stack_pointer. */
for (insn = first;
- insn != set_frame_ptr_insn;
+ insn != cfun->machine->set_frame_ptr_insn;
insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
/* Create the va_list data type.
- This structure is set up by __builtin_saveregs. The __va_reg
- field points to a stack-allocated region holding the contents of the
- incoming argument registers. The __va_ndx field is an index initialized
- to the position of the first unnamed (variable) argument. This same index
- is also used to address the arguments passed in memory. Thus, the
- __va_stk field is initialized to point to the position of the first
- argument in memory offset to account for the arguments passed in
- registers. E.G., if there are 6 argument registers, and each register is
- 4 bytes, then __va_stk is set to $sp - (6 * 4); then __va_reg[N*4]
- references argument word N for 0 <= N < 6, and __va_stk[N*4] references
- argument word N for N >= 6. */
-
-tree
-xtensa_build_va_list (void)
+
+ This structure is set up by __builtin_saveregs. The __va_reg field
+ points to a stack-allocated region holding the contents of the
+ incoming argument registers. The __va_ndx field is an index
+ initialized to the position of the first unnamed (variable)
+ argument. This same index is also used to address the arguments
+ passed in memory. Thus, the __va_stk field is initialized to point
+ to the position of the first argument in memory offset to account
+ for the arguments passed in registers and to account for the size
+ of the argument registers not being 16-byte aligned. E.G., there
+ are 6 argument registers of 4 bytes each, but we want the __va_ndx
+ for the first stack argument to have the maximal alignment of 16
+ bytes, so we offset the __va_stk address by 32 bytes so that
+ __va_stk[32] references the first argument on the stack. */
+
+static tree
+xtensa_build_builtin_va_list (void)
{
tree f_stk, f_reg, f_ndx, record, type_decl;
/* Save the incoming argument registers on the stack. Returns the
address of the saved registers. */
-rtx
+static rtx
xtensa_builtin_saveregs (void)
{
rtx gp_regs, dest;
- int arg_words = current_function_arg_words;
+ int arg_words = current_function_args_info.arg_words;
int gp_left = MAX_ARGS_IN_REGISTERS - arg_words;
- int i;
- if (gp_left == 0)
+ if (gp_left <= 0)
return const0_rtx;
- /* allocate the general-purpose register space */
+ /* Allocate the general-purpose register space. */
gp_regs = assign_stack_local
(BLKmode, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1);
set_mem_alias_set (gp_regs, get_varargs_alias_set ());
dest = change_address (gp_regs, SImode,
plus_constant (XEXP (gp_regs, 0),
arg_words * UNITS_PER_WORD));
-
- /* Note: Don't use move_block_from_reg() here because the incoming
- argument in a7 cannot be represented by hard_frame_pointer_rtx.
- Instead, call gen_raw_REG() directly so that we get a distinct
- instance of (REG:SI 7). */
- for (i = 0; i < gp_left; i++)
- {
- emit_move_insn (operand_subword (dest, i, 1, BLKmode),
- gen_raw_REG (SImode, GP_ARG_FIRST + arg_words + i));
- }
+ cfun->machine->need_a7_copy = true;
+ cfun->machine->vararg_a7 = true;
+ move_block_from_reg (GP_ARG_FIRST + arg_words, dest, gp_left);
return XEXP (gp_regs, 0);
}
f_reg = TREE_CHAIN (f_stk);
f_ndx = TREE_CHAIN (f_reg);
- stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk);
- reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg);
- ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx);
+ stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
+ reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg, NULL_TREE);
+ ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx, NULL_TREE);
/* Call __builtin_saveregs; save the result in __va_reg */
- current_function_arg_words = arg_words;
u = make_tree (ptr_type_node, expand_builtin_saveregs ());
t = build (MODIFY_EXPR, ptr_type_node, reg, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
- /* Set the __va_stk member to $arg_ptr - (size of __va_reg area) */
+ /* Set the __va_stk member to ($arg_ptr - 32). */
u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
- u = fold (build (PLUS_EXPR, ptr_type_node, u,
- build_int_2 (-MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1)));
+ u = fold (build (PLUS_EXPR, ptr_type_node, u, build_int_2 (-32, -1)));
t = build (MODIFY_EXPR, ptr_type_node, stk, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
- /* Set the __va_ndx member. */
+ /* Set the __va_ndx member. If the first variable argument is on
+ the stack, adjust __va_ndx by 2 words to account for the extra
+ alignment offset for __va_stk. */
+ if (arg_words >= MAX_ARGS_IN_REGISTERS)
+ arg_words += 2;
u = build_int_2 (arg_words * UNITS_PER_WORD, 0);
t = build (MODIFY_EXPR, integer_type_node, ndx, u);
TREE_SIDE_EFFECTS (t) = 1;
/* Implement `va_arg'. */
-rtx
-xtensa_va_arg (tree valist, tree type)
+static tree
+xtensa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
+ tree *post_p ATTRIBUTE_UNUSED)
{
tree f_stk, stk;
tree f_reg, reg;
tree f_ndx, ndx;
- tree tmp, addr_tree, type_size;
- rtx array, orig_ndx, r, addr, size, va_size;
- rtx lab_false, lab_over, lab_false2;
+ tree type_size, array, orig_ndx, addr, size, va_size, t;
+ tree lab_false, lab_over, lab_false2;
+
+ /* Handle complex values as separate real and imaginary parts. */
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ tree real_part, imag_part;
+
+ real_part = xtensa_gimplify_va_arg_expr (valist, TREE_TYPE (type),
+ pre_p, NULL);
+ real_part = get_initialized_tmp_var (real_part, pre_p, NULL);
+
+ imag_part = xtensa_gimplify_va_arg_expr (valist, TREE_TYPE (type),
+ pre_p, NULL);
+ imag_part = get_initialized_tmp_var (imag_part, pre_p, NULL);
+
+ return build (COMPLEX_EXPR, type, real_part, imag_part);
+ }
f_stk = TYPE_FIELDS (va_list_type_node);
f_reg = TREE_CHAIN (f_stk);
f_ndx = TREE_CHAIN (f_reg);
- stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk);
- reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg);
- ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx);
+ stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
+ reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg, NULL_TREE);
+ ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx, NULL_TREE);
- type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
+ type_size = size_in_bytes (type);
+ va_size = round_up (type_size, UNITS_PER_WORD);
+ gimplify_expr (&va_size, pre_p, NULL, is_gimple_val, fb_rvalue);
- va_size = gen_reg_rtx (SImode);
- tmp = fold (build (MULT_EXPR, sizetype,
- fold (build (TRUNC_DIV_EXPR, sizetype,
- fold (build (PLUS_EXPR, sizetype,
- type_size,
- size_int (UNITS_PER_WORD - 1))),
- size_int (UNITS_PER_WORD))),
- size_int (UNITS_PER_WORD)));
- r = expand_expr (tmp, va_size, SImode, EXPAND_NORMAL);
- if (r != va_size)
- emit_move_insn (va_size, r);
+ /* First align __va_ndx if necessary for this arg:
- /* First align __va_ndx to a double word boundary if necessary for this arg:
+ orig_ndx = (AP).__va_ndx;
+ if (__alignof__ (TYPE) > 4 )
+ orig_ndx = ((orig_ndx + __alignof__ (TYPE) - 1)
+ & -__alignof__ (TYPE)); */
- if (__alignof__ (TYPE) > 4)
- (AP).__va_ndx = (((AP).__va_ndx + 7) & -8); */
+ orig_ndx = get_initialized_tmp_var (ndx, pre_p, NULL);
if (TYPE_ALIGN (type) > BITS_PER_WORD)
{
- tmp = build (PLUS_EXPR, integer_type_node, ndx,
- build_int_2 ((2 * UNITS_PER_WORD) - 1, 0));
- tmp = build (BIT_AND_EXPR, integer_type_node, tmp,
- build_int_2 (-2 * UNITS_PER_WORD, -1));
- tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
- TREE_SIDE_EFFECTS (tmp) = 1;
- expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ int align = TYPE_ALIGN (type) / BITS_PER_UNIT;
+
+ t = build (PLUS_EXPR, integer_type_node, orig_ndx,
+ build_int_2 (align - 1, 0));
+ t = build (BIT_AND_EXPR, integer_type_node, t,
+ build_int_2 (-align, -1));
+ t = build (MODIFY_EXPR, integer_type_node, orig_ndx, t);
+ gimplify_and_add (t, pre_p);
}
/* Increment __va_ndx to point past the argument:
- orig_ndx = (AP).__va_ndx;
- (AP).__va_ndx += __va_size (TYPE); */
-
- orig_ndx = gen_reg_rtx (SImode);
- r = expand_expr (ndx, orig_ndx, SImode, EXPAND_NORMAL);
- if (r != orig_ndx)
- emit_move_insn (orig_ndx, r);
+ (AP).__va_ndx = orig_ndx + __va_size (TYPE); */
- tmp = build (PLUS_EXPR, integer_type_node, ndx,
- make_tree (intSI_type_node, va_size));
- tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
- TREE_SIDE_EFFECTS (tmp) = 1;
- expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ t = fold_convert (integer_type_node, va_size);
+ t = build (PLUS_EXPR, integer_type_node, orig_ndx, t);
+ t = build (MODIFY_EXPR, integer_type_node, ndx, t);
+ gimplify_and_add (t, pre_p);
/* Check if the argument is in registers:
if ((AP).__va_ndx <= __MAX_ARGS_IN_REGISTERS * 4
- && !MUST_PASS_IN_STACK (type))
+ && !must_pass_in_stack (type))
__array = (AP).__va_reg; */
- array = gen_reg_rtx (Pmode);
+ array = create_tmp_var (ptr_type_node, NULL);
- lab_over = NULL_RTX;
- if (!MUST_PASS_IN_STACK (VOIDmode, type))
+ lab_over = NULL;
+ if (!targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
{
- lab_false = gen_label_rtx ();
- lab_over = gen_label_rtx ();
-
- emit_cmp_and_jump_insns (expand_expr (ndx, NULL_RTX, SImode,
- EXPAND_NORMAL),
- GEN_INT (MAX_ARGS_IN_REGISTERS
- * UNITS_PER_WORD),
- GT, const1_rtx, SImode, 0, lab_false);
-
- r = expand_expr (reg, array, Pmode, EXPAND_NORMAL);
- if (r != array)
- emit_move_insn (array, r);
-
- emit_jump_insn (gen_jump (lab_over));
- emit_barrier ();
- emit_label (lab_false);
+ lab_false = create_artificial_label ();
+ lab_over = create_artificial_label ();
+
+ t = build_int_2 (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, 0);
+ t = build (GT_EXPR, boolean_type_node, ndx, t);
+ t = build (COND_EXPR, void_type_node, t,
+ build (GOTO_EXPR, void_type_node, lab_false),
+ NULL);
+ gimplify_and_add (t, pre_p);
+
+ t = build (MODIFY_EXPR, void_type_node, array, reg);
+ gimplify_and_add (t, pre_p);
+
+ t = build (GOTO_EXPR, void_type_node, lab_over);
+ gimplify_and_add (t, pre_p);
+
+ t = build (LABEL_EXPR, void_type_node, lab_false);
+ gimplify_and_add (t, pre_p);
}
+
/* ...otherwise, the argument is on the stack (never split between
registers and the stack -- change __va_ndx if necessary):
else
{
- if (orig_ndx < __MAX_ARGS_IN_REGISTERS * 4)
- (AP).__va_ndx = __MAX_ARGS_IN_REGISTERS * 4 + __va_size (TYPE);
+ if (orig_ndx <= __MAX_ARGS_IN_REGISTERS * 4)
+ (AP).__va_ndx = 32 + __va_size (TYPE);
__array = (AP).__va_stk;
} */
- lab_false2 = gen_label_rtx ();
- emit_cmp_and_jump_insns (orig_ndx,
- GEN_INT (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD),
- GE, const1_rtx, SImode, 0, lab_false2);
+ lab_false2 = create_artificial_label ();
- tmp = build (PLUS_EXPR, sizetype, make_tree (intSI_type_node, va_size),
- build_int_2 (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, 0));
- tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
- TREE_SIDE_EFFECTS (tmp) = 1;
- expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ t = build_int_2 (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, 0);
+ t = build (GT_EXPR, boolean_type_node, orig_ndx, t);
+ t = build (COND_EXPR, void_type_node, t,
+ build (GOTO_EXPR, void_type_node, lab_false2),
+ NULL);
+ gimplify_and_add (t, pre_p);
- emit_label (lab_false2);
+ t = size_binop (PLUS_EXPR, va_size, size_int (32));
+ t = fold_convert (integer_type_node, t);
+ t = build (MODIFY_EXPR, integer_type_node, ndx, t);
+ gimplify_and_add (t, pre_p);
- r = expand_expr (stk, array, Pmode, EXPAND_NORMAL);
- if (r != array)
- emit_move_insn (array, r);
+ t = build (LABEL_EXPR, void_type_node, lab_false2);
+ gimplify_and_add (t, pre_p);
- if (lab_over != NULL_RTX)
- emit_label (lab_over);
+ t = build (MODIFY_EXPR, void_type_node, array, stk);
+ gimplify_and_add (t, pre_p);
+
+ if (lab_over)
+ {
+ t = build (LABEL_EXPR, void_type_node, lab_over);
+ gimplify_and_add (t, pre_p);
+ }
/* Given the base array pointer (__array) and index to the subsequent
The results are endian-dependent because values smaller than one word
are aligned differently. */
- size = gen_reg_rtx (SImode);
- emit_move_insn (size, va_size);
- if (BYTES_BIG_ENDIAN)
+ if (BYTES_BIG_ENDIAN && TREE_CODE (type_size) == INTEGER_CST)
{
- rtx lab_use_va_size = gen_label_rtx ();
-
- emit_cmp_and_jump_insns (expand_expr (type_size, NULL_RTX, SImode,
- EXPAND_NORMAL),
- GEN_INT (PARM_BOUNDARY / BITS_PER_UNIT),
- GE, const1_rtx, SImode, 0, lab_use_va_size);
-
- r = expand_expr (type_size, size, SImode, EXPAND_NORMAL);
- if (r != size)
- emit_move_insn (size, r);
-
- emit_label (lab_use_va_size);
+ t = size_int (PARM_BOUNDARY / BITS_PER_UNIT);
+ t = fold (build (GE_EXPR, boolean_type_node, type_size, t));
+ t = fold (build (COND_EXPR, sizetype, t, va_size, type_size));
+ size = t;
}
+ else
+ size = va_size;
- addr_tree = build (PLUS_EXPR, ptr_type_node,
- make_tree (ptr_type_node, array),
- ndx);
- addr_tree = build (MINUS_EXPR, ptr_type_node, addr_tree,
- make_tree (intSI_type_node, size));
- addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
- addr = copy_to_reg (addr);
- return addr;
+ t = fold_convert (ptr_type_node, ndx);
+ addr = build (PLUS_EXPR, ptr_type_node, array, t);
+ t = fold_convert (ptr_type_node, size);
+ addr = build (MINUS_EXPR, ptr_type_node, addr, t);
+
+ addr = fold_convert (build_pointer_type (type), addr);
+ return build_fold_indirect_ref (addr);
}
int i, num_arg_regs;
int nxt = 0;
- /* use the AR registers in increasing order (skipping a0 and a1)
- but save the incoming argument registers for a last resort */
+ /* Use the AR registers in increasing order (skipping a0 and a1)
+ but save the incoming argument registers for a last resort. */
num_arg_regs = current_function_args_info.arg_words;
if (num_arg_regs > MAX_ARGS_IN_REGISTERS)
num_arg_regs = MAX_ARGS_IN_REGISTERS;
for (i = 0; i < num_arg_regs; i++)
reg_alloc_order[nxt++] = GP_ARG_FIRST + i;
- /* list the coprocessor registers in order */
+ /* List the coprocessor registers in order. */
for (i = 0; i < BR_REG_NUM; i++)
reg_alloc_order[nxt++] = BR_REG_FIRST + i;
- /* list the FP registers in order for now */
+ /* List the FP registers in order for now. */
for (i = 0; i < 16; i++)
reg_alloc_order[nxt++] = FP_REG_FIRST + i;
}
-/* A customized version of reg_overlap_mentioned_p that only looks for
- references to a7 (as opposed to hard_frame_pointer_rtx). */
-
-int
-a7_overlap_mentioned_p (rtx x)
-{
- int i, j;
- unsigned int x_regno;
- const char *fmt;
-
- if (GET_CODE (x) == REG)
- {
- x_regno = REGNO (x);
- return (x != hard_frame_pointer_rtx
- && x_regno < A7_REG + 1
- && x_regno + HARD_REGNO_NREGS (A7_REG, GET_MODE (x)) > A7_REG);
- }
-
- if (GET_CODE (x) == SUBREG
- && GET_CODE (SUBREG_REG (x)) == REG
- && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
- {
- x_regno = subreg_regno (x);
- return (SUBREG_REG (x) != hard_frame_pointer_rtx
- && x_regno < A7_REG + 1
- && x_regno + HARD_REGNO_NREGS (A7_REG, GET_MODE (x)) > A7_REG);
- }
-
- /* X does not match, so try its subexpressions. */
- fmt = GET_RTX_FORMAT (GET_CODE (x));
- for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- {
- if (a7_overlap_mentioned_p (XEXP (x, i)))
- return 1;
- }
- else if (fmt[i] == 'E')
- {
- for (j = XVECLEN (x, i) - 1; j >=0; j--)
- if (a7_overlap_mentioned_p (XVECEXP (x, i, j)))
- return 1;
- }
- }
-
- return 0;
-}
-
-
/* Some Xtensa targets support multiple bss sections. If the section
name ends with ".bss", add SECTION_BSS to the flags. */
case LSHIFTRT:
case ROTATE:
case ROTATERT:
- /* no way to tell if X is the 2nd operand so be conservative */
+ /* No way to tell if X is the 2nd operand so be conservative. */
default: break;
}
if (xtensa_simm12b (INTVAL (x)))
return true;
}
}
- /* fall through */
+ /* Fall through. */
case UDIV:
case UMOD:
}
}
+/* Worker function for TARGET_RETURN_IN_MEMORY.  Return true if values
+   of type TYPE must be returned in memory rather than in registers:
+   anything larger than 4 words goes in memory.  Note that the cast to
+   unsigned HOST_WIDE_INT is deliberate — int_size_in_bytes returns -1
+   for variable-sized types, which the cast turns into a huge value, so
+   such types are also returned in memory.  FNTYPE is unused.  */
+
+static bool
+xtensa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
+{
+ return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)
+ > 4 * UNITS_PER_WORD);
+}
+
#include "gt-xtensa.h"