/* Subroutines for insn-output.c for Tensilica's Xtensa architecture.
- Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
This file is part of GCC.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
#include "config.h"
#include "system.h"
/* Current frame size calculated by compute_frame_size. */
unsigned xtensa_current_frame_size;
-/* Tables of ld/st opcode names for block moves */
-const char *xtensa_ld_opcodes[(int) MAX_MACHINE_MODE];
-const char *xtensa_st_opcodes[(int) MAX_MACHINE_MODE];
+/* Largest block move to handle in-line. */
#define LARGEST_MOVE_RATIO 15
/* Define the structure for the machine field in struct function. */
NO_REGS, NO_REGS, NO_REGS, NO_REGS,
};
-static int b4const_or_zero (int);
static enum internal_test map_test_to_internal_test (enum rtx_code);
static rtx gen_int_relational (enum rtx_code, rtx, rtx, int *);
static rtx gen_float_relational (enum rtx_code, rtx, rtx);
static rtx gen_conditional_move (rtx);
static rtx fixup_subreg_mem (rtx);
-static enum machine_mode xtensa_find_mode_for_size (unsigned);
static struct machine_function * xtensa_init_machine_status (void);
static bool xtensa_return_in_msb (tree);
static void printx (FILE *, signed int);
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION xtensa_select_rtx_section
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_FUSED_MADD)
+
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS xtensa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_RETURN_IN_MEMORY xtensa_return_in_memory
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_tree_true
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS xtensa_builtin_saveregs
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB xtensa_return_in_msb
-#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
-#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE hook_int_void_1
-
struct gcc_target targetm = TARGET_INITIALIZER;
\f
* Functions to test Xtensa immediate operand validity.
*/
-int
-xtensa_b4constu (int v)
+/* Return true if V fits in a signed 8-bit immediate field
+ (-128 <= V <= 127).  */
+bool
+xtensa_simm8 (HOST_WIDE_INT v)
+{
+ return v >= -128 && v <= 127;
+}
+
+
+/* Return true if V is a multiple of 256 whose scaled value fits in a
+ signed 8-bit field (-32768 <= V <= 32512).  */
+bool
+xtensa_simm8x256 (HOST_WIDE_INT v)
+{
+ return (v & 255) == 0 && (v >= -32768 && v <= 32512);
+}
+
+
+/* Return true if V fits in a signed 12-bit immediate field
+ (-2048 <= V <= 2047).  */
+bool
+xtensa_simm12b (HOST_WIDE_INT v)
+{
+ return v >= -2048 && v <= 2047;
+}
+
+
+/* Return true if V fits in an unsigned 8-bit immediate field
+ (0 <= V <= 255).  */
+static bool
+xtensa_uimm8 (HOST_WIDE_INT v)
+{
+ return v >= 0 && v <= 255;
+}
+
+
+/* Return true if V is an even value whose halved value fits in an
+ unsigned 8-bit field (0 <= V <= 510).  */
+static bool
+xtensa_uimm8x2 (HOST_WIDE_INT v)
+{
+ return (v & 1) == 0 && (v >= 0 && v <= 510);
+}
+
+
+/* Return true if V is a multiple of 4 whose quartered value fits in an
+ unsigned 8-bit field (0 <= V <= 1020).  */
+static bool
+xtensa_uimm8x4 (HOST_WIDE_INT v)
+{
+ return (v & 3) == 0 && (v >= 0 && v <= 1020);
+}
+
+
+static bool
+xtensa_b4const (HOST_WIDE_INT v)
{
switch (v)
{
- case 32768:
- case 65536:
+ case -1:
+ case 1:
case 2:
case 3:
case 4:
case 64:
case 128:
case 256:
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-int
-xtensa_simm8x256 (int v)
-{
- return (v & 255) == 0 && (v >= -32768 && v <= 32512);
-}
-int
-xtensa_ai4const (int v)
+bool
+xtensa_b4const_or_zero (HOST_WIDE_INT v)
{
- return (v == -1 || (v >= 1 && v <= 15));
+ if (v == 0)
+ return true;
+ return xtensa_b4const (v);
}
-int
-xtensa_simm7 (int v)
-{
- return v >= -32 && v <= 95;
-}
-int
-xtensa_b4const (int v)
+bool
+xtensa_b4constu (HOST_WIDE_INT v)
{
switch (v)
{
- case -1:
- case 1:
+ case 32768:
+ case 65536:
case 2:
case 3:
case 4:
case 64:
case 128:
case 256:
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-int
-xtensa_simm8 (int v)
-{
- return v >= -128 && v <= 127;
-}
-
-int
-xtensa_tp7 (int v)
-{
- return (v >= 7 && v <= 22);
-}
-int
-xtensa_lsi4x4 (int v)
+bool
+xtensa_mask_immediate (HOST_WIDE_INT v)
{
- return (v & 3) == 0 && (v >= 0 && v <= 60);
-}
+#define MAX_MASK_SIZE 16
+ int mask_size;
-int
-xtensa_simm12b (int v)
-{
- return v >= -2048 && v <= 2047;
-}
+ for (mask_size = 1; mask_size <= MAX_MASK_SIZE; mask_size++)
+ {
+ if ((v & 1) == 0)
+ return false;
+ v = v >> 1;
+ if (v == 0)
+ return true;
+ }
-int
-xtensa_uimm8 (int v)
-{
- return v >= 0 && v <= 255;
+ return false;
}
-int
-xtensa_uimm8x2 (int v)
-{
- return (v & 1) == 0 && (v >= 0 && v <= 510);
-}
-int
-xtensa_uimm8x4 (int v)
+bool
+xtensa_const_ok_for_letter_p (HOST_WIDE_INT v, int c)
{
- return (v & 3) == 0 && (v >= 0 && v <= 1020);
+ switch (c)
+ {
+ case 'I': return xtensa_simm12b (v);
+ case 'J': return xtensa_simm8 (v);
+ case 'K': return (v == 0 || xtensa_b4const (v));
+ case 'L': return xtensa_b4constu (v);
+ case 'M': return (v >= -32 && v <= 95);
+ case 'N': return xtensa_simm8x256 (v);
+ case 'O': return (v == -1 || (v >= 1 && v <= 15));
+ case 'P': return xtensa_mask_immediate (v);
+ default: break;
+ }
+ return false;
}
int
-add_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) == CONST_INT)
- return (xtensa_simm8 (INTVAL (op)) || xtensa_simm8x256 (INTVAL (op)));
-
- return register_operand (op, mode);
-}
-
-
-int
-arith_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) == CONST_INT)
- return xtensa_simm8 (INTVAL (op));
-
- return register_operand (op, mode);
-}
-
-
-int
-nonimmed_operand (rtx op, enum machine_mode mode)
-{
- /* We cannot use the standard nonimmediate_operand() predicate because
- it includes constant pool memory operands. */
-
- if (memory_operand (op, mode))
- return !constantpool_address_p (XEXP (op, 0));
-
- return register_operand (op, mode);
-}
-
-
-int
-mem_operand (rtx op, enum machine_mode mode)
-{
- /* We cannot use the standard memory_operand() predicate because
- it includes constant pool memory operands. */
-
- if (memory_operand (op, mode))
- return !constantpool_address_p (XEXP (op, 0));
-
- return FALSE;
-}
-
-
-int
xtensa_valid_move (enum machine_mode mode, rtx *operands)
{
/* Either the destination or source must be a register, and the
int
-mask_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) == CONST_INT)
- return xtensa_mask_immediate (INTVAL (op));
-
- return register_operand (op, mode);
-}
-
-
-int
-extui_fldsz_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return ((GET_CODE (op) == CONST_INT)
- && xtensa_mask_immediate ((1 << INTVAL (op)) - 1));
-}
-
-
-int
-sext_operand (rtx op, enum machine_mode mode)
-{
- if (TARGET_SEXT)
- return nonimmed_operand (op, mode);
- return mem_operand (op, mode);
-}
-
-
-int
-sext_fldsz_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- return ((GET_CODE (op) == CONST_INT) && xtensa_tp7 (INTVAL (op) - 1));
-}
-
-
-int
-lsbitnum_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- if (GET_CODE (op) == CONST_INT)
- {
- return (BITS_BIG_ENDIAN
- ? (INTVAL (op) == BITS_PER_WORD-1)
- : (INTVAL (op) == 0));
- }
- return FALSE;
-}
-
-
-static int
-b4const_or_zero (int v)
-{
- if (v == 0)
- return TRUE;
- return xtensa_b4const (v);
-}
-
-
-int
-branch_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) == CONST_INT)
- return b4const_or_zero (INTVAL (op));
-
- return register_operand (op, mode);
-}
-
-
-int
-ubranch_operand (rtx op, enum machine_mode mode)
-{
- if (GET_CODE (op) == CONST_INT)
- return xtensa_b4constu (INTVAL (op));
-
- return register_operand (op, mode);
-}
-
-
-int
-call_insn_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- if ((GET_CODE (op) == REG)
- && (op != arg_pointer_rtx)
- && ((REGNO (op) < FRAME_POINTER_REGNUM)
- || (REGNO (op) > LAST_VIRTUAL_REGISTER)))
- return TRUE;
-
- if (CONSTANT_ADDRESS_P (op))
- {
- /* Direct calls only allowed to static functions with PIC. */
- if (flag_pic)
- {
- tree callee, callee_sec, caller_sec;
-
- if (GET_CODE (op) != SYMBOL_REF
- || !SYMBOL_REF_LOCAL_P (op) || SYMBOL_REF_EXTERNAL_P (op))
- return FALSE;
-
- /* Don't attempt a direct call if the callee is known to be in
- a different section, since there's a good chance it will be
- out of range. */
-
- if (flag_function_sections
- || DECL_ONE_ONLY (current_function_decl))
- return FALSE;
- caller_sec = DECL_SECTION_NAME (current_function_decl);
- callee = SYMBOL_REF_DECL (op);
- if (callee)
- {
- if (DECL_ONE_ONLY (callee))
- return FALSE;
- callee_sec = DECL_SECTION_NAME (callee);
- if (((caller_sec == NULL_TREE) ^ (callee_sec == NULL_TREE))
- || (caller_sec != NULL_TREE
- && strcmp (TREE_STRING_POINTER (caller_sec),
- TREE_STRING_POINTER (callee_sec)) != 0))
- return FALSE;
- }
- else if (caller_sec != NULL_TREE)
- return FALSE;
- }
- return TRUE;
- }
-
- return FALSE;
-}
-
-
-int
-move_operand (rtx op, enum machine_mode mode)
-{
- if (register_operand (op, mode)
- || memory_operand (op, mode))
- return TRUE;
-
- switch (mode)
- {
- case DFmode:
- case SFmode:
- return TARGET_CONST16 && CONSTANT_P (op);
-
- case DImode:
- case SImode:
- if (TARGET_CONST16)
- return CONSTANT_P (op);
- /* Fall through. */
-
- case HImode:
- case QImode:
- if (GET_CODE (op) == CONST_INT && xtensa_simm12b (INTVAL (op)))
- return TRUE;
- break;
-
- default:
- break;
- }
-
- return FALSE;
-}
-
-
-int
smalloffset_mem_p (rtx op)
{
if (GET_CODE (op) == MEM)
if (GET_CODE (addr) == PLUS)
{
rtx offset = XEXP (addr, 0);
+ HOST_WIDE_INT val;
if (GET_CODE (offset) != CONST_INT)
offset = XEXP (addr, 1);
if (GET_CODE (offset) != CONST_INT)
return FALSE;
- return xtensa_lsi4x4 (INTVAL (offset));
+
+ val = INTVAL (offset);
+ return (val & 3) == 0 && (val >= 0 && val <= 60);
}
}
return FALSE;
}
-/* Accept the floating point constant 1 in the appropriate mode. */
-
-int
-const_float_1_operand (rtx op, enum machine_mode mode)
-{
- REAL_VALUE_TYPE d;
- static REAL_VALUE_TYPE onedf;
- static REAL_VALUE_TYPE onesf;
- static int one_initialized;
-
- if ((GET_CODE (op) != CONST_DOUBLE)
- || (mode != GET_MODE (op))
- || (mode != DFmode && mode != SFmode))
- return FALSE;
-
- REAL_VALUE_FROM_CONST_DOUBLE (d, op);
-
- if (! one_initialized)
- {
- onedf = REAL_VALUE_ATOF ("1.0", DFmode);
- onesf = REAL_VALUE_ATOF ("1.0", SFmode);
- one_initialized = TRUE;
- }
-
- if (mode == DFmode)
- return REAL_VALUES_EQUAL (d, onedf);
- else
- return REAL_VALUES_EQUAL (d, onesf);
-}
-
-
-int
-fpmem_offset_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- if (GET_CODE (op) == CONST_INT)
- return xtensa_mem_offset (INTVAL (op), SFmode);
- return 0;
-}
-
-
void
xtensa_extend_reg (rtx dst, rtx src)
{
}
-int
-branch_operator (rtx x, enum machine_mode mode)
-{
- if (GET_MODE (x) != mode)
- return FALSE;
-
- switch (GET_CODE (x))
- {
- case EQ:
- case NE:
- case LT:
- case GE:
- return TRUE;
- default:
- break;
- }
- return FALSE;
-}
-
-
-int
-ubranch_operator (rtx x, enum machine_mode mode)
-{
- if (GET_MODE (x) != mode)
- return FALSE;
-
- switch (GET_CODE (x))
- {
- case LTU:
- case GEU:
- return TRUE;
- default:
- break;
- }
- return FALSE;
-}
-
-
-int
-boolean_operator (rtx x, enum machine_mode mode)
-{
- if (GET_MODE (x) != mode)
- return FALSE;
-
- switch (GET_CODE (x))
- {
- case EQ:
- case NE:
- return TRUE;
- default:
- break;
- }
- return FALSE;
-}
-
-
-int
-xtensa_mask_immediate (int v)
-{
-#define MAX_MASK_SIZE 16
- int mask_size;
-
- for (mask_size = 1; mask_size <= MAX_MASK_SIZE; mask_size++)
- {
- if ((v & 1) == 0)
- return FALSE;
- v = v >> 1;
- if (v == 0)
- return TRUE;
- }
-
- return FALSE;
-}
-
-
-int
+bool
xtensa_mem_offset (unsigned v, enum machine_mode mode)
{
switch (mode)
}
+/* Return true if OP satisfies the extra (memory) constraint letter C.
+ Handles 'R' (small-offset memory), 'T' (constant-pool memory when
+ CONST16 is not in use) and 'U' (non-constant-pool memory).  A non-MEM
+ operand is accepted only when it is a pseudo register during reload,
+ so that reload can still match these constraints before the pseudo is
+ replaced by a stack slot.  */
+bool
+xtensa_extra_constraint (rtx op, int c)
+{
+ /* Allow pseudo registers during reload. */
+ if (GET_CODE (op) != MEM)
+ return (c >= 'R' && c <= 'U'
+ && reload_in_progress && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER);
+
+ switch (c)
+ {
+ case 'R': return smalloffset_mem_p (op);
+ case 'T': return !TARGET_CONST16 && constantpool_mem_p (op);
+ case 'U': return !constantpool_mem_p (op);
+ default: break;
+ }
+ return false;
+}
+
+
/* Make normal rtx_code into something we can index from an array. */
static enum internal_test
struct cmp_info
{
enum rtx_code test_code; /* test code to use in insn */
- int (*const_range_p) (int); /* predicate function to check range */
+ bool (*const_range_p) (HOST_WIDE_INT); /* range check function */
int const_add; /* constant to add (convert LE -> LT) */
int reverse_regs; /* reverse registers in test */
int invert_const; /* != 0 if invert value if cmp1 is constant */
static struct cmp_info info[ (int)ITEST_MAX ] = {
- { EQ, b4const_or_zero, 0, 0, 0, 0, 0 }, /* EQ */
- { NE, b4const_or_zero, 0, 0, 0, 0, 0 }, /* NE */
+ { EQ, xtensa_b4const_or_zero, 0, 0, 0, 0, 0 }, /* EQ */
+ { NE, xtensa_b4const_or_zero, 0, 0, 0, 0, 0 }, /* NE */
- { LT, b4const_or_zero, 1, 1, 1, 0, 0 }, /* GT */
- { GE, b4const_or_zero, 0, 0, 0, 0, 0 }, /* GE */
- { LT, b4const_or_zero, 0, 0, 0, 0, 0 }, /* LT */
- { GE, b4const_or_zero, 1, 1, 1, 0, 0 }, /* LE */
+ { LT, xtensa_b4const_or_zero, 1, 1, 1, 0, 0 }, /* GT */
+ { GE, xtensa_b4const_or_zero, 0, 0, 0, 0, 0 }, /* GE */
+ { LT, xtensa_b4const_or_zero, 0, 0, 0, 0, 0 }, /* LT */
+ { GE, xtensa_b4const_or_zero, 1, 1, 1, 0, 0 }, /* LE */
{ LTU, xtensa_b4constu, 1, 1, 1, 0, 1 }, /* GTU */
{ GEU, xtensa_b4constu, 0, 0, 0, 0, 1 }, /* GEU */
struct cmp_info *p_info;
test = map_test_to_internal_test (test_code);
- if (test == ITEST_MAX)
- abort ();
+ gcc_assert (test != ITEST_MAX);
p_info = &info[ (int)test ];
{
case LT: code = GE; break;
case GE: code = LT; break;
- default: abort ();
+ default: gcc_unreachable ();
}
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
switch (GET_CODE (operands[0]))
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
return opnd;
/* This function should never be called again once a7 has been copied. */
- if (cfun->machine->set_frame_ptr_insn)
- abort ();
+ gcc_assert (!cfun->machine->set_frame_ptr_insn);
mode = GET_MODE (opnd);
reg = opnd;
if (GET_CODE (reg) == SUBREG)
{
- if (SUBREG_BYTE (reg) != 0)
- abort ();
+ gcc_assert (SUBREG_BYTE (reg) == 0);
reg = SUBREG_REG (reg);
}
if (GET_CODE (reg) != REG
return opnd;
/* 1-word args will always be in a7; 2-word args in a6/a7. */
- if (REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) - 1 != A7_REG)
- abort ();
+ gcc_assert (REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) - 1 == A7_REG);
cfun->machine->need_a7_copy = false;
emit_insn (gen_movqi_internal (tmp, gen_raw_REG (mode, A7_REG)));
break;
default:
- abort ();
+ gcc_unreachable ();
}
cfun->machine->set_frame_ptr_insn = emit_insn (gen_set_frame_ptr ());
}
-/* Try to expand a block move operation to an RTL block move instruction.
- If not optimizing or if the block size is not a constant or if the
- block is small, the expansion fails and GCC falls back to calling
- memcpy().
+/* Try to expand a block move operation to a sequence of RTL move
+ instructions. If not optimizing, or if the block size is not a
+ constant, or if the block is too large, the expansion fails and GCC
+ falls back to calling memcpy().
operands[0] is the destination
operands[1] is the source
int
xtensa_expand_block_move (rtx *operands)
{
- rtx dest = operands[0];
- rtx src = operands[1];
- int bytes = INTVAL (operands[2]);
- int align = XINT (operands[3], 0);
+ static const enum machine_mode mode_from_align[] =
+ {
+ VOIDmode, QImode, HImode, VOIDmode, SImode,
+ };
+
+ rtx dst_mem = operands[0];
+ rtx src_mem = operands[1];
+ HOST_WIDE_INT bytes, align;
int num_pieces, move_ratio;
+ rtx temp[2];
+ enum machine_mode mode[2];
+ int amount[2];
+ bool active[2];
+ int phase = 0;
+ int next;
+ int offset_ld = 0;
+ int offset_st = 0;
+ rtx x;
/* If this is not a fixed size move, just call memcpy. */
if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
return 0;
+ bytes = INTVAL (operands[2]);
+ align = INTVAL (operands[3]);
+
/* Anything to move? */
if (bytes <= 0)
- return 1;
+ return 0;
if (align > MOVE_MAX)
align = MOVE_MAX;
if (optimize > 2)
move_ratio = LARGEST_MOVE_RATIO;
num_pieces = (bytes / align) + (bytes % align); /* Close enough anyway. */
- if (num_pieces >= move_ratio)
+ if (num_pieces > move_ratio)
return 0;
- /* Make sure the memory addresses are valid. */
- operands[0] = validize_mem (dest);
- operands[1] = validize_mem (src);
-
- emit_insn (gen_movmemsi_internal (operands[0], operands[1],
- operands[2], operands[3]));
- return 1;
-}
-
-
-/* Emit a sequence of instructions to implement a block move, trying
- to hide load delay slots as much as possible. Load N values into
- temporary registers, store those N values, and repeat until the
- complete block has been moved. N=delay_slots+1. */
-
-struct meminsnbuf
-{
- char template[30];
- rtx operands[2];
-};
-
-void
-xtensa_emit_block_move (rtx *operands, rtx *tmpregs, int delay_slots)
-{
- rtx dest = operands[0];
- rtx src = operands[1];
- int bytes = INTVAL (operands[2]);
- int align = XINT (operands[3], 0);
- rtx from_addr = XEXP (src, 0);
- rtx to_addr = XEXP (dest, 0);
- int from_struct = MEM_IN_STRUCT_P (src);
- int to_struct = MEM_IN_STRUCT_P (dest);
- int offset = 0;
- int chunk_size, item_size;
- struct meminsnbuf *ldinsns, *stinsns;
- const char *ldname, *stname;
- enum machine_mode mode;
-
- if (align > MOVE_MAX)
- align = MOVE_MAX;
- item_size = align;
- chunk_size = delay_slots + 1;
+ x = XEXP (dst_mem, 0);
+ if (!REG_P (x))
+ {
+ x = force_reg (Pmode, x);
+ dst_mem = replace_equiv_address (dst_mem, x);
+ }
- ldinsns = (struct meminsnbuf *)
- alloca (chunk_size * sizeof (struct meminsnbuf));
- stinsns = (struct meminsnbuf *)
- alloca (chunk_size * sizeof (struct meminsnbuf));
+ x = XEXP (src_mem, 0);
+ if (!REG_P (x))
+ {
+ x = force_reg (Pmode, x);
+ src_mem = replace_equiv_address (src_mem, x);
+ }
- mode = xtensa_find_mode_for_size (item_size);
- item_size = GET_MODE_SIZE (mode);
- ldname = xtensa_ld_opcodes[(int) mode];
- stname = xtensa_st_opcodes[(int) mode];
+ active[0] = active[1] = false;
- while (bytes > 0)
+ do
{
- int n;
+ next = phase;
+ phase ^= 1;
- for (n = 0; n < chunk_size; n++)
+ if (bytes > 0)
{
- rtx addr, mem;
-
- if (bytes == 0)
- {
- chunk_size = n;
- break;
- }
-
- if (bytes < item_size)
- {
- /* Find a smaller item_size which we can load & store. */
- item_size = bytes;
- mode = xtensa_find_mode_for_size (item_size);
- item_size = GET_MODE_SIZE (mode);
- ldname = xtensa_ld_opcodes[(int) mode];
- stname = xtensa_st_opcodes[(int) mode];
- }
-
- /* Record the load instruction opcode and operands. */
- addr = plus_constant (from_addr, offset);
- mem = gen_rtx_MEM (mode, addr);
- if (! memory_address_p (mode, addr))
- abort ();
- MEM_IN_STRUCT_P (mem) = from_struct;
- ldinsns[n].operands[0] = tmpregs[n];
- ldinsns[n].operands[1] = mem;
- sprintf (ldinsns[n].template, "%s\t%%0, %%1", ldname);
-
- /* Record the store instruction opcode and operands. */
- addr = plus_constant (to_addr, offset);
- mem = gen_rtx_MEM (mode, addr);
- if (! memory_address_p (mode, addr))
- abort ();
- MEM_IN_STRUCT_P (mem) = to_struct;
- stinsns[n].operands[0] = tmpregs[n];
- stinsns[n].operands[1] = mem;
- sprintf (stinsns[n].template, "%s\t%%0, %%1", stname);
-
- offset += item_size;
- bytes -= item_size;
- }
-
- /* Now output the loads followed by the stores. */
- for (n = 0; n < chunk_size; n++)
- output_asm_insn (ldinsns[n].template, ldinsns[n].operands);
- for (n = 0; n < chunk_size; n++)
- output_asm_insn (stinsns[n].template, stinsns[n].operands);
- }
-}
-
+ int next_amount;
-static enum machine_mode
-xtensa_find_mode_for_size (unsigned item_size)
-{
- enum machine_mode mode, tmode;
+ next_amount = (bytes >= 4 ? 4 : (bytes >= 2 ? 2 : 1));
+ next_amount = MIN (next_amount, align);
- while (1)
- {
- mode = VOIDmode;
+ amount[next] = next_amount;
+ mode[next] = mode_from_align[next_amount];
+ temp[next] = gen_reg_rtx (mode[next]);
- /* Find mode closest to but not bigger than item_size. */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) <= item_size)
- mode = tmode;
- if (mode == VOIDmode)
- abort ();
+ x = adjust_address (src_mem, mode[next], offset_ld);
+ emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));
- item_size = GET_MODE_SIZE (mode);
+ offset_ld += next_amount;
+ bytes -= next_amount;
+ active[next] = true;
+ }
- if (xtensa_ld_opcodes[(int) mode]
- && xtensa_st_opcodes[(int) mode])
- break;
+ if (active[phase])
+ {
+ active[phase] = false;
+
+ x = adjust_address (dst_mem, mode[phase], offset_st);
+ emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));
- /* Cannot load & store this mode; try something smaller. */
- item_size -= 1;
+ offset_st += amount[phase];
+ }
}
+ while (active[next]);
- return mode;
+ return 1;
}
? (int) GET_MODE_SIZE (mode)
: int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
- if ((*arg_words + words > max) && (*arg_words < max))
+ if (*arg_words < max
+ && (targetm.calls.must_pass_in_stack (mode, type)
+ || *arg_words + words > max))
*arg_words = max;
*arg_words += words;
if (!TARGET_BOOLEANS && TARGET_HARD_FLOAT)
error ("boolean registers required for the floating-point option");
- /* Set up the tables of ld/st opcode names for block moves. */
- xtensa_ld_opcodes[(int) SImode] = "l32i";
- xtensa_ld_opcodes[(int) HImode] = "l16ui";
- xtensa_ld_opcodes[(int) QImode] = "l8ui";
- xtensa_st_opcodes[(int) SImode] = "s32i";
- xtensa_st_opcodes[(int) HImode] = "s16i";
- xtensa_st_opcodes[(int) QImode] = "s8i";
-
xtensa_char_to_class['q'] = SP_REG;
xtensa_char_to_class['a'] = GR_REGS;
xtensa_char_to_class['b'] = ((TARGET_BOOLEANS) ? BR_REGS : NO_REGS);
/* There's no need for -fPIC (as opposed to -fpic) on Xtensa. */
if (flag_pic > 1)
flag_pic = 1;
+
+ /* Hot/cold partitioning does not work on this architecture, because of
+ constant pools (the load instruction cannot necessarily reach that far).
+ Therefore disable it on this architecture. */
+ if (flag_reorder_blocks_and_partition)
+ {
+ flag_reorder_blocks_and_partition = 0;
+ flag_reorder_blocks = 1;
+ }
}
switch (GET_MODE_CLASS (mode))
{
case MODE_FLOAT:
- if (GET_CODE (x) != CONST_DOUBLE)
- abort ();
+ gcc_assert (GET_CODE (x) == CONST_DOUBLE);
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
switch (mode)
break;
default:
- abort ();
+ gcc_unreachable ();
}
break;
case MODE_INT:
case MODE_PARTIAL_INT:
size = GET_MODE_SIZE (mode);
- if (size == 4)
+ switch (size)
{
+ case 4:
output_addr_const (file, x);
fputs ("\n", file);
- }
- else if (size == 8)
- {
+ break;
+
+ case 8:
output_addr_const (file, operand_subword (x, 0, 0, DImode));
fputs (", ", file);
output_addr_const (file, operand_subword (x, 1, 0, DImode));
fputs ("\n", file);
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else
- abort ();
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
f_reg = TREE_CHAIN (f_stk);
f_ndx = TREE_CHAIN (f_reg);
- stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
- reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg, NULL_TREE);
- ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx, NULL_TREE);
+ stk = build3 (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
+ reg = build3 (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg, NULL_TREE);
+ ndx = build3 (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx, NULL_TREE);
/* Call __builtin_saveregs; save the result in __va_reg */
u = make_tree (ptr_type_node, expand_builtin_saveregs ());
- t = build (MODIFY_EXPR, ptr_type_node, reg, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, reg, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Set the __va_stk member to ($arg_ptr - 32). */
u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
- u = fold (build (PLUS_EXPR, ptr_type_node, u, build_int_2 (-32, -1)));
- t = build (MODIFY_EXPR, ptr_type_node, stk, u);
+ u = fold_build2 (PLUS_EXPR, ptr_type_node, u,
+ build_int_cst (NULL_TREE, -32));
+ t = build2 (MODIFY_EXPR, ptr_type_node, stk, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
alignment offset for __va_stk. */
if (arg_words >= MAX_ARGS_IN_REGISTERS)
arg_words += 2;
- u = build_int_2 (arg_words * UNITS_PER_WORD, 0);
- t = build (MODIFY_EXPR, integer_type_node, ndx, u);
+ u = build_int_cst (NULL_TREE, arg_words * UNITS_PER_WORD);
+ t = build2 (MODIFY_EXPR, integer_type_node, ndx, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
tree f_ndx, ndx;
tree type_size, array, orig_ndx, addr, size, va_size, t;
tree lab_false, lab_over, lab_false2;
+ bool indirect;
+
+ indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
+ if (indirect)
+ type = build_pointer_type (type);
/* Handle complex values as separate real and imaginary parts. */
if (TREE_CODE (type) == COMPLEX_TYPE)
pre_p, NULL);
imag_part = get_initialized_tmp_var (imag_part, pre_p, NULL);
- return build (COMPLEX_EXPR, type, real_part, imag_part);
+ return build2 (COMPLEX_EXPR, type, real_part, imag_part);
}
f_stk = TYPE_FIELDS (va_list_type_node);
f_reg = TREE_CHAIN (f_stk);
f_ndx = TREE_CHAIN (f_reg);
- stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
- reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg, NULL_TREE);
- ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx, NULL_TREE);
+ stk = build3 (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
+ reg = build3 (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg, NULL_TREE);
+ ndx = build3 (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx, NULL_TREE);
type_size = size_in_bytes (type);
va_size = round_up (type_size, UNITS_PER_WORD);
{
int align = TYPE_ALIGN (type) / BITS_PER_UNIT;
- t = build (PLUS_EXPR, integer_type_node, orig_ndx,
- build_int_2 (align - 1, 0));
- t = build (BIT_AND_EXPR, integer_type_node, t,
- build_int_2 (-align, -1));
- t = build (MODIFY_EXPR, integer_type_node, orig_ndx, t);
+ t = build2 (PLUS_EXPR, integer_type_node, orig_ndx,
+ build_int_cst (NULL_TREE, align - 1));
+ t = build2 (BIT_AND_EXPR, integer_type_node, t,
+ build_int_cst (NULL_TREE, -align));
+ t = build2 (MODIFY_EXPR, integer_type_node, orig_ndx, t);
gimplify_and_add (t, pre_p);
}
(AP).__va_ndx = orig_ndx + __va_size (TYPE); */
t = fold_convert (integer_type_node, va_size);
- t = build (PLUS_EXPR, integer_type_node, orig_ndx, t);
- t = build (MODIFY_EXPR, integer_type_node, ndx, t);
+ t = build2 (PLUS_EXPR, integer_type_node, orig_ndx, t);
+ t = build2 (MODIFY_EXPR, integer_type_node, ndx, t);
gimplify_and_add (t, pre_p);
/* Check if the argument is in registers:
if ((AP).__va_ndx <= __MAX_ARGS_IN_REGISTERS * 4
- && !MUST_PASS_IN_STACK (type))
+ && !must_pass_in_stack (type))
__array = (AP).__va_reg; */
array = create_tmp_var (ptr_type_node, NULL);
lab_over = NULL;
- if (!MUST_PASS_IN_STACK (VOIDmode, type))
+ if (!targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
{
lab_false = create_artificial_label ();
lab_over = create_artificial_label ();
- t = build_int_2 (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, 0);
- t = build (GT_EXPR, boolean_type_node, ndx, t);
- t = build (COND_EXPR, void_type_node, t,
- build (GOTO_EXPR, void_type_node, lab_false),
- NULL);
+ t = build_int_cst (NULL_TREE, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD);
+ t = build2 (GT_EXPR, boolean_type_node, ndx, t);
+ t = build3 (COND_EXPR, void_type_node, t,
+ build1 (GOTO_EXPR, void_type_node, lab_false),
+ NULL_TREE);
gimplify_and_add (t, pre_p);
- t = build (MODIFY_EXPR, void_type_node, array, reg);
+ t = build2 (MODIFY_EXPR, void_type_node, array, reg);
gimplify_and_add (t, pre_p);
- t = build (GOTO_EXPR, void_type_node, lab_over);
+ t = build1 (GOTO_EXPR, void_type_node, lab_over);
gimplify_and_add (t, pre_p);
- t = build (LABEL_EXPR, void_type_node, lab_false);
+ t = build1 (LABEL_EXPR, void_type_node, lab_false);
gimplify_and_add (t, pre_p);
}
lab_false2 = create_artificial_label ();
- t = build_int_2 (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, 0);
- t = build (GT_EXPR, boolean_type_node, orig_ndx, t);
- t = build (COND_EXPR, void_type_node, t,
- build (GOTO_EXPR, void_type_node, lab_false2),
- NULL);
+ t = build_int_cst (NULL_TREE, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD);
+ t = build2 (GT_EXPR, boolean_type_node, orig_ndx, t);
+ t = build3 (COND_EXPR, void_type_node, t,
+ build1 (GOTO_EXPR, void_type_node, lab_false2),
+ NULL_TREE);
gimplify_and_add (t, pre_p);
t = size_binop (PLUS_EXPR, va_size, size_int (32));
t = fold_convert (integer_type_node, t);
- t = build (MODIFY_EXPR, integer_type_node, ndx, t);
+ t = build2 (MODIFY_EXPR, integer_type_node, ndx, t);
gimplify_and_add (t, pre_p);
- t = build (LABEL_EXPR, void_type_node, lab_false2);
+ t = build1 (LABEL_EXPR, void_type_node, lab_false2);
gimplify_and_add (t, pre_p);
- t = build (MODIFY_EXPR, void_type_node, array, stk);
+ t = build2 (MODIFY_EXPR, void_type_node, array, stk);
gimplify_and_add (t, pre_p);
if (lab_over)
{
- t = build (LABEL_EXPR, void_type_node, lab_over);
+ t = build1 (LABEL_EXPR, void_type_node, lab_over);
gimplify_and_add (t, pre_p);
}
are aligned differently. */
- if (BYTES_BIG_ENDIAN)
+ if (BYTES_BIG_ENDIAN && TREE_CODE (type_size) == INTEGER_CST)
{
t = size_int (PARM_BOUNDARY / BITS_PER_UNIT);
- t = fold (build (GE_EXPR, boolean_type_node, type_size, t));
- t = fold (build (COND_EXPR, sizetype, t, type_size, va_size));
+ t = fold_build2 (GE_EXPR, boolean_type_node, type_size, t);
+ t = fold_build3 (COND_EXPR, sizetype, t, va_size, type_size);
size = t;
}
else
size = va_size;
t = fold_convert (ptr_type_node, ndx);
- addr = build (PLUS_EXPR, ptr_type_node, array, t);
+ addr = build2 (PLUS_EXPR, ptr_type_node, array, t);
t = fold_convert (ptr_type_node, size);
- addr = build (MINUS_EXPR, ptr_type_node, addr, t);
+ addr = build2 (MINUS_EXPR, ptr_type_node, addr, t);
addr = fold_convert (build_pointer_type (type), addr);
- return build_fold_indirect_ref (addr);
+ if (indirect)
+ addr = build_va_arg_indirect_ref (addr);
+ return build_va_arg_indirect_ref (addr);
}
&& DECL_INITIAL (decl) == NULL_TREE))
flags |= SECTION_BSS; /* @nobits */
else
- warning ("only uninitialized variables can be placed in a "
+ warning (0, "only uninitialized variables can be placed in a "
".bss section");
}