1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by A. Lichnewsky, lich@inria.inria.fr.
Changes by Michael Meissner, meissner@osf.org.
- 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
+ 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
Brendan Eich, brendan@microunity.com.
This file is part of GCC.
MIPS_VOID_FTYPE_V2HI_V2HI,
MIPS_VOID_FTYPE_V4QI_V4QI,
+ /* For MIPS DSP REV 2 ASE. */
+ MIPS_V4QI_FTYPE_V4QI,
+ MIPS_SI_FTYPE_SI_SI_SI,
+ MIPS_DI_FTYPE_DI_USI_USI,
+ MIPS_DI_FTYPE_SI_SI,
+ MIPS_DI_FTYPE_USI_USI,
+ MIPS_V2HI_FTYPE_SI_SI_SI,
+
/* The last type. */
MIPS_MAX_FTYPE_MAX
};
struct mips_sim;
static enum mips_symbol_type mips_classify_symbol (rtx);
-static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
-static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
static bool mips_classify_address (struct mips_address_info *, rtx,
static bool mips_handle_option (size_t, const char *, int);
static struct machine_function *mips_init_machine_status (void);
static void print_operand_reloc (FILE *, rtx, const char **);
-#if TARGET_IRIX
-static void irix_output_external_libcall (rtx);
-#endif
static void mips_file_start (void);
-static void mips_file_end (void);
static bool mips_rewrite_small_data_p (rtx);
static int mips_small_data_pattern_1 (rtx *, void *);
static int mips_rewrite_small_data_1 (rtx *, void *);
tree, bool);
static bool mips_valid_pointer_mode (enum machine_mode);
static bool mips_vector_mode_supported_p (enum machine_mode);
-static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
+static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void mips_init_builtins (void);
static void mips_encode_section_info (tree, rtx, int);
static void mips_extra_live_on_entry (bitmap);
static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
+static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
/* Structure to be filled in by compute_frame_size with register
save masks, and offsets for the current function. */
/* Next label # for each statement for Silicon Graphics IRIS systems. */
int sym_lineno = 0;
-/* Linked list of all externals that are to be emitted when optimizing
- for the global pointer if they haven't been declared by the end of
- the program with an appropriate .comm or initialization. */
-
-struct extern_list GTY (())
-{
- struct extern_list *next; /* next external */
- const char *name; /* name of the external */
- int size; /* size in bytes */
-};
-
-static GTY (()) struct extern_list *extern_head = 0;
-
/* Name of the file containing the current function. */
const char *current_function_file = "";
/* MIPS32 Release 2 */
{ "m4k", PROCESSOR_M4K, 33 },
- { "24k", PROCESSOR_24K, 33 },
- { "24kc", PROCESSOR_24K, 33 }, /* 24K no FPU */
- { "24kf", PROCESSOR_24K, 33 }, /* 24K 1:2 FPU */
- { "24kx", PROCESSOR_24KX, 33 }, /* 24K 1:1 FPU */
+ { "4kec", PROCESSOR_4KC, 33 },
+ { "4kem", PROCESSOR_4KC, 33 },
+ { "4kep", PROCESSOR_4KP, 33 },
+ { "24kc", PROCESSOR_24KC, 33 }, /* 24K no FPU */
+ { "24kf", PROCESSOR_24KF, 33 }, /* 24K 1:2 FPU */
+ { "24kx", PROCESSOR_24KX, 33 }, /* 24K 1:1 FPU */
+ { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
+ { "24kef", PROCESSOR_24KF, 33 },
+ { "24kex", PROCESSOR_24KX, 33 },
+ { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
+ { "34kf", PROCESSOR_24KF, 33 },
+ { "34kx", PROCESSOR_24KX, 33 },
+ { "74kc", PROCESSOR_74KC, 33 },
+ { "74kf", PROCESSOR_74KF, 33 },
+ { "74kx", PROCESSOR_74KX, 33 },
/* MIPS64 */
{ "5kc", PROCESSOR_5KC, 64 },
COSTS_N_INSNS (256), /* fp_div_sf */ \
COSTS_N_INSNS (256) /* fp_div_df */
+static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
+ {
+ COSTS_N_INSNS (1), /* fp_add */
+ COSTS_N_INSNS (1), /* fp_mult_sf */
+ COSTS_N_INSNS (1), /* fp_mult_df */
+ COSTS_N_INSNS (1), /* fp_div_sf */
+ COSTS_N_INSNS (1), /* fp_div_df */
+ COSTS_N_INSNS (1), /* int_mult_si */
+ COSTS_N_INSNS (1), /* int_mult_di */
+ COSTS_N_INSNS (1), /* int_div_si */
+ COSTS_N_INSNS (1), /* int_div_di */
+ 2, /* branch_cost */
+ 4 /* memory_latency */
+ };
+
static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
{
{ /* R3000 */
{ /* 20KC */
DEFAULT_COSTS
},
- { /* 24k */
+ { /* 24KC */
+ SOFT_FP_COSTS,
+ COSTS_N_INSNS (5), /* int_mult_si */
+ COSTS_N_INSNS (5), /* int_mult_di */
+ COSTS_N_INSNS (41), /* int_div_si */
+ COSTS_N_INSNS (41), /* int_div_di */
+ 1, /* branch_cost */
+ 4 /* memory_latency */
+ },
+ { /* 24KF */
COSTS_N_INSNS (8), /* fp_add */
COSTS_N_INSNS (8), /* fp_mult_sf */
COSTS_N_INSNS (10), /* fp_mult_df */
1, /* branch_cost */
4 /* memory_latency */
},
- { /* 24kx */
+ { /* 24KX */
+ COSTS_N_INSNS (4), /* fp_add */
+ COSTS_N_INSNS (4), /* fp_mult_sf */
+ COSTS_N_INSNS (5), /* fp_mult_df */
+ COSTS_N_INSNS (17), /* fp_div_sf */
+ COSTS_N_INSNS (32), /* fp_div_df */
+ COSTS_N_INSNS (5), /* int_mult_si */
+ COSTS_N_INSNS (5), /* int_mult_di */
+ COSTS_N_INSNS (41), /* int_div_si */
+ COSTS_N_INSNS (41), /* int_div_di */
+ 1, /* branch_cost */
+ 4 /* memory_latency */
+ },
+ { /* 74KC */
+ SOFT_FP_COSTS,
+ COSTS_N_INSNS (5), /* int_mult_si */
+ COSTS_N_INSNS (5), /* int_mult_di */
+ COSTS_N_INSNS (41), /* int_div_si */
+ COSTS_N_INSNS (41), /* int_div_di */
+ 1, /* branch_cost */
+ 4 /* memory_latency */
+ },
+ { /* 74KF */
+ COSTS_N_INSNS (8), /* fp_add */
+ COSTS_N_INSNS (8), /* fp_mult_sf */
+ COSTS_N_INSNS (10), /* fp_mult_df */
+ COSTS_N_INSNS (34), /* fp_div_sf */
+ COSTS_N_INSNS (64), /* fp_div_df */
+ COSTS_N_INSNS (5), /* int_mult_si */
+ COSTS_N_INSNS (5), /* int_mult_di */
+ COSTS_N_INSNS (41), /* int_div_si */
+ COSTS_N_INSNS (41), /* int_div_di */
+ 1, /* branch_cost */
+ 4 /* memory_latency */
+ },
+ { /* 74KX */
COSTS_N_INSNS (4), /* fp_add */
COSTS_N_INSNS (4), /* fp_mult_sf */
COSTS_N_INSNS (5), /* fp_mult_df */
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
#undef TARGET_ASM_FILE_START
-#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_START mips_file_start
-#define TARGET_ASM_FILE_END mips_file_end
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
struct gcc_target targetm = TARGET_INITIALIZER;
\f
+/* Return true if SYMBOL_REF X is associated with a global symbol
+ (in the STB_GLOBAL sense). */
+
+static bool
+mips_global_symbol_p (rtx x)
+{
+ tree decl;
+
+ decl = SYMBOL_REF_DECL (x);
+ if (!decl)
+ return !SYMBOL_REF_LOCAL_P (x);
+
+ /* Weakref symbols are not TREE_PUBLIC, but their targets are global
+ or weak symbols. Relocations in the object file will be against
+ the target symbol, so it's that symbol's binding that matters here. */
+ return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
+}
+
+/* Return true if SYMBOL_REF X binds locally. */
+
+static bool
+mips_symbol_binds_local_p (rtx x)
+{
+ return (SYMBOL_REF_DECL (x)
+ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
+ : SYMBOL_REF_LOCAL_P (x));
+}
+
/* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
static enum mips_symbol_type
mips_classify_symbol (rtx x)
{
+ if (TARGET_RTP_PIC)
+ return SYMBOL_GOT_DISP;
+
if (GET_CODE (x) == LABEL_REF)
{
if (TARGET_MIPS16)
return SYMBOL_CONSTANT_POOL;
if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
- return SYMBOL_GOT_LOCAL;
+ return SYMBOL_GOT_PAGE_OFST;
return SYMBOL_GENERAL;
}
if (TARGET_ABICALLS)
{
- if (SYMBOL_REF_DECL (x) == 0)
- {
- if (!SYMBOL_REF_LOCAL_P (x))
- return SYMBOL_GOT_GLOBAL;
- }
- else
- {
- /* Don't use GOT accesses for locally-binding symbols if
- TARGET_ABSOLUTE_ABICALLS. Otherwise, there are three
- cases to consider:
-
- - o32 PIC (either with or without explicit relocs)
- - n32/n64 PIC without explicit relocs
- - n32/n64 PIC with explicit relocs
-
- In the first case, both local and global accesses will use an
- R_MIPS_GOT16 relocation. We must correctly predict which of
- the two semantics (local or global) the assembler and linker
- will apply. The choice doesn't depend on the symbol's
- visibility, so we deliberately ignore decl_visibility and
- binds_local_p here.
-
- In the second case, the assembler will not use R_MIPS_GOT16
- relocations, but it chooses between local and global accesses
- in the same way as for o32 PIC.
-
- In the third case we have more freedom since both forms of
- access will work for any kind of symbol. However, there seems
- little point in doing things differently. */
- if (DECL_P (SYMBOL_REF_DECL (x))
- && TREE_PUBLIC (SYMBOL_REF_DECL (x))
- && !(TARGET_ABSOLUTE_ABICALLS
- && targetm.binds_local_p (SYMBOL_REF_DECL (x))))
- return SYMBOL_GOT_GLOBAL;
- }
+ /* Don't use GOT accesses for locally-binding symbols; we can use
+ %hi and %lo instead. */
+ if (TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x))
+ return SYMBOL_GENERAL;
- if (!TARGET_ABSOLUTE_ABICALLS)
- return SYMBOL_GOT_LOCAL;
- }
+ /* There are three cases to consider:
- return SYMBOL_GENERAL;
-}
+ - o32 PIC (either with or without explicit relocs)
+ - n32/n64 PIC without explicit relocs
+ - n32/n64 PIC with explicit relocs
+ In the first case, both local and global accesses will use an
+ R_MIPS_GOT16 relocation. We must correctly predict which of
+ the two semantics (local or global) the assembler and linker
+ will apply. The choice depends on the symbol's binding rather
+ than its visibility.
-/* Split X into a base and a constant offset, storing them in *BASE
- and *OFFSET respectively. */
-
-static void
-mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
-{
- *offset = 0;
+ In the second case, the assembler will not use R_MIPS_GOT16
+ relocations, but it chooses between local and global accesses
+ in the same way as for o32 PIC.
- if (GET_CODE (x) == CONST)
- x = XEXP (x, 0);
+ In the third case we have more freedom since both forms of
+ access will work for any kind of symbol. However, there seems
+ little point in doing things differently. */
+ if (mips_global_symbol_p (x))
+ return SYMBOL_GOT_DISP;
- if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
- {
- *offset += INTVAL (XEXP (x, 1));
- x = XEXP (x, 0);
+ return SYMBOL_GOT_PAGE_OFST;
}
- *base = x;
-}
+ return SYMBOL_GENERAL;
+}
-/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
- to the same object as SYMBOL, or to the same object_block. */
+/* Returns true if OFFSET is within the range [0, ALIGN), where ALIGN
+ is the alignment (in bytes) of SYMBOL_REF X. */
static bool
-mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
+mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
{
- if (GET_CODE (symbol) != SYMBOL_REF)
- return false;
+ /* If for some reason we can't get the alignment for the
+ symbol, initializing this to one means we won't accept any
+ offset. */
+ HOST_WIDE_INT align = 1;
+ tree t;
- if (CONSTANT_POOL_ADDRESS_P (symbol)
- && offset >= 0
- && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
- return true;
+ /* Get the alignment of the symbol we're referring to. */
+ t = SYMBOL_REF_DECL (x);
+ if (t)
+ align = DECL_ALIGN_UNIT (t);
- if (SYMBOL_REF_DECL (symbol) != 0
- && offset >= 0
- && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
+ if (offset >= 0 && offset < align)
return true;
-
- if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
- && SYMBOL_REF_BLOCK (symbol)
- && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
- && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
- < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
- return true;
-
return false;
}
-
/* Return true if X is a symbolic constant that can be calculated in
the same way as a bare symbol. If it is, store the type of the
symbol in *SYMBOL_TYPE. */
bool
mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
{
- HOST_WIDE_INT offset;
+ rtx offset;
- mips_split_const (x, &x, &offset);
+ split_const (x, &x, &offset);
if (UNSPEC_ADDRESS_P (x))
- *symbol_type = UNSPEC_ADDRESS_TYPE (x);
+ {
+ *symbol_type = UNSPEC_ADDRESS_TYPE (x);
+ x = UNSPEC_ADDRESS (x);
+ }
else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
{
*symbol_type = mips_classify_symbol (x);
else
return false;
- if (offset == 0)
+ if (offset == const0_rtx)
return true;
/* Check whether a nonzero offset is valid for the underlying
sign-extended. In this case we can't allow an arbitrary offset
in case the 32-bit value X + OFFSET has a different sign from X. */
if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
- return mips_offset_within_object_p (x, offset);
+ return offset_within_block_p (x, INTVAL (offset));
/* In other cases the relocations can handle any offset. */
return true;
case SYMBOL_SMALL_DATA:
/* Make sure that the offset refers to something within the
- underlying object. This should guarantee that the final
+ same object block. This should guarantee that the final
PC- or GP-relative offset is within the 16-bit limit. */
- return mips_offset_within_object_p (x, offset);
+ return offset_within_block_p (x, INTVAL (offset));
- case SYMBOL_GOT_LOCAL:
+ case SYMBOL_GOT_PAGE_OFST:
case SYMBOL_GOTOFF_PAGE:
- /* The linker should provide enough local GOT entries for a
- 16-bit offset. Larger offsets may lead to GOT overflow. */
- return SMALL_OPERAND (offset);
+ /* If the symbol is global, the GOT entry will contain the symbol's
+ address, and we will apply a 16-bit offset after loading it.
+ If the symbol is local, the linker should provide enough local
+ GOT entries for a 16-bit offset, but larger offsets may lead
+ to GOT overflow. */
+ return SMALL_INT (offset);
+
+ case SYMBOL_TPREL:
+ case SYMBOL_DTPREL:
+ /* There is no carry between the HI and LO REL relocations, so the
+ offset is only valid if we know it won't lead to such a carry. */
+ return mips_offset_within_alignment_p (x, INTVAL (offset));
- case SYMBOL_GOT_GLOBAL:
- case SYMBOL_GOTOFF_GLOBAL:
+ case SYMBOL_GOT_DISP:
+ case SYMBOL_GOTOFF_DISP:
case SYMBOL_GOTOFF_CALL:
case SYMBOL_GOTOFF_LOADGP:
case SYMBOL_TLSGD:
case SYMBOL_TLSLDM:
- case SYMBOL_DTPREL:
- case SYMBOL_TPREL:
case SYMBOL_GOTTPREL:
case SYMBOL_TLS:
+ case SYMBOL_HALF:
return false;
}
gcc_unreachable ();
int
mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
{
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
{
if (!strict)
return true;
/* PC-relative addressing is only available for lw and ld. */
return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
- case SYMBOL_GOT_LOCAL:
+ case SYMBOL_GOT_PAGE_OFST:
return true;
- case SYMBOL_GOT_GLOBAL:
+ case SYMBOL_GOT_DISP:
/* The address will have to be loaded from the GOT first. */
return false;
case SYMBOL_GOTOFF_PAGE:
- case SYMBOL_GOTOFF_GLOBAL:
+ case SYMBOL_GOTOFF_DISP:
case SYMBOL_GOTOFF_CALL:
case SYMBOL_GOTOFF_LOADGP:
case SYMBOL_TLS:
case SYMBOL_64_HIGH:
case SYMBOL_64_MID:
case SYMBOL_64_LOW:
+ case SYMBOL_HALF:
return true;
}
gcc_unreachable ();
static bool
mips_cannot_force_const_mem (rtx x)
{
- rtx base;
- HOST_WIDE_INT offset;
+ rtx base, offset;
if (!TARGET_MIPS16)
{
if (GET_CODE (x) == CONST_INT)
return true;
- mips_split_const (x, &base, &offset);
- if (symbolic_operand (base, VOIDmode) && SMALL_OPERAND (offset))
+ split_const (x, &base, &offset);
+ if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
return true;
}
return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
case SYMBOL_SMALL_DATA:
+ case SYMBOL_HALF:
return 1;
case SYMBOL_CONSTANT_POOL:
extended instruction. */
return 2;
- case SYMBOL_GOT_LOCAL:
- case SYMBOL_GOT_GLOBAL:
+ case SYMBOL_GOT_PAGE_OFST:
+ case SYMBOL_GOT_DISP:
/* Unless -funit-at-a-time is in effect, we can't be sure whether
the local/global classification is accurate. See override_options
for details.
return 3;
case SYMBOL_GOTOFF_PAGE:
- case SYMBOL_GOTOFF_GLOBAL:
+ case SYMBOL_GOTOFF_DISP:
case SYMBOL_GOTOFF_CALL:
case SYMBOL_GOTOFF_LOADGP:
case SYMBOL_64_HIGH:
{
struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
enum mips_symbol_type symbol_type;
- HOST_WIDE_INT offset;
+ rtx offset;
switch (GET_CODE (x))
{
/* Otherwise try splitting the constant into a base and offset.
16-bit offsets can be added using an extra addiu. Larger offsets
must be calculated separately and then added to the base. */
- mips_split_const (x, &x, &offset);
+ split_const (x, &x, &offset);
if (offset != 0)
{
int n = mips_const_insns (x);
if (n != 0)
{
- if (SMALL_OPERAND (offset))
+ if (SMALL_INT (offset))
return n + 1;
else
- return n + 1 + mips_build_integer (codes, offset);
+ return n + 1 + mips_build_integer (codes, INTVAL (offset));
}
}
return 0;
{
rtx high;
- if (TARGET_MIPS16)
- high = mips16_gp_pseudo_reg ();
- else
+ if (!TARGET_MIPS16)
high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
+ else if (no_new_pseudos)
+ {
+ emit_insn (gen_load_const_gp (copy_rtx (temp)));
+ high = temp;
+ }
+ else
+ high = mips16_gp_pseudo_reg ();
return gen_rtx_LO_SUM (Pmode, high, addr);
}
rtx
mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
{
- rtx base;
- HOST_WIDE_INT offset;
+ rtx base, offset;
- mips_split_const (address, &base, &offset);
+ split_const (address, &base, &offset);
base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
UNSPEC_ADDRESS_FIRST + symbol_type);
- return plus_constant (gen_rtx_CONST (Pmode, base), offset);
+ if (offset != const0_rtx)
+ base = gen_rtx_PLUS (Pmode, base, offset);
+ return gen_rtx_CONST (Pmode, base);
}
static void
mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
{
- rtx base;
- HOST_WIDE_INT offset;
+ rtx base, offset;
/* Split moves of big integers into smaller pieces. */
if (splittable_const_int_operand (src, mode))
/* If we have (const (plus symbol offset)), load the symbol first
and then add in the offset. This is usually better than forcing
the constant into memory, at least in non-mips16 code. */
- mips_split_const (src, &base, &offset);
+ split_const (src, &base, &offset);
if (!TARGET_MIPS16
- && offset != 0
- && (!no_new_pseudos || SMALL_OPERAND (offset)))
+ && offset != const0_rtx
+ && (!no_new_pseudos || SMALL_INT (offset)))
{
base = mips_force_temporary (dest, base);
- emit_move_insn (dest, mips_add_offset (0, base, offset));
+ emit_move_insn (dest, mips_add_offset (0, base, INTVAL (offset)));
return;
}
return true;
}
- /* We can use cmpi for an xor with an unsigned 16 bit value. */
+ /* We can use cmpi for an xor with an unsigned 16-bit value. */
if ((outer_code) == XOR
&& INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
{
}
/* We may be able to use slt or sltu for a comparison with a
- signed 16 bit value. (The boundary conditions aren't quite
+ signed 16-bit value. (The boundary conditions aren't quite
right, but this is just a heuristic anyhow.) */
if (((outer_code) == LT || (outer_code) == LE
|| (outer_code) == GE || (outer_code) == GT
return true;
case SIGN_EXTEND:
- /* A sign extend from SImode to DImode in 64 bit mode is often
+ /* A sign extend from SImode to DImode in 64-bit mode is often
zero instructions, because the result can often be used
directly by another instruction; we'll call it one. */
if (TARGET_64BIT && mode == DImode
if (FP_REG_RTX_P (dest))
{
/* Loading an FPR from memory or from GPRs. */
- emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
- emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
- copy_rtx (dest)));
+ if (ISA_HAS_MXHC1)
+ {
+ dest = gen_lowpart (DFmode, dest);
+ emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
+ emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
+ copy_rtx (dest)));
+ }
+ else
+ {
+ emit_insn (gen_load_df_low (copy_rtx (dest),
+ mips_subword (src, 0)));
+ emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
+ copy_rtx (dest)));
+ }
}
else if (FP_REG_RTX_P (src))
{
/* Storing an FPR into memory or GPRs. */
- emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
- emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
+ if (ISA_HAS_MXHC1)
+ {
+ src = gen_lowpart (DFmode, src);
+ emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
+ emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
+ }
+ else
+ {
+ emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
+ emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
+ }
}
else
{
if (src_code == CONST_INT)
{
/* Don't use the X format, because that will give out of
- range numbers for 64 bit hosts and 32 bit targets. */
+ range numbers for 64-bit hosts and 32-bit targets. */
if (!TARGET_MIPS16)
return "li\t%0,%1\t\t\t# %X1";
comparison. */
static bool
-mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
+mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
enum machine_mode mode)
{
HOST_WIDE_INT original, plus_one;
if (GET_CODE (*cmp1) != CONST_INT)
return false;
-
+
original = INTVAL (*cmp1);
plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
-
+
switch (*code)
{
case LE:
return true;
}
break;
-
+
case LEU:
if (plus_one != 0)
{
return true;
}
break;
-
+
default:
return false;
}
-
+
return false;
}
operands[1]));
}
\f
+/* Return true if calls to X can use R_MIPS_CALL* relocations. */
+
+static bool
+mips_ok_for_lazy_binding_p (rtx x)
+{
+ return (TARGET_USE_GOT
+ && GET_CODE (x) == SYMBOL_REF
+ && !mips_symbol_binds_local_p (x));
+}
+
/* Load function address ADDR into register DEST. SIBCALL_P is true
if the address is needed for a sibling call. */
{
/* If we're generating PIC, and this call is to a global function,
try to allow its address to be resolved lazily. This isn't
- possible for NewABI sibcalls since the value of $gp on entry
+ possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
to the stub would be our caller's gp, not ours. */
if (TARGET_EXPLICIT_RELOCS
- && !(sibcall_p && TARGET_NEWABI)
- && global_got_operand (addr, VOIDmode))
+ && !(sibcall_p && TARGET_CALL_SAVED_GP)
+ && mips_ok_for_lazy_binding_p (addr))
{
rtx high, lo_sum_symbol;
insn = emit_call_insn (pattern);
/* Lazy-binding stubs require $gp to be valid on entry. */
- if (global_got_operand (orig_addr, VOIDmode))
+ if (mips_ok_for_lazy_binding_p (orig_addr))
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
}
src = gen_rtx_REG (SFmode, true_regnum (src));
fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
- fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
+ fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
emit_move_insn (copy_rtx (fp1), src);
emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
if (mips_abi != ABI_EABI || !info.fpr_p)
cum->num_gprs = info.reg_offset + info.reg_words;
else if (info.reg_words > 0)
- cum->num_fprs += FP_INC;
+ cum->num_fprs += MAX_FPRS_PER_FMT;
if (info.stack_words > 0)
cum->stack_words = info.stack_offset + info.stack_words;
&& host_integerp (TYPE_SIZE_UNIT (type), 1)
&& named)
{
- /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
- structure contains a double in its entirety, then that 64 bit
+ /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
+ structure contains a double in its entirety, then that 64-bit
chunk is passed in a floating point register. */
tree field;
if (field != 0)
{
/* Now handle the special case by returning a PARALLEL
- indicating where each 64 bit chunk goes. INFO.REG_WORDS
+ indicating where each 64-bit chunk goes. INFO.REG_WORDS
chunks are passed in registers. */
unsigned int i;
HOST_WIDE_INT bitpos;
if (!info.fpr_p)
return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
- else if (info.reg_offset == 1)
- /* This code handles the special o32 case in which the second word
- of the argument structure is passed in floating-point registers. */
- return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
+ else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info.reg_offset > 0)
+ /* In o32, the second argument is always passed in $f14
+ for TARGET_DOUBLE_FLOAT, regardless of whether the
+ first argument was a word or doubleword. */
+ return gen_rtx_REG (mode, FP_ARG_FIRST + 2);
else
return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
}
mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
- for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
+ for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
+ i += MAX_FPRS_PER_FMT)
{
rtx ptr, mem;
t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), t,
build_int_cst (NULL_TREE,
cum->stack_words * UNITS_PER_WORD));
- t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Emit code to initialize GTOP, the top of the GPR save area. */
t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
- t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Emit code to initialize FTOP, the top of the FPR save area.
if (fpr_offset)
t = build2 (PLUS_EXPR, TREE_TYPE (ftop), t,
build_int_cst (NULL_TREE, -fpr_offset));
- t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Emit code to initialize GOFF, the offset from GTOP of the
next GPR argument. */
- t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
build_int_cst (NULL_TREE, gpr_save_area_size));
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Likewise emit code to initialize FOFF, the offset from FTOP
of the next FPR argument. */
- t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
build_int_cst (NULL_TREE, fpr_save_area_size));
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* [1] Emit code for: off &= -rsize. */
t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
build_int_cst (NULL_TREE, -rsize));
- t = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
+ t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
gimplify_and_add (t, pre_p);
}
osize = rsize;
u = fold_convert (TREE_TYPE (ovfl),
build_int_cst (NULL_TREE, -osize));
t = build2 (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
- align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
+ align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
}
else
align = NULL;
len = INTVAL (size);
pos = INTVAL (position);
-
- if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
+
+ if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
|| pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
return false;
int i, start, regno;
enum machine_mode mode;
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
/* The following code determines the architecture and register size.
mips_set_tune (mips_arch_info);
/* Set cost structure for the processor. */
- mips_cost = &mips_rtx_cost_data[mips_tune];
+ if (optimize_size)
+ mips_cost = &mips_rtx_cost_optimize_size;
+ else
+ mips_cost = &mips_rtx_cost_data[mips_tune];
if ((target_flags_explicit & MASK_64BIT) != 0)
{
only one right answer here. */
if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
- else if (!TARGET_64BIT && TARGET_FLOAT64)
- error ("unsupported combination: %s", "-mgp32 -mfp64");
+ else if (!TARGET_64BIT && TARGET_FLOAT64
+ && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
+ error ("-mgp32 and -mfp64 can only be combined if the target"
+ " supports the mfhc1 and mthc1 instructions");
else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
error ("unsupported combination: %s", "-mfp64 -msingle-float");
}
warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
}
+ if (TARGET_VXWORKS_RTP && mips_section_threshold > 0)
+ warning (0, "-G and -mrtp are incompatible");
+
/* mips_split_addresses is a half-way house between explicit
relocations and the traditional assembler macros. It can
split absolute 32-bit symbolic constants into a high/lo_sum
if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
error ("-mips3d/-mpaired-single must be used with -mips64");
+ /* If TARGET_DSPR2, enable MASK_DSP. */
+ if (TARGET_DSPR2)
+ target_flags |= MASK_DSP;
+
if (TARGET_MIPS16 && TARGET_DSP)
error ("-mips16 and -mdsp cannot be used together");
temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
else if (FP_REG_P (regno))
- temp = ((regno % FP_INC) == 0)
+ temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
+ || (MIN_FPRS_PER_FMT == 1
+ && size <= UNITS_PER_FPREG))
&& (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
|| class == MODE_VECTOR_FLOAT)
&& size <= UNITS_PER_FPVALUE)
&& size >= MIN_UNITS_PER_WORD
&& size <= UNITS_PER_FPREG)
/* Allow TFmode for CCmode reloads. */
- || (ISA_HAS_8CC && mode == TFmode));
+ || (ISA_HAS_8CC && mode == TFmode)));
else if (ACC_REG_P (regno))
temp = (INTEGRAL_MODE_P (mode)
then lowered by mips_rewrite_small_data. */
mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
- mips_split_p[SYMBOL_GOT_LOCAL] = true;
+ mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
if (TARGET_NEWABI)
{
mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
- mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
+ mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
}
else
{
mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
- mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
+ mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
}
if (TARGET_XGOT)
{
/* The HIGH and LO_SUM are matched by special .md patterns. */
- mips_split_p[SYMBOL_GOT_GLOBAL] = true;
+ mips_split_p[SYMBOL_GOT_DISP] = true;
- mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
- mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
- mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
+ mips_split_p[SYMBOL_GOTOFF_DISP] = true;
+ mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
+ mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
mips_split_p[SYMBOL_GOTOFF_CALL] = true;
mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
else
{
if (TARGET_NEWABI)
- mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
+ mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
else
- mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
+ mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
}
}
mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
+ mips_lo_relocs[SYMBOL_HALF] = "%half(";
+
/* We don't have a thread pointer access instruction on MIPS16, or
appropriate TLS relocations. */
if (TARGET_MIPS16)
{
enum mips_symbol_type symbol_type;
const char *p;
- rtx base;
- HOST_WIDE_INT offset;
+ rtx base, offset;
if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
/* If OP uses an UNSPEC address, we want to print the inner symbol. */
- mips_split_const (op, &base, &offset);
+ split_const (op, &base, &offset);
if (UNSPEC_ADDRESS_P (base))
- op = plus_constant (UNSPEC_ADDRESS (base), offset);
+ op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
fputs (relocs[symbol_type], file);
output_addr_const (file, op);
the -G limit but declared by the user to be in a section other
than .sbss or .sdata. */
-int
-mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
-{
- register struct extern_list *p;
-
- if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
- {
- p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
- p->next = extern_head;
- p->name = name;
- p->size = int_size_in_bytes (TREE_TYPE (decl));
- extern_head = p;
- }
-
- if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
- {
- p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
- p->next = extern_head;
- p->name = name;
- p->size = -1;
- extern_head = p;
- }
-
- return 0;
-}
-
-#if TARGET_IRIX
-static void
-irix_output_external_libcall (rtx fun)
+void
+mips_output_external (FILE *file, tree decl, const char *name)
{
- register struct extern_list *p;
+ default_elf_asm_output_external (file, decl, name);
- if (mips_abi == ABI_32)
+ /* We output the name if and only if TREE_SYMBOL_REFERENCED is
+ set in order to avoid putting out names that are never really
+ used. */
+ if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
{
- p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
- p->next = extern_head;
- p->name = XSTR (fun, 0);
- p->size = -1;
- extern_head = p;
+ if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
+ {
+ fputs ("\t.extern\t", file);
+ assemble_name (file, name);
+ fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
+ int_size_in_bytes (TREE_TYPE (decl)));
+ }
+ else if (TARGET_IRIX
+ && mips_abi == ABI_32
+ && TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
+ `.global name .text' directive for every used but
+ undefined function. If we don't, the linker may perform
+ an optimization (skipping over the insns that set $gp)
+ when it is unsafe. */
+ fputs ("\t.globl ", file);
+ assemble_name (file, name);
+ fputs (" .text\n", file);
+ }
}
}
-#endif
\f
/* Emit a new filename to a stream. If we are smuggling stabs, try to
put out a MIPS ECOFF file and a stab. */
}
#endif
\f
-/* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
- .externs for any small-data variables that turned out to be external. */
-
-static void
-mips_file_end (void)
-{
- tree name_tree;
- struct extern_list *p;
-
- if (extern_head)
- {
- fputs ("\n", asm_out_file);
-
- for (p = extern_head; p != 0; p = p->next)
- {
- name_tree = get_identifier (p->name);
-
- /* Positively ensure only one .extern for any given symbol. */
- if (!TREE_ASM_WRITTEN (name_tree)
- && TREE_SYMBOL_REFERENCED (name_tree))
- {
- TREE_ASM_WRITTEN (name_tree) = 1;
- /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
- `.global name .text' directive for every used but
- undefined function. If we don't, the linker may perform
- an optimization (skipping over the insns that set $gp)
- when it is unsafe. */
- if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
- {
- fputs ("\t.globl ", asm_out_file);
- assemble_name (asm_out_file, p->name);
- fputs (" .text\n", asm_out_file);
- }
- else
- {
- fputs ("\t.extern\t", asm_out_file);
- assemble_name (asm_out_file, p->name);
- fprintf (asm_out_file, ", %d\n", p->size);
- }
- }
- }
- }
-}
-
/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
elfos.h version, but we also need to handle -muninit-const-in-rodata. */
{
unsigned int regno;
- /* $gp is always available in non-abicalls code. */
- if (!TARGET_ABICALLS)
+ /* $gp is always available unless we're using a GOT. */
+ if (!TARGET_USE_GOT)
return GLOBAL_POINTER_REGNUM;
/* We must always provide $gp when it is used implicitly. */
/* We need a global pointer, but perhaps we can use a call-clobbered
register instead of $gp. */
- if (TARGET_NEWABI && current_function_is_leaf)
+ if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
if (!regs_ever_live[regno]
&& call_used_regs[regno]
static bool
mips_save_reg_p (unsigned int regno)
{
- /* We only need to save $gp for NewABI PIC. */
+ /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
+ if we have not chosen a call-clobbered substitute. */
if (regno == GLOBAL_POINTER_REGNUM)
- return (TARGET_ABICALLS && TARGET_NEWABI
- && cfun->machine->global_pointer == regno);
+ return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
/* Check call-saved registers. */
if (regs_ever_live[regno] && !call_used_regs[regno])
return true;
+ /* Save both registers in an FPR pair if either one is used. This is
+ needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
+ register to be used without the even register. */
+ if (FP_REG_P (regno)
+ && MAX_FPRS_PER_FMT == 2
+ && regs_ever_live[regno + 1]
+ && !call_used_regs[regno + 1])
+ return true;
+
/* We need to save the old frame pointer before setting up a new one. */
if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
return true;
}
/* This loop must iterate over the same space as its companion in
- save_restore_insns. */
- for (regno = (FP_REG_LAST - FP_INC + 1);
+ mips_for_each_saved_reg. */
+ for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
regno >= FP_REG_FIRST;
- regno -= FP_INC)
+ regno -= MAX_FPRS_PER_FMT)
{
if (mips_save_reg_p (regno))
{
- fp_reg_size += FP_INC * UNITS_PER_FPREG;
- fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
+ fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
+ fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
}
}
cfun->machine->frame.fmask = fmask;
cfun->machine->frame.initialized = reload_completed;
cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
- cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
+ cfun->machine->frame.num_fp = (fp_reg_size
+ / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
if (mask)
{
offset = (args_size + cprestore_size + var_size
+ gp_reg_rounded + fp_reg_size
- - FP_INC * UNITS_PER_FPREG);
+ - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
cfun->machine->frame.fp_sp_offset = offset;
cfun->machine->frame.fp_save_offset = offset - total_size;
}
/* Save registers starting from high to low. The debuggers prefer at least
the return register be stored at func+4, and also it allows us not to
- need a nop in the epilog if at least one register is reloaded in
+ need a nop in the epilogue if at least one register is reloaded in
addition to return address. */
offset = cfun->machine->frame.gp_sp_offset - sp_offset;
for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
compute_frame_size. */
offset = cfun->machine->frame.fp_sp_offset - sp_offset;
fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
- for (regno = (FP_REG_LAST - FP_INC + 1);
+ for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
regno >= FP_REG_FIRST;
- regno -= FP_INC)
+ regno -= MAX_FPRS_PER_FMT)
if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
{
mips_save_restore_reg (fpr_mode, regno, offset, fn);
enum mips_loadgp_style
mips_current_loadgp_style (void)
{
- if (!TARGET_ABICALLS || cfun->machine->global_pointer == 0)
+ if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
return LOADGP_NONE;
+ if (TARGET_RTP_PIC)
+ return LOADGP_RTP;
+
if (TARGET_ABSOLUTE_ABICALLS)
return LOADGP_ABSOLUTE;
static void
mips_emit_loadgp (void)
{
- rtx addr, offset, incoming_address;
+ rtx addr, offset, incoming_address, base, index;
switch (mips_current_loadgp_style ())
{
mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
}
- emit_insn (gen_loadgp_noshared (mips_gnu_local_gp));
+ emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
break;
case LOADGP_NEWABI:
addr = XEXP (DECL_RTL (current_function_decl), 0);
offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
- emit_insn (gen_loadgp (offset, incoming_address));
+ emit_insn (gen_loadgp_newabi (offset, incoming_address));
+ if (!TARGET_EXPLICIT_RELOCS)
+ emit_insn (gen_loadgp_blockage ());
+ break;
+
+ case LOADGP_RTP:
+ base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
+ index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
+ emit_insn (gen_loadgp_rtp (base, index));
if (!TARGET_EXPLICIT_RELOCS)
emit_insn (gen_loadgp_blockage ());
break;
#endif
/* In mips16 mode, we may need to generate a 32 bit to handle
- floating point arguments. The linker will arrange for any 32 bit
- functions to call this stub, which will then jump to the 16 bit
+ floating point arguments. The linker will arrange for any 32-bit
+ functions to call this stub, which will then jump to the 16-bit
function proper. */
if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
&& current_function_args_info.fp_code != 0)
{
rtx offset = GEN_INT (cfun->machine->frame.args_size);
if (SMALL_OPERAND (cfun->machine->frame.args_size))
- RTX_FRAME_RELATED_P
+ RTX_FRAME_RELATED_P
(emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
stack_pointer_rtx,
offset))) = 1;
MIPS_PROLOGUE_TEMP (Pmode)));
mips_set_frame_expr
(gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
- plus_constant (stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx,
cfun->machine->frame.args_size)));
}
}
mips_emit_loadgp ();
/* If generating o32/o64 abicalls, save $gp on the stack. */
- if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
+ if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
/* If we are profiling, make sure no instructions are scheduled before
if (target != stack_pointer_rtx)
emit_move_insn (stack_pointer_rtx, target);
- /* If we're using addressing macros for n32/n64 abicalls, $gp is
- implicitly used by all SYMBOL_REFs. We must emit a blockage
- insn before restoring it. */
- if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
+ /* If we're using addressing macros, $gp is implicitly used by all
+ SYMBOL_REFs. We must emit a blockage insn before restoring $gp
+ from the stack. */
+ if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
emit_insn (gen_blockage ());
/* Restore the registers. */
/* Pretend to be a post-reload pass while generating rtl. */
no_new_pseudos = 1;
reload_completed = 1;
- reset_block_changes ();
- /* Pick a global pointer for -mabicalls. Use $15 rather than $28
- for TARGET_NEWABI since the latter is a call-saved register. */
- if (TARGET_ABICALLS)
+ /* Pick a global pointer. Use a call-clobbered register if
+ TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
+ if (TARGET_USE_GOT)
cfun->machine->global_pointer
= REGNO (pic_offset_table_rtx)
- = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
+ = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
/* Set up the global pointer for n32 or n64 abicalls. */
mips_emit_loadgp ();
/* Jump to the target function. Use a sibcall if direct jumps are
allowed, otherwise load the address into a register first. */
fnaddr = XEXP (DECL_RTL (function), 0);
- if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
+ if (TARGET_MIPS16 || TARGET_USE_GOT || TARGET_LONG_CALLS)
{
/* This is messy. gas treats "la $25,foo" as part of a call
sequence and may allow a global "foo" to be lazily bound.
The general move patterns therefore reject this combination.
- In this context, lazy binding would actually be OK for o32 and o64,
- but it's still wrong for n32 and n64; see mips_load_call_address.
- We must therefore load the address via a temporary register if
- mips_dangerous_for_la25_p.
+ In this context, lazy binding would actually be OK
+ for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
+ TARGET_CALL_SAVED_GP; see mips_load_call_address.
+ We must therefore load the address via a temporary
+ register if mips_dangerous_for_la25_p.
If we jump to the temporary register rather than $25, the assembler
can use the move insn to fill the jump's delay slot. */
- if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
+ if (TARGET_USE_PIC_FN_ADDR_REG
+ && !mips_dangerous_for_la25_p (fnaddr))
temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
mips_load_call_address (temp1, fnaddr, true);
- if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
+ if (TARGET_USE_PIC_FN_ADDR_REG
+ && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
emit_jump_insn (gen_indirect_jump (temp1));
}
/* Run just enough of rest_of_compilation. This sequence was
"borrowed" from alpha.c. */
insn = get_insns ();
- insn_locators_initialize ();
+ insn_locators_alloc ();
split_all_insns_noflow ();
if (TARGET_MIPS16)
mips16_lay_out_constants ();
if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
return false;
- /* We don't yet generate small-data references for -mabicalls. See related
- -G handling in override_options. */
- if (TARGET_ABICALLS)
+ /* We don't yet generate small-data references for -mabicalls or
+ VxWorks RTP code. See the related -G handling in override_options. */
+ if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
return false;
if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
{
int inc;
- inc = (TARGET_NEWABI ? 2 : FP_INC);
+ inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
return gen_rtx_PARALLEL
(mode,
gen_rtvec (2,
registers, the first register always holds the low word.
We therefore can't allow FPRs to change between single-word
and multi-word modes. */
- if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
+ if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
return true;
}
else
return true;
}
}
+
+ /* gcc assumes that each word of a multiword register can be accessed
+ individually using SUBREGs. This is not true for floating-point
+ registers if they are bigger than a word. */
+ if (UNITS_PER_FPREG > UNITS_PER_WORD
+ && GET_MODE_SIZE (from) > UNITS_PER_WORD
+ && GET_MODE_SIZE (to) < UNITS_PER_FPREG
+ && reg_classes_intersect_p (FP_REGS, class))
+ return true;
+
/* Loading a 32-bit value into a 64-bit floating-point register
will not sign-extend the value, despite what LOAD_EXTEND_OP says.
We can't allow 64-bit float registers to change from SImode to
to a wider mode. */
- if (TARGET_FLOAT64
+ if (TARGET_64BIT
+ && TARGET_FLOAT64
&& from == SImode
&& GET_MODE_SIZE (to) >= UNITS_PER_WORD
&& reg_classes_intersect_p (FP_REGS, class))
return true;
+
return false;
}
bool
mips_dangerous_for_la25_p (rtx x)
{
- HOST_WIDE_INT offset;
-
- if (TARGET_EXPLICIT_RELOCS)
- return false;
-
- mips_split_const (x, &x, &offset);
- return global_got_operand (x, VOIDmode);
+ return (!TARGET_EXPLICIT_RELOCS
+ && TARGET_USE_GOT
+ && GET_CODE (x) == SYMBOL_REF
+ && mips_global_symbol_p (x));
}
/* Implement PREFERRED_RELOAD_CLASS. */
/* Implement CLASS_MAX_NREGS.
- Usually all registers are word-sized. The only supported exception
- is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
- registers. A word-based calculation is correct even in that case,
- since -msingle-float disallows multi-FPR values.
+ - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
+
+ - ST_REGS always hold CCmode values, and CCmode values are
+ considered to be 4 bytes wide.
- The FP status registers are an exception to this rule. They are always
- 4 bytes wide as they only hold condition code modes, and CCmode is always
- considered to be 4 bytes wide. */
+ All other register classes are covered by UNITS_PER_WORD. Note that
+ this is true even for unions of integer and float registers when the
+ latter are smaller than the former. The only supported combination
+ in which case this occurs is -mgp64 -msingle-float, which has 64-bit
+ words but 32-bit float registers. A word-based calculation is correct
+ in that case since -msingle-float disallows multi-FPR values. */
int
mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
{
if (class == ST_REGS)
return (GET_MODE_SIZE (mode) + 3) / 4;
+ else if (class == FP_REGS)
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
else
return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
{
if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
{
- rtx unspec;
rtx insn, scan;
cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
/* We want to initialize this to a value which gcc will believe
is constant. */
- start_sequence ();
- unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
- emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
- gen_rtx_CONST (Pmode, unspec));
- insn = get_insns ();
- end_sequence ();
+ insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
push_topmost_sequence ();
/* We need to emit the initialization after the FUNCTION_BEG
int gparg, fparg;
unsigned int f;
- /* This code only works for the original 32 bit ABI and the O64 ABI. */
+ /* This code only works for the original 32-bit ABI and the O64 ABI. */
gcc_assert (TARGET_OLDABI);
if (from_fp_p)
}
/* Build a mips16 function stub. This is used for functions which
- take arguments in the floating point registers. It is 32 bit code
+ take arguments in the floating point registers. It is 32-bit code
that moves the floating point args into the general registers, and
- then jumps to the 16 bit code. */
+ then jumps to the 16-bit code. */
static void
build_mips16_function_stub (FILE *file)
/* Build a call stub for a mips16 call. A stub is needed if we are
passing any floating point values which should go into the floating
- point registers. If we are, and the call turns out to be to a 32
- bit function, the stub will be used to move the values into the
- floating point registers before calling the 32 bit function. The
- linker will magically adjust the function call to either the 16 bit
- function or the 32 bit stub, depending upon where the function call
+ point registers. If we are, and the call turns out to be to a
+ 32-bit function, the stub will be used to move the values into the
+ floating point registers before calling the 32-bit function. The
+ linker will magically adjust the function call to either the 16-bit
+ function or the 32-bit stub, depending upon where the function call
is actually defined.
Similarly, we need a stub if the return value might come back in a
{
/* Build a special purpose stub. When the linker sees a
function call in mips16 code, it will check where the target
- is defined. If the target is a 32 bit call, the linker will
+ is defined. If the target is a 32-bit call, the linker will
search for the section defined here. It can tell which
symbol this section is associated with by looking at the
relocation information (the name is unreliable, since this
}
/* We build the stub code by hand. That's the only way we can
- do it, since we can't generate 32 bit code during a 16 bit
+ do it, since we can't generate 32-bit code during a 16-bit
compilation. */
/* We don't want the assembler to insert any nops here. */
static bool
vr4130_swap_insns_p (rtx insn1, rtx insn2)
{
- rtx dep;
+ dep_link_t dep;
/* Check for the following case:
If INSN1 is the last instruction blocking X, it would better to
choose (INSN1, X) over (INSN2, INSN1). */
- for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
- if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
- && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
- && recog_memoized (XEXP (dep, 0)) >= 0
- && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
+ FOR_EACH_DEP_LINK (dep, INSN_FORW_DEPS (insn1))
+ if (DEP_LINK_KIND (dep) == REG_DEP_ANTI
+ && INSN_PRIORITY (DEP_LINK_CON (dep)) > INSN_PRIORITY (insn2)
+ && recog_memoized (DEP_LINK_CON (dep)) >= 0
+ && get_attr_vr4130_class (DEP_LINK_CON (dep)) == VR4130_CLASS_ALU)
return false;
if (vr4130_last_insn != 0
{
switch (mips_tune)
{
+ case PROCESSOR_74KC:
+ case PROCESSOR_74KF:
+ case PROCESSOR_74KX:
case PROCESSOR_R4130:
case PROCESSOR_R5400:
case PROCESSOR_R5500:
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
+#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
/* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
- DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
- DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
- DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
- DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
- DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
- DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
- DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
- DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
- DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
- DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
- DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
- DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
- DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
+ DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
+ DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
+ DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
+ DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
+ DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
+ BPOSGE_BUILTIN (32, MASK_DSP),
+
+ /* The following are for the MIPS DSP ASE REV 2. */
+ DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
+ DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
+ DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
+ DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
+ DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
+ DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
+ DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
+ DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
+ DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
+};
+
+static const struct builtin_description dsp_32only_bdesc[] =
+{
+ DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
+ DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
+ DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
+ DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
+ DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
+ DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
+ DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
+ DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
+ DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
+ DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
+ DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
+ DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
+ DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
- DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
- DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
- DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
- DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
- DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
- BPOSGE_BUILTIN (32, MASK_DSP)
+
+ /* The following are for the MIPS DSP ASE REV 2. */
+ DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
+ DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
+ DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
+ DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
+ DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
+ DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
};
/* This helps provide a mapping from builtin function codes to bdesc
/* The target processor that supports these builtin functions.
PROCESSOR_MAX means we enable them for all processors. */
enum processor_type proc;
+
+ /* If the target has these flags, this builtin function table
+ will not be supported. */
+ int unsupported_target_flags;
};
static const struct bdesc_map bdesc_arrays[] =
{
- { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX },
- { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 },
- { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX }
+ { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
+ { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
+ { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
+ { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
+ MASK_64BIT }
};
-/* Take the head of argument list *ARGLIST and convert it into a form
- suitable for input operand OP of instruction ICODE. Return the value
- and point *ARGLIST at the next element of the list. */
+/* Take the argument ARGNUM of the arglist of EXP and convert it into a form
+ suitable for input operand OP of instruction ICODE. Return the value. */
static rtx
mips_prepare_builtin_arg (enum insn_code icode,
- unsigned int op, tree *arglist)
+ unsigned int op, tree exp, unsigned int argnum)
{
rtx value;
enum machine_mode mode;
- value = expand_normal (TREE_VALUE (*arglist));
+ value = expand_normal (CALL_EXPR_ARG (exp, argnum));
mode = insn_data[icode].operand[op].mode;
if (!insn_data[icode].operand[op].predicate (value, mode))
{
}
}
- *arglist = TREE_CHAIN (*arglist);
return value;
}
{
enum insn_code icode;
enum mips_builtin_type type;
- tree fndecl, arglist;
+ tree fndecl;
unsigned int fcode;
const struct builtin_description *bdesc;
const struct bdesc_map *m;
- fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
- arglist = TREE_OPERAND (exp, 1);
+ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
fcode = DECL_FUNCTION_CODE (fndecl);
bdesc = NULL;
switch (type)
{
case MIPS_BUILTIN_DIRECT:
- return mips_expand_builtin_direct (icode, target, arglist, true);
+ return mips_expand_builtin_direct (icode, target, exp, true);
case MIPS_BUILTIN_DIRECT_NO_TARGET:
- return mips_expand_builtin_direct (icode, target, arglist, false);
+ return mips_expand_builtin_direct (icode, target, exp, false);
case MIPS_BUILTIN_MOVT:
case MIPS_BUILTIN_MOVF:
return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
- target, arglist);
+ target, exp);
case MIPS_BUILTIN_CMP_ANY:
case MIPS_BUILTIN_CMP_ALL:
case MIPS_BUILTIN_CMP_LOWER:
case MIPS_BUILTIN_CMP_SINGLE:
return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
- target, arglist);
+ target, exp);
case MIPS_BUILTIN_BPOSGE32:
return mips_expand_builtin_bposge (type, target);
types[MIPS_SI_FTYPE_VOID]
= build_function_type (intSI_type_node, void_list_node);
+
+ if (TARGET_DSPR2)
+ {
+ types[MIPS_V4QI_FTYPE_V4QI]
+ = build_function_type_list (V4QI_type_node,
+ V4QI_type_node,
+ NULL_TREE);
+
+ types[MIPS_SI_FTYPE_SI_SI_SI]
+ = build_function_type_list (intSI_type_node,
+ intSI_type_node, intSI_type_node,
+ intSI_type_node, NULL_TREE);
+
+ types[MIPS_DI_FTYPE_DI_USI_USI]
+ = build_function_type_list (intDI_type_node,
+ intDI_type_node,
+ unsigned_intSI_type_node,
+ unsigned_intSI_type_node, NULL_TREE);
+
+ types[MIPS_DI_FTYPE_SI_SI]
+ = build_function_type_list (intDI_type_node,
+ intSI_type_node, intSI_type_node,
+ NULL_TREE);
+
+ types[MIPS_DI_FTYPE_USI_USI]
+ = build_function_type_list (intDI_type_node,
+ unsigned_intSI_type_node,
+ unsigned_intSI_type_node, NULL_TREE);
+
+ types[MIPS_V2HI_FTYPE_SI_SI_SI]
+ = build_function_type_list (V2HI_type_node,
+ intSI_type_node, intSI_type_node,
+ intSI_type_node, NULL_TREE);
+
+ }
}
/* Iterate through all of the bdesc arrays, initializing all of the
offset = 0;
for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
{
- if (m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
+ if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
+ && (m->unsupported_target_flags & target_flags) == 0)
for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
if ((d->target_flags & target_flags) == d->target_flags)
- lang_hooks.builtin_function (d->name, types[d->function_type],
- d - m->bdesc + offset,
- BUILT_IN_MD, NULL, NULL);
+ add_builtin_function (d->name, types[d->function_type],
+ d - m->bdesc + offset,
+ BUILT_IN_MD, NULL, NULL);
offset += m->size;
}
}
/* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
- .md pattern and ARGLIST is the list of function arguments. TARGET,
+ .md pattern and CALL is the function expr with arguments. TARGET,
if nonnull, suggests a good place to put the result.
HAS_TARGET indicates the function must return something. */
static rtx
-mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist,
+mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
bool has_target)
{
rtx ops[MAX_RECOG_OPERANDS];
int i = 0;
+ int j = 0;
if (has_target)
{
i = 1;
}
- /* We need to test if arglist is not zero. Some instructions have extra
+ /* Stop once the call's arguments are exhausted. Some instructions have extra
clobber registers. */
- for (; i < insn_data[icode].n_operands && arglist != 0; i++)
- ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
+ for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
+ ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
switch (i)
{
}
/* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
- function (TYPE says which). ARGLIST is the list of arguments to the
+ function (TYPE says which). EXP is the CALL_EXPR that invokes the
function, ICODE is the instruction that should be used to compare
the first two arguments, and COND is the condition it should test.
TARGET, if nonnull, suggests a good place to put the result. */
static rtx
mips_expand_builtin_movtf (enum mips_builtin_type type,
enum insn_code icode, enum mips_fp_condition cond,
- rtx target, tree arglist)
+ rtx target, tree exp)
{
rtx cmp_result, op0, op1;
cmp_result = mips_prepare_builtin_target (icode, 0, 0);
- op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
- op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
+ op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
+ op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
icode = CODE_FOR_mips_cond_move_tf_ps;
target = mips_prepare_builtin_target (icode, 0, target);
if (type == MIPS_BUILTIN_MOVT)
{
- op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
- op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
+ op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
+ op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
}
else
{
- op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
- op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
+ op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
+ op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
}
emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
return target;
/* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
of the comparison instruction and COND is the condition it should test.
- ARGLIST is the list of function arguments and TARGET, if nonnull,
+ EXP is the CALL_EXPR of the function call and TARGET, if nonnull,
suggests a good place to put the boolean result. */
static rtx
mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
enum insn_code icode, enum mips_fp_condition cond,
- rtx target, tree arglist)
+ rtx target, tree exp)
{
rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
int i;
+ int j = 0;
if (target == 0 || GET_MODE (target) != SImode)
target = gen_reg_rtx (SImode);
/* Prepare the operands to the comparison. */
cmp_result = mips_prepare_builtin_target (icode, 0, 0);
- for (i = 1; i < insn_data[icode].n_operands - 1; i++)
- ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
+ for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
+ ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
switch (insn_data[icode].n_operands)
{
}
}
-/* Implement TARGET_EXTRA_LIVE_ON_ENTRY. PIC_FUNCTION_ADDR_REGNUM is live
- on entry to a function when generating -mshared abicalls code. */
+/* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
+ value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
static void
mips_extra_live_on_entry (bitmap regs)
{
- if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
+ if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
}