/* Definitions of target machine for GCC for IA-32.
Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
- 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GCC.
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* The purpose of this file is to define the characteristics of the i386,
#define TARGET_AVX OPTION_ISA_AVX
#define TARGET_FMA OPTION_ISA_FMA
#define TARGET_SSE4A OPTION_ISA_SSE4A
-#define TARGET_SSE5 OPTION_ISA_SSE5
+#define TARGET_FMA4 OPTION_ISA_FMA4
+#define TARGET_XOP OPTION_ISA_XOP
+#define TARGET_LWP OPTION_ISA_LWP
#define TARGET_ROUND OPTION_ISA_ROUND
#define TARGET_ABM OPTION_ISA_ABM
#define TARGET_POPCNT OPTION_ISA_POPCNT
#define TARGET_SAHF OPTION_ISA_SAHF
+#define TARGET_MOVBE OPTION_ISA_MOVBE
+#define TARGET_CRC32 OPTION_ISA_CRC32
#define TARGET_AES OPTION_ISA_AES
#define TARGET_PCLMUL OPTION_ISA_PCLMUL
#define TARGET_CMPXCHG16B OPTION_ISA_CX16
+#define TARGET_FSGSBASE OPTION_ISA_FSGSBASE
+#define TARGET_RDRND OPTION_ISA_RDRND
+#define TARGET_F16C OPTION_ISA_F16C
-/* SSE5 and SSE4.1 define the same round instructions */
-#define OPTION_MASK_ISA_ROUND (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE5)
+/* SSE4.1 defines round instructions */
+#define OPTION_MASK_ISA_ROUND OPTION_MASK_ISA_SSE4_1
#define OPTION_ISA_ROUND ((ix86_isa_flags & OPTION_MASK_ISA_ROUND) != 0)
#include "config/vxworks-dummy.h"
#define TARGET_GENERIC64 (ix86_tune == PROCESSOR_GENERIC64)
#define TARGET_GENERIC (TARGET_GENERIC32 || TARGET_GENERIC64)
#define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
+#define TARGET_BDVER1 (ix86_tune == PROCESSOR_BDVER1)
+#define TARGET_ATOM (ix86_tune == PROCESSOR_ATOM)
/* Feature tests against the various tunings. */
enum ix86_tune_indices {
X86_TUNE_USE_LEAVE,
X86_TUNE_PUSH_MEMORY,
X86_TUNE_ZERO_EXTEND_WITH_AND,
- X86_TUNE_USE_BIT_TEST,
X86_TUNE_UNROLL_STRLEN,
X86_TUNE_DEEP_BRANCH_PREDICTION,
X86_TUNE_BRANCH_PREDICTION_HINTS,
X86_TUNE_HIMODE_MATH,
X86_TUNE_PROMOTE_QI_REGS,
X86_TUNE_PROMOTE_HI_REGS,
- X86_TUNE_ADD_ESP_4,
- X86_TUNE_ADD_ESP_8,
- X86_TUNE_SUB_ESP_4,
- X86_TUNE_SUB_ESP_8,
+ X86_TUNE_SINGLE_POP,
+ X86_TUNE_DOUBLE_POP,
+ X86_TUNE_SINGLE_PUSH,
+ X86_TUNE_DOUBLE_PUSH,
X86_TUNE_INTEGER_DFMODE_MOVES,
X86_TUNE_PARTIAL_REG_DEPENDENCY,
X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY,
- X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL,
+ X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL,
+ X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL,
+ X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL,
X86_TUNE_SSE_SPLIT_REGS,
X86_TUNE_SSE_TYPELESS_STORES,
X86_TUNE_SSE_LOAD0_BY_PXOR,
X86_TUNE_MOVE_M1_VIA_OR,
X86_TUNE_NOT_UNPAIRABLE,
X86_TUNE_NOT_VECTORMODE,
+ X86_TUNE_USE_VECTOR_FP_CONVERTS,
X86_TUNE_USE_VECTOR_CONVERTS,
X86_TUNE_FUSE_CMP_AND_BRANCH,
+ X86_TUNE_OPT_AGU,
+ X86_TUNE_VECTORIZE_DOUBLE,
X86_TUNE_LAST
};
#define TARGET_PUSH_MEMORY ix86_tune_features[X86_TUNE_PUSH_MEMORY]
#define TARGET_ZERO_EXTEND_WITH_AND \
ix86_tune_features[X86_TUNE_ZERO_EXTEND_WITH_AND]
-#define TARGET_USE_BIT_TEST ix86_tune_features[X86_TUNE_USE_BIT_TEST]
#define TARGET_UNROLL_STRLEN ix86_tune_features[X86_TUNE_UNROLL_STRLEN]
#define TARGET_DEEP_BRANCH_PREDICTION \
ix86_tune_features[X86_TUNE_DEEP_BRANCH_PREDICTION]
#define TARGET_HIMODE_MATH ix86_tune_features[X86_TUNE_HIMODE_MATH]
#define TARGET_PROMOTE_QI_REGS ix86_tune_features[X86_TUNE_PROMOTE_QI_REGS]
#define TARGET_PROMOTE_HI_REGS ix86_tune_features[X86_TUNE_PROMOTE_HI_REGS]
-#define TARGET_ADD_ESP_4 ix86_tune_features[X86_TUNE_ADD_ESP_4]
-#define TARGET_ADD_ESP_8 ix86_tune_features[X86_TUNE_ADD_ESP_8]
-#define TARGET_SUB_ESP_4 ix86_tune_features[X86_TUNE_SUB_ESP_4]
-#define TARGET_SUB_ESP_8 ix86_tune_features[X86_TUNE_SUB_ESP_8]
+#define TARGET_SINGLE_POP ix86_tune_features[X86_TUNE_SINGLE_POP]
+#define TARGET_DOUBLE_POP ix86_tune_features[X86_TUNE_DOUBLE_POP]
+#define TARGET_SINGLE_PUSH ix86_tune_features[X86_TUNE_SINGLE_PUSH]
+#define TARGET_DOUBLE_PUSH ix86_tune_features[X86_TUNE_DOUBLE_PUSH]
#define TARGET_INTEGER_DFMODE_MOVES \
ix86_tune_features[X86_TUNE_INTEGER_DFMODE_MOVES]
#define TARGET_PARTIAL_REG_DEPENDENCY \
ix86_tune_features[X86_TUNE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \
ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY]
-#define TARGET_SSE_UNALIGNED_MOVE_OPTIMAL \
- ix86_tune_features[X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL]
+#define TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
+ ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL]
+#define TARGET_SSE_UNALIGNED_STORE_OPTIMAL \
+ ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL]
+#define TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL \
+ ix86_tune_features[X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL]
#define TARGET_SSE_SPLIT_REGS ix86_tune_features[X86_TUNE_SSE_SPLIT_REGS]
#define TARGET_SSE_TYPELESS_STORES \
ix86_tune_features[X86_TUNE_SSE_TYPELESS_STORES]
#define TARGET_MOVE_M1_VIA_OR ix86_tune_features[X86_TUNE_MOVE_M1_VIA_OR]
#define TARGET_NOT_UNPAIRABLE ix86_tune_features[X86_TUNE_NOT_UNPAIRABLE]
#define TARGET_NOT_VECTORMODE ix86_tune_features[X86_TUNE_NOT_VECTORMODE]
+#define TARGET_USE_VECTOR_FP_CONVERTS \
+ ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS]
#define TARGET_USE_VECTOR_CONVERTS \
ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS]
#define TARGET_FUSE_CMP_AND_BRANCH \
ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH]
+#define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU]
+#define TARGET_VECTORIZE_DOUBLE \
+ ix86_tune_features[X86_TUNE_VECTORIZE_DOUBLE]
/* Feature tests against the various architecture variations. */
enum ix86_arch_indices {
#define TARGET_GNU_TLS (ix86_tls_dialect == TLS_DIALECT_GNU)
#define TARGET_GNU2_TLS (ix86_tls_dialect == TLS_DIALECT_GNU2)
#define TARGET_ANY_GNU_TLS (TARGET_GNU_TLS || TARGET_GNU2_TLS)
-#define TARGET_SUN_TLS (ix86_tls_dialect == TLS_DIALECT_SUN)
+#define TARGET_SUN_TLS 0
extern int ix86_isa_flags;
redefines this to 1. */
#define TARGET_MACHO 0
-/* Likewise, for the Windows 64-bit ABI. */
+/* Branch island 'stubs' are emitted for earlier versions of Darwin.
+ This provides a default (overridden in darwin.h). */
+#ifndef TARGET_MACHO_BRANCH_ISLANDS
+#define TARGET_MACHO_BRANCH_ISLANDS 0
+#endif
+
+/* For the Windows 64-bit ABI. */
#define TARGET_64BIT_MS_ABI (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
/* Available call abi. */
MS_ABI = 1
};
-/* The default abi form used by target. */
+/* The abi used by target. */
+extern enum calling_abi ix86_abi;
+
+/* The default abi used by target. */
#define DEFAULT_ABI SYSV_ABI
/* Subtargets may reset this to 1 in order to enable 96-bit long double
#ifndef CC1_CPU_SPEC
#define CC1_CPU_SPEC_1 "\
-%{mcpu=*:-mtune=%* \
-%n`-mcpu=' is deprecated. Use `-mtune=' or '-march=' instead.\n} \
-%<mcpu=* \
-%{mintel-syntax:-masm=intel \
-%n`-mintel-syntax' is deprecated. Use `-masm=intel' instead.\n} \
-%{mno-intel-syntax:-masm=att \
-%n`-mno-intel-syntax' is deprecated. Use `-masm=att' instead.\n}"
+%{msse5:-mavx \
+%n'-msse5' was removed.\n}"
#ifndef HAVE_LOCAL_CPU_DETECT
#define CC1_CPU_SPEC CC1_CPU_SPEC_1
TARGET_CPU_DEFAULT_prescott,
TARGET_CPU_DEFAULT_nocona,
TARGET_CPU_DEFAULT_core2,
+ TARGET_CPU_DEFAULT_atom,
TARGET_CPU_DEFAULT_geode,
TARGET_CPU_DEFAULT_k6,
TARGET_CPU_DEFAULT_athlon_sse,
TARGET_CPU_DEFAULT_k8,
TARGET_CPU_DEFAULT_amdfam10,
+ TARGET_CPU_DEFAULT_bdver1,
TARGET_CPU_DEFAULT_max
};
#define TARGET_FLT_EVAL_METHOD \
(TARGET_MIX_SSE_I387 ? -1 : TARGET_SSE_MATH ? 0 : 2)
+/* Whether to allow x87 floating-point arithmetic on MODE (one of
+ SFmode, DFmode and XFmode) in the current excess precision
+ configuration. */
+#define X87_ENABLE_ARITH(MODE) \
+ (flag_excess_precision == EXCESS_PRECISION_FAST || (MODE) == XFmode)
+
+/* Likewise, whether to allow direct conversions from integer mode
+ IMODE (HImode, SImode or DImode) to MODE. */
+#define X87_ENABLE_FLOAT(MODE, IMODE) \
+ (flag_excess_precision == EXCESS_PRECISION_FAST \
+ || (MODE) == XFmode \
+ || ((MODE) == DFmode && (IMODE) == SImode) \
+ || (IMODE) == HImode)
+
/* target machine storage layout */
#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
#define FLOAT_TYPE_SIZE 32
-#define LONG_TYPE_SIZE BITS_PER_WORD
#define DOUBLE_TYPE_SIZE 64
-#define LONG_LONG_TYPE_SIZE 64
#define LONG_DOUBLE_TYPE_SIZE 80
#define WIDEST_HARDWARE_FP_SIZE LONG_DOUBLE_TYPE_SIZE
#define WORDS_BIG_ENDIAN 0
/* Width of a word, in units (bytes). */
-#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
#ifdef IN_LIBGCC2
#define MIN_UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
#else
#define PARM_BOUNDARY BITS_PER_WORD
/* Boundary (in *bits*) on which stack pointer should be aligned. */
-#define STACK_BOUNDARY (TARGET_64BIT && DEFAULT_ABI == MS_ABI ? 128 \
- : BITS_PER_WORD)
+#define STACK_BOUNDARY \
+ (TARGET_64BIT && ix86_abi == MS_ABI ? 128 : BITS_PER_WORD)
/* Stack boundary of the main function guaranteed by OS. */
#define MAIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32)
generate an alternate prologue and epilogue that realigns the
runtime stack if necessary. This supports mixing codes that keep a
4-byte aligned stack, as specified by i386 psABI, with codes that
- need a 16-byte aligned stack, as required by SSE instructions. If
- STACK_REALIGN_DEFAULT is 1 and PREFERRED_STACK_BOUNDARY_DEFAULT is
- 128, stacks for all functions may be realigned. */
+ need a 16-byte aligned stack, as required by SSE instructions. */
#define STACK_REALIGN_DEFAULT 0
/* Boundary (in *bits*) on which the incoming stack is aligned. */
/* C++ stores the virtual bit in the lowest bit of function pointers. */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn
-/* Alignment of field after `int : 0' in a structure. */
-
-#define EMPTY_FIELD_BOUNDARY BITS_PER_WORD
-
/* Minimum size in bits of the largest boundary to which any
and all fundamental data types supported by the hardware
might need to be aligned. No data type wants to be aligned
Pentium+ prefers DFmode values to be aligned to 64 bit boundary
and Pentium Pro XFmode values at 128 bit boundaries. */
-#define BIGGEST_ALIGNMENT (TARGET_AVX ? 256: 128)
+#define BIGGEST_ALIGNMENT (TARGET_AVX ? 256 : 128)
/* Maximum stack alignment. */
#define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT
+/* Alignment value for attribute ((aligned)). It is a constant since
+ it is the part of the ABI. We shouldn't change it with -mavx. */
+#define ATTRIBUTE_ALIGNED_VALUE 128
+
/* Decide whether a variable of mode MODE should be 128 bit aligned. */
#define ALIGN_MODE_128(MODE) \
((MODE) == XFmode || SSE_REG_MODE_P (MODE))
#define STACK_SLOT_ALIGNMENT(TYPE, MODE, ALIGN) \
ix86_local_alignment ((TYPE), (MODE), (ALIGN))
+/* If defined, a C expression to compute the alignment for a local
+ variable DECL.
+
+ If this macro is not defined, then
+ LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL)) will be used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. */
+
+#define LOCAL_DECL_ALIGNMENT(DECL) \
+ ix86_local_alignment ((DECL), VOIDmode, DECL_ALIGN (DECL))
+
+/* If defined, a C expression to compute the minimum required alignment
+ for dynamic stack realignment purposes for EXP (a TYPE or DECL),
+ MODE, assuming normal alignment ALIGN.
+
+ If this macro is not defined, then (ALIGN) will be used. */
+
+#define MINIMUM_ALIGNMENT(EXP, MODE, ALIGN) \
+ ix86_minimum_alignment (EXP, MODE, ALIGN)
+
+
/* If defined, a C expression that gives the alignment boundary, in
bits, of an argument with the specified mode and type. If it is
not defined, `PARM_BOUNDARY' is used for all arguments. */
|| ((MODE) == DFmode && (!TARGET_SSE2 || !TARGET_SSE_MATH)) \
|| (MODE) == XFmode)
+/* Cover class containing the stack registers. */
+#define STACK_REG_COVER_CLASS FLOAT_REGS
+
/* Number of actual hardware registers.
The hardware registers are assigned numbers for the compiler
from 0 to just below FIRST_PSEUDO_REGISTER.
1, 1, 1, 1, 1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
0, 0, 0, 0, 0, 0, 0, 0, \
-/*mmx0,mmx1,mmx2,mmx3,mmx4,mmx5,mmx6,mmx7*/ \
+/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
0, 0, 0, 0, 0, 0, 0, 0, \
/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
2, 2, 2, 2, 2, 2, 2, 2, \
1, 1, 1, 1, 1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
1, 1, 1, 1, 1, 1, 1, 1, \
-/*mmx0,mmx1,mmx2,mmx3,mmx4,mmx5,mmx6,mmx7*/ \
+/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
1, 1, 1, 1, 1, 1, 1, 1, \
/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
1, 1, 1, 1, 2, 2, 2, 2, \
registers listed in CALL_USED_REGISTERS, keeping the others
available for storage of persistent values.
- The ORDER_REGS_FOR_LOCAL_ALLOC actually overwrite the order,
+ The ADJUST_REG_ALLOC_ORDER actually overwrites the order,
so this is just empty initializer for array. */
#define REG_ALLOC_ORDER \
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
48, 49, 50, 51, 52 }
-/* ORDER_REGS_FOR_LOCAL_ALLOC is a macro which permits reg_alloc_order
+/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order
to be rearranged based on a particular function. When using sse math,
we want to allocate SSE before x87 registers and vice versa. */
-#define ORDER_REGS_FOR_LOCAL_ALLOC x86_order_regs_for_local_alloc ()
+#define ADJUST_REG_ALLOC_ORDER x86_order_regs_for_local_alloc ()
#define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL)
/* Macro to conditionally modify fixed_regs/call_used_regs. */
-#define CONDITIONAL_REGISTER_USAGE \
-do { \
- int i; \
- unsigned int j; \
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
- { \
- if (fixed_regs[i] > 1) \
- fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2)); \
- if (call_used_regs[i] > 1) \
- call_used_regs[i] = (call_used_regs[i] \
- == (TARGET_64BIT ? 3 : 2)); \
- } \
- j = PIC_OFFSET_TABLE_REGNUM; \
- if (j != INVALID_REGNUM) \
- { \
- fixed_regs[j] = 1; \
- call_used_regs[j] = 1; \
- } \
- if (! TARGET_MMX) \
- { \
- int i; \
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
- if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i)) \
- fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
- } \
- if (! TARGET_SSE) \
- { \
- int i; \
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
- if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i)) \
- fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
- } \
- if (! TARGET_80387 && ! TARGET_FLOAT_RETURNS_IN_80387) \
- { \
- int i; \
- HARD_REG_SET x; \
- COPY_HARD_REG_SET (x, reg_class_contents[(int)FLOAT_REGS]); \
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
- if (TEST_HARD_REG_BIT (x, i)) \
- fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
- } \
- if (! TARGET_64BIT) \
- { \
- int i; \
- for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++) \
- reg_names[i] = ""; \
- for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++) \
- reg_names[i] = ""; \
- } \
- if (TARGET_64BIT && DEFAULT_ABI == MS_ABI) \
- { \
- call_used_regs[4 /*RSI*/] = 0; \
- call_used_regs[5 /*RDI*/] = 0; \
- } \
- } while (0)
+#define CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage ()
/* Return number of consecutive hard regs needed starting at reg REGNO
to hold something of mode MODE.
|| (MODE) == V2DImode || (MODE) == DFmode)
#define VALID_SSE_REG_MODE(MODE) \
- ((MODE) == TImode || (MODE) == V4SFmode || (MODE) == V4SImode \
+ ((MODE) == V1TImode || (MODE) == TImode \
+ || (MODE) == V4SFmode || (MODE) == V4SImode \
|| (MODE) == SFmode || (MODE) == TFmode)
#define VALID_MMX_REG_MODE_3DNOW(MODE) \
|| (MODE) == V2SImode || (MODE) == SImode \
|| (MODE) == V4HImode || (MODE) == V8QImode)
-/* ??? No autovectorization into MMX or 3DNOW until we can reliably
- place emms and femms instructions.
- FIXME: AVX has 32byte floating point vector operations and 16byte
- integer vector operations. But vectorizer doesn't support
- different sizes for integer and floating point vectors. We limit
- vector size to 16byte. */
-#define UNITS_PER_SIMD_WORD(MODE) \
- (TARGET_AVX ? (((MODE) == DFmode || (MODE) == SFmode) ? 16 : 16) \
- : (TARGET_SSE ? 16 : UNITS_PER_WORD))
+#define UNITS_PER_SIMD_WORD(MODE) ix86_units_per_simd_word (MODE)
#define VALID_DFP_MODE_P(MODE) \
((MODE) == SDmode || (MODE) == DDmode || (MODE) == TDmode)
/* Return true for modes passed in SSE registers. */
#define SSE_REG_MODE_P(MODE) \
- ((MODE) == TImode || (MODE) == V16QImode || (MODE) == TFmode \
- || (MODE) == V8HImode || (MODE) == V2DFmode || (MODE) == V2DImode \
- || (MODE) == V4SFmode || (MODE) == V4SImode || (MODE) == V32QImode \
- || (MODE) == V16HImode || (MODE) == V8SImode || (MODE) == V4DImode \
- || (MODE) == V8SFmode || (MODE) == V4DFmode)
+ ((MODE) == V1TImode || (MODE) == TImode || (MODE) == V16QImode \
+ || (MODE) == TFmode || (MODE) == V8HImode || (MODE) == V2DFmode \
+ || (MODE) == V2DImode || (MODE) == V4SFmode || (MODE) == V4SImode \
+ || (MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
+ || (MODE) == V4DImode || (MODE) == V8SFmode || (MODE) == V4DFmode)
/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. */
: (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \
: (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), false) \
: (MODE) == HImode && !TARGET_PARTIAL_REG_STALL ? SImode \
- : (MODE) == QImode && (REGNO) >= 4 && !TARGET_64BIT ? SImode \
+ : (MODE) == QImode && (REGNO) > BX_REG && !TARGET_64BIT ? SImode \
: (MODE))
+/* The only ABI that saves SSE registers across calls is Win64 (thus no
+ need to check the current ABI here), and with AVX enabled Win64 only
+ guarantees that the low 16 bytes are saved. */
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+ (SSE_REGNO_P (REGNO) && GET_MODE_SIZE (MODE) > 16)
+
/* Specify the registers used for certain standard purposes.
The values of these macros are register numbers. */
#define FIRST_REX_SSE_REG (LAST_REX_INT_REG + 1)
#define LAST_REX_SSE_REG (FIRST_REX_SSE_REG + 7)
-/* Value should be nonzero if functions must have frame pointers.
- Zero means the frame pointer need not be set up (and parms
- may be accessed via the stack pointer) in functions that seem suitable.
- This is computed in `reload', in reload1.c. */
-#define FRAME_POINTER_REQUIRED ix86_frame_pointer_required ()
-
/* Override this in other tm.h files to cope with various OS lossage
requiring a frame pointer. */
#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
/* Base register for access to arguments of the function. */
#define ARG_POINTER_REGNUM 16
-/* Register in which static-chain is passed to a function.
- We do use ECX as static chain register for 32 bit ABI. On the
- 64bit ABI, ECX is an argument register, so we use R10 instead. */
-#define STATIC_CHAIN_REGNUM (TARGET_64BIT ? R10_REG : CX_REG)
-
/* Register to hold the addressing base for position independent
code access to data items. We don't use PIC pointer for 64bit
mode. Define the regnum to dummy value to prevent gcc from
NO_REGS,
AREG, DREG, CREG, BREG, SIREG, DIREG,
AD_REGS, /* %eax/%edx for DImode */
+ CLOBBERED_REGS, /* call-clobbered integers */
Q_REGS, /* %eax %ebx %ecx %edx */
NON_Q_REGS, /* %esi %edi %ebp %esp */
INDEX_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp */
"AREG", "DREG", "CREG", "BREG", \
"SIREG", "DIREG", \
"AD_REGS", \
+ "CLOBBERED_REGS", \
"Q_REGS", "NON_Q_REGS", \
"INDEX_REGS", \
"LEGACY_REGS", \
"FLOAT_INT_SSE_REGS", \
"ALL_REGS" }
-/* Define which registers fit in which classes.
- This is an initializer for a vector of HARD_REG_SET
- of length N_REG_CLASSES. */
+/* Define which registers fit in which classes. This is an initializer
+ for a vector of HARD_REG_SET of length N_REG_CLASSES.
+
+ Note that the default setting of CLOBBERED_REGS is for 32-bit; this
+ is adjusted by CONDITIONAL_REGISTER_USAGE for the 64-bit ABI in effect. */
#define REG_CLASS_CONTENTS \
{ { 0x00, 0x0 }, \
{ 0x04, 0x0 }, { 0x08, 0x0 }, /* CREG, BREG */ \
{ 0x10, 0x0 }, { 0x20, 0x0 }, /* SIREG, DIREG */ \
{ 0x03, 0x0 }, /* AD_REGS */ \
+ { 0x07, 0x0 }, /* CLOBBERED_REGS */ \
{ 0x0f, 0x0 }, /* Q_REGS */ \
{ 0x1100f0, 0x1fe0 }, /* NON_Q_REGS */ \
{ 0x7f, 0x1fe0 }, /* INDEX_REGS */ \
{ 0xffffffff,0x1fffff } \
}
-/* The following macro defines cover classes for Integrated Register
- Allocator. Cover classes is a set of non-intersected register
- classes covering all hard registers used for register allocation
- purpose. Any move between two registers of a cover class should be
- cheaper than load or store of the registers. The macro value is
- array of register classes with LIM_REG_CLASSES used as the end
- marker. */
-
-#define IRA_COVER_CLASSES \
-{ \
- GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES \
-}
-
/* The same information, inverted:
Return the class number of the smallest class containing
reg number REGNO. This could be a conditional expression
#define REGNO_REG_CLASS(REGNO) (regclass_map[REGNO])
-/* When defined, the compiler allows registers explicitly used in the
- rtl to be used as spill registers but prevents the compiler from
- extending the lifetime of these registers. */
+/* When this hook returns true for MODE, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true
-#define SMALL_REGISTER_CLASSES 1
-
-#define QI_REG_P(X) (REG_P (X) && REGNO (X) < 4)
+#define QI_REG_P(X) (REG_P (X) && REGNO (X) <= BX_REG)
#define GENERAL_REGNO_P(N) \
((N) <= STACK_POINTER_REGNUM || REX_INT_REGNO_P (N))
(TARGET_AVX && ((MODE) == V4SFmode || (MODE) == V2DFmode \
|| (MODE) == V8SFmode || (MODE) == V4DFmode))
+#define FMA4_VEC_FLOAT_MODE_P(MODE) \
+ (TARGET_FMA4 && ((MODE) == V4SFmode || (MODE) == V2DFmode \
+ || (MODE) == V8SFmode || (MODE) == V4DFmode))
+
#define MMX_REG_P(XOP) (REG_P (XOP) && MMX_REGNO_P (REGNO (XOP)))
#define MMX_REGNO_P(N) IN_RANGE ((N), FIRST_MMX_REG, LAST_MMX_REG)
: (((((MODE) == XFmode ? 12 : GET_MODE_SIZE (MODE))) \
+ UNITS_PER_WORD - 1) / UNITS_PER_WORD))
-/* A C expression whose value is nonzero if pseudos that have been
- assigned to registers of class CLASS would likely be spilled
- because registers of CLASS are needed for spill registers.
-
- The default value of this macro returns 1 if CLASS has exactly one
- register and zero otherwise. On most machines, this default
- should be used. Only define this macro to some other expression
- if pseudo allocated by `local-alloc.c' end up in memory because
- their hard registers were needed for spill registers. If this
- macro returns nonzero for those classes, those pseudos will only
- be allocated by `global.c', which knows how to reallocate the
- pseudo to another register. If there would not be another
- register available for reallocation, you should not change the
- definition of this macro since the only effect of such a
- definition would be to slow down register allocation. */
-
-#define CLASS_LIKELY_SPILLED_P(CLASS) \
- (((CLASS) == AREG) \
- || ((CLASS) == DREG) \
- || ((CLASS) == CREG) \
- || ((CLASS) == BREG) \
- || ((CLASS) == AD_REGS) \
- || ((CLASS) == SIREG) \
- || ((CLASS) == DIREG) \
- || ((CLASS) == FP_TOP_REG) \
- || ((CLASS) == FP_SECOND_REG))
-
/* Return a class of registers that cannot change FROM mode to TO mode. */
#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
be computed and placed into the variable
`crtl->outgoing_args_size'. No space will be pushed onto the
stack for each call; instead, the function prologue should increase the stack
- frame size by this amount. */
+ frame size by this amount.
+
+ MS ABI seems to require 16 byte alignment everywhere except for function
+ prologue and epilogue. This is not possible without
+ ACCUMULATE_OUTGOING_ARGS. */
-#define ACCUMULATE_OUTGOING_ARGS TARGET_ACCUMULATE_OUTGOING_ARGS
+#define ACCUMULATE_OUTGOING_ARGS \
+ (TARGET_ACCUMULATE_OUTGOING_ARGS || ix86_cfun_abi () == MS_ABI)
/* If defined, a C expression whose value is nonzero when we want to use PUSH
instructions to pass outgoing arguments. */
which. */
#define REG_PARM_STACK_SPACE(FNDECL) ix86_reg_parm_stack_space (FNDECL)
-#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) (ix86_function_type_abi (FNTYPE) == MS_ABI ? 1 : 0)
-
-/* Value is the number of bytes of arguments automatically
- popped when returning from a subroutine call.
- FUNDECL is the declaration node of the function (as a tree),
- FUNTYPE is the data type of the function (as a tree),
- or for a library call it is an identifier node for the subroutine name.
- SIZE is the number of bytes of arguments passed on the stack.
-
- On the 80386, the RTD insn may be used to pop them if the number
- of args is fixed, but if the number is variable then the caller
- must pop them all. RTD can't be used for library calls now
- because the library is compiled with the Unix compiler.
- Use of RTD is a selectable option, since it is incompatible with
- standard Unix calling sequences. If the option is not selected,
- the caller must always pop the args.
-
- The attribute stdcall is equivalent to RTD on a per module basis. */
-
-#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, SIZE) \
- ix86_return_pops_args ((FUNDECL), (FUNTYPE), (SIZE))
-
-#define FUNCTION_VALUE_REGNO_P(N) \
- ix86_function_value_regno_p (N)
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) \
+ (ix86_function_type_abi (FNTYPE) == MS_ABI)
/* Define how to find the value returned by a library function
assuming the value has mode MODE. */
-#define LIBCALL_VALUE(MODE) \
- ix86_libcall_value (MODE)
+#define LIBCALL_VALUE(MODE) ix86_libcall_value (MODE)
/* Define the size of the result block used for communication between
untyped_call and untyped_return. The block contains a DImode value
int words; /* # words passed so far */
int nregs; /* # registers available for passing */
int regno; /* next available register number */
- int fastcall; /* fastcall calling convention is used */
+ int fastcall; /* fastcall or thiscall calling convention
+ is used */
int sse_words; /* # sse words passed so far */
int sse_nregs; /* # sse registers available for passing */
int warn_avx; /* True when we want to warn about AVX ABI. */
int mmx_nregs; /* # mmx registers available for passing */
int mmx_regno; /* next available mmx register number */
int maybe_vaarg; /* true for calls to possibly variadic functions. */
- int float_in_sse; /* 1 if in 32-bit mode SFmode (2 for DFmode) should
- be passed in SSE registers. Otherwise 0. */
- int call_abi; /* Set to SYSV_ABI for sysv abi. Otherwise
+ int float_in_sse; /* Set to 1 or 2 for 32bit targets if
+ SFmode/DFmode arguments should be passed
+ in SSE registers. Otherwise 0. */
+ enum calling_abi call_abi; /* Set to SYSV_ABI for sysv abi. Otherwise
MS_ABI for ms abi. */
} CUMULATIVE_ARGS;
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL))
-/* Update the data in CUM to advance over an argument
- of mode MODE and data type TYPE.
- (TYPE is null for libcalls where that information may not be available.) */
-
-#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
- function_arg_advance (&(CUM), (MODE), (TYPE), (NAMED))
-
-/* Define where to put the arguments to a function.
- Value is zero to push the argument on the stack,
- or a hard register in which to store the argument.
-
- MODE is the argument's machine mode.
- TYPE is the data type of the argument (as a tree).
- This is null for libcalls where that information may
- not be available.
- CUM is a variable of type CUMULATIVE_ARGS which gives info about
- the preceding args and about the function being called.
- NAMED is nonzero if this argument is a named parameter
- (otherwise it is an extra parameter matching an ellipsis). */
-
-#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
- function_arg (&(CUM), (MODE), (TYPE), (NAMED))
-
-#define TARGET_ASM_FILE_END ix86_file_end
-#define NEED_INDICATE_EXEC_STACK 0
-
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. */
#define MCOUNT_NAME "_mcount"
+#define MCOUNT_NAME_BEFORE_PROLOGUE "__fentry__"
+
#define PROFILE_COUNT_REGISTER "edx"
/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
/* Length in units of the trampoline for entering a nested function. */
-#define TRAMPOLINE_SIZE (TARGET_64BIT ? 23 : 10)
-
-/* Emit RTL insns to initialize the variable parts of a trampoline.
- FNADDR is an RTX for the address of the function's pure code.
- CXT is an RTX for the static chain value for the function. */
-
-#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
- x86_initialize_trampoline ((TRAMP), (FNADDR), (CXT))
+#define TRAMPOLINE_SIZE (TARGET_64BIT ? 24 : 10)
\f
/* Definitions for register eliminations.
{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
{ FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
-/* Given FROM and TO register numbers, say whether this elimination is
- allowed. */
-
-#define CAN_ELIMINATE(FROM, TO) ix86_can_eliminate ((FROM), (TO))
-
/* Define the offset between two registers, one to be eliminated, and the other
its replacement, at the start of a routine. */
#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X)
#endif
-/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
that is a valid memory address for an instruction.
The MODE argument is the machine mode for the MEM expression
that wants to use this address.
- The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS,
+ The other macros defined here are used only in TARGET_LEGITIMATE_ADDRESS_P,
except for CONSTANT_ADDRESS_P which is usually machine-independent.
See legitimize_pic_address in i386.c for details as to what
#define LEGITIMATE_CONSTANT_P(X) legitimate_constant_p (X)
-#ifdef REG_OK_STRICT
-#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
-do { \
- if (legitimate_address_p ((MODE), (X), 1)) \
- goto ADDR; \
-} while (0)
-
-#else
-#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
-do { \
- if (legitimate_address_p ((MODE), (X), 0)) \
- goto ADDR; \
-} while (0)
-
-#endif
-
/* If defined, a C expression to determine the base term of address X.
This macro is used in only one place: `find_base_term' in alias.c.
#define FIND_BASE_TERM(X) ix86_find_base_term (X)
-/* Try machine-dependent ways of modifying an illegitimate address
- to be legitimate. If we find one, return the new, valid address.
- This macro is used in only one place: `memory_address' in explow.c.
-
- OLDX is the address as it was before break_out_memory_refs was called.
- In some cases it is useful to look at this to decide what needs to be done.
-
- MODE and WIN are passed so that this macro can use
- GO_IF_LEGITIMATE_ADDRESS.
-
- It is always safe for this macro to do nothing. It exists to recognize
- opportunities to optimize the output.
-
- For the 80386, we handle X+REG by loading X into a register R and
- using R+REG. R will go in a general reg and indexing will be used.
- However, if REG is a broken-out memory address or multiplication,
- nothing needs to be done because REG can certainly go in a general reg.
-
- When -fpic is used, special handling is needed for symbolic references.
- See comments by legitimize_pic_address in i386.c for details. */
-
-#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
-do { \
- (X) = legitimize_address ((X), (OLDX), (MODE)); \
- if (memory_address_p ((MODE), (X))) \
- goto WIN; \
-} while (0)
-
/* Nonzero if the constant value X is a legitimate general operand
when generating PIC code. It is given that flag_pic is on and
that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
(GET_CODE (X) == SYMBOL_REF \
|| GET_CODE (X) == LABEL_REF \
|| (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
-
-/* Go to LABEL if ADDR (a legitimate address expression)
- has an effect that depends on the machine mode it is used for.
- On the 80386, only postdecrement and postincrement address depend thus
- (the amount of decrement or increment being the length of the operand).
- These are now caught in recog.c. */
-#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL)
\f
/* Max number of args passed in registers. If this is more than 3, we will
have problems with ebx (register #4), since it is a caller save register and
/* Abi specific values for REGPARM_MAX and SSE_REGPARM_MAX */
#define X86_64_REGPARM_MAX 6
-#define X64_REGPARM_MAX 4
+#define X86_64_MS_REGPARM_MAX 4
+
#define X86_32_REGPARM_MAX 3
+#define REGPARM_MAX \
+ (TARGET_64BIT \
+ ? (TARGET_64BIT_MS_ABI \
+ ? X86_64_MS_REGPARM_MAX \
+ : X86_64_REGPARM_MAX) \
+ : X86_32_REGPARM_MAX)
+
#define X86_64_SSE_REGPARM_MAX 8
-#define X64_SSE_REGPARM_MAX 4
-#define X86_32_SSE_REGPARM_MAX (TARGET_SSE ? 3 : 0)
+#define X86_64_MS_SSE_REGPARM_MAX 4
-#define REGPARM_MAX (TARGET_64BIT ? (TARGET_64BIT_MS_ABI ? X64_REGPARM_MAX \
- : X86_64_REGPARM_MAX) \
- : X86_32_REGPARM_MAX)
+#define X86_32_SSE_REGPARM_MAX (TARGET_SSE ? (TARGET_MACHO ? 4 : 3) : 0)
-#define SSE_REGPARM_MAX (TARGET_64BIT ? (TARGET_64BIT_MS_ABI ? X64_SSE_REGPARM_MAX \
- : X86_64_SSE_REGPARM_MAX) \
- : X86_32_SSE_REGPARM_MAX)
+#define SSE_REGPARM_MAX \
+ (TARGET_64BIT \
+ ? (TARGET_64BIT_MS_ABI \
+ ? X86_64_MS_SSE_REGPARM_MAX \
+ : X86_64_SSE_REGPARM_MAX) \
+ : X86_32_SSE_REGPARM_MAX)
#define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : (TARGET_MMX ? 3 : 0))
-
\f
/* Specify the machine mode that this machine uses
for the index in the tablejump instruction. */
#define CLEAR_RATIO(speed) ((speed) ? MIN (6, ix86_cost->move_ratio) : 2)
-/* Define if shifts truncate the shift count
- which implies one can omit a sign-extension or zero-extension
- of a shift count. */
-/* On i386, shifts do truncate the count. But bit opcodes don't. */
+/* Define if shifts truncate the shift count which implies one can
+ omit a sign-extension or zero-extension of a shift count.
+
+ On i386, shifts do truncate the count. But bit test instructions
+ take the modulo of the bit offset operand. */
/* #define SHIFT_COUNT_TRUNCATED */
so give the MEM rtx a byte's mode. */
#define FUNCTION_MODE QImode
\f
-/* A C expression for the cost of moving data from a register in class FROM to
- one in class TO. The classes are expressed using the enumeration values
- such as `GENERAL_REGS'. A value of 2 is the default; other values are
- interpreted relative to that.
-
- It is not required that the cost always equal 2 when FROM is the same as TO;
- on some machines it is expensive to move between registers if they are not
- general registers. */
-
-#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \
- ix86_register_move_cost ((MODE), (CLASS1), (CLASS2))
-
-/* A C expression for the cost of moving data of mode M between a
- register and memory. A value of 2 is the default; this cost is
- relative to those in `REGISTER_MOVE_COST'.
-
- If moving between registers and memory is more expensive than
- between two registers, you should define this macro to express the
- relative cost. */
-
-#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
- ix86_memory_move_cost ((MODE), (CLASS), (IN))
/* A C expression for the cost of a branch instruction. A value of 1
is the default; other values are interpreted relative to that. */
For non floating point regs, the following are the HImode names.
For float regs, the stack top is sometimes referred to as "%st(0)"
- instead of just "%st". PRINT_OPERAND handles this with the "y" code. */
+ instead of just "%st". TARGET_PRINT_OPERAND handles this with the
+ "y" code. */
#define HI_REGISTER_NAMES \
{"ax","dx","cx","bx","si","di","bp","sp", \
#define INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD
/* Describe how we implement __builtin_eh_return. */
-#define EH_RETURN_DATA_REGNO(N) ((N) < 2 ? (N) : INVALID_REGNUM)
-#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 2)
+#define EH_RETURN_DATA_REGNO(N) ((N) <= DX_REG ? (N) : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, CX_REG)
/* Select a format to encode pointers in exception handling data. CODE
#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
ASM_OUTPUT_AVX_PREFIX ((STREAM), (PTR))
+/* A C statement to output to the stdio stream FILE an assembler
+ command to pad the location counter to a multiple of 1<<LOG
+ bytes if it is within MAX_SKIP bytes. */
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+#undef ASM_OUTPUT_MAX_SKIP_PAD
+#define ASM_OUTPUT_MAX_SKIP_PAD(FILE, LOG, MAX_SKIP) \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
+ }
+#endif
+
+/* Write the extra assembler code needed to declare a function
+ properly. */
+
+#undef ASM_OUTPUT_FUNCTION_LABEL
+#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \
+ ix86_asm_output_function_label (FILE, NAME, DECL)
+
/* Under some conditions we need jump tables in the text section,
because the assembler cannot handle label differences between
sections. This is the case for x86_64 on Mach-O for example. */
/* Switch to init or fini section via SECTION_OP, emit a call to FUNC,
and switch back. For x86 we do this only to save a few bytes that
would otherwise be unused in the text section. */
-#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
- asm (SECTION_OP "\n\t" \
- "call " USER_LABEL_PREFIX #FUNC "\n" \
- TEXT_SECTION_ASM_OP);
-\f
-/* Print operand X (an rtx) in assembler syntax to file FILE.
- CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
- Effect of various CODE letters is described in i386.c near
- print_operand function. */
-
-#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
- ((CODE) == '*' || (CODE) == '+' || (CODE) == '&' || (CODE) == ';')
-
-#define PRINT_OPERAND(FILE, X, CODE) \
- print_operand ((FILE), (X), (CODE))
-
-#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
- print_operand_address ((FILE), (ADDR))
+#define CRT_MKSTR2(VAL) #VAL
+#define CRT_MKSTR(x) CRT_MKSTR2(x)
-#define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \
-do { \
- if (! output_addr_const_extra (FILE, (X))) \
- goto FAIL; \
-} while (0);
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n\t" \
+ "call " CRT_MKSTR(__USER_LABEL_PREFIX__) #FUNC "\n" \
+ TEXT_SECTION_ASM_OP);
\f
/* Which processor to schedule for. The cpu attribute defines a list that
mirrors this list, so changes to i386.md must be made at the same time. */
PROCESSOR_GENERIC32,
PROCESSOR_GENERIC64,
PROCESSOR_AMDFAM10,
+ PROCESSOR_BDVER1,
+ PROCESSOR_ATOM,
PROCESSOR_max
};
/* Smallest class containing REGNO. */
extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER];
-extern rtx ix86_compare_op0; /* operand 0 for comparisons */
-extern rtx ix86_compare_op1; /* operand 1 for comparisons */
-extern rtx ix86_compare_emitted;
+enum ix86_fpcmp_strategy {
+ IX86_FPCMP_SAHF,
+ IX86_FPCMP_COMI,
+ IX86_FPCMP_ARITH
+};
\f
/* To properly truncate FP values into integers, we need to set i387 control
word. We can't emit proper mode switching code before reload, as spills
\f
#define FASTCALL_PREFIX '@'
\f
-struct machine_function GTY(())
+/* Machine specific frame tracking during prologue/epilogue generation. */
+
+#ifndef USED_FOR_TARGET
+struct GTY(()) machine_frame_state
{
+ /* This pair tracks the currently active CFA as reg+offset. When reg
+ is drap_reg, we don't bother trying to record here the real CFA when
+ it might really be a DW_CFA_def_cfa_expression. */
+ rtx cfa_reg;
+ HOST_WIDE_INT cfa_offset;
+
+ /* The current offset (canonically from the CFA) of ESP and EBP.
+ When stack frame re-alignment is active, these may not be relative
+ to the CFA. However, in all cases they are relative to the offsets
+ of the saved registers stored in ix86_frame. */
+ HOST_WIDE_INT sp_offset;
+ HOST_WIDE_INT fp_offset;
+
+ /* The size of the red-zone that may be assumed for the purposes of
+ eliding register restore notes in the epilogue. This may be zero
+ if no red-zone is in effect, or may be reduced from the real
+ red-zone value by a maximum runtime stack re-alignment value. */
+ int red_zone_offset;
+
+ /* Indicate whether each of ESP, EBP or DRAP currently holds a valid
+ value within the frame. If false then the offset above should be
+ ignored. Note that DRAP, if valid, *always* points to the CFA and
+ thus has an offset of zero. */
+ BOOL_BITFIELD sp_valid : 1;
+ BOOL_BITFIELD fp_valid : 1;
+ BOOL_BITFIELD drap_valid : 1;
+
+ /* Indicate whether the local stack frame has been re-aligned. When
+ set, the SP/FP offsets above are relative to the aligned frame
+ and not the CFA. */
+ BOOL_BITFIELD realigned : 1;
+};
+
+struct GTY(()) machine_function {
struct stack_local_entry *stack_locals;
const char *some_ld_name;
int varargs_gpr_size;
int varargs_fpr_size;
- int accesses_prev_frame;
int optimize_mode_switching[MAX_386_ENTITIES];
- int needs_cld;
+
+ /* Number of saved registers USE_FAST_PROLOGUE_EPILOGUE
+ has been computed for. */
+ int use_fast_prologue_epilogue_nregs;
+
+  /* This value is used for amd64 targets and specifies the current ABI
+     to be used: MS_ABI means the Microsoft ABI, SYSV_ABI the System V ABI.  */
+ ENUM_BITFIELD(calling_abi) call_abi : 8;
+
+ /* Nonzero if the function accesses a previous frame. */
+ BOOL_BITFIELD accesses_prev_frame : 1;
+
+ /* Nonzero if the function requires a CLD in the prologue. */
+ BOOL_BITFIELD needs_cld : 1;
+
/* Set by ix86_compute_frame_layout and used by prologue/epilogue
expander to determine the style used. */
- int use_fast_prologue_epilogue;
- /* Number of saved registers USE_FAST_PROLOGUE_EPILOGUE has been computed
- for. */
- int use_fast_prologue_epilogue_nregs;
+ BOOL_BITFIELD use_fast_prologue_epilogue : 1;
+
/* If true, the current function needs the default PIC register, not
an alternate register (on x86) and must not use the red zone (on
x86_64), even if it's a leaf function. We don't want the
if all such instructions are optimized away. Use the
ix86_current_function_calls_tls_descriptor macro for a better
approximation. */
- int tls_descriptor_call_expanded_p;
- /* This value is used for amd64 targets and specifies the current abi
- to be used. MS_ABI means ms abi. Otherwise SYSV_ABI means sysv abi. */
- int call_abi;
+ BOOL_BITFIELD tls_descriptor_call_expanded_p : 1;
+
+  /* If true, the current function has a static chain placed on the
+     stack below the return address.  */
+ BOOL_BITFIELD static_chain_on_stack : 1;
+
+ /* During prologue/epilogue generation, the current frame state.
+ Otherwise, the frame state at the end of the prologue. */
+ struct machine_frame_state fs;
};
+#endif
#define ix86_stack_locals (cfun->machine->stack_locals)
#define ix86_varargs_gpr_size (cfun->machine->varargs_gpr_size)
REG_SP is live. */
#define ix86_current_function_calls_tls_descriptor \
(ix86_tls_descriptor_calls_expanded_in_cfun && df_regs_ever_live_p (SP_REG))
+#define ix86_static_chain_on_stack (cfun->machine->static_chain_on_stack)
/* Control behavior of x86_file_start. */
#define X86_FILE_START_VERSION_DIRECTIVE false
#define SYMBOL_REF_DLLEXPORT_P(X) \
((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLEXPORT) != 0)
-/* Model costs for vectorizer. */
-
-/* Cost of conditional branch. */
-#undef TARG_COND_BRANCH_COST
-#define TARG_COND_BRANCH_COST ix86_cost->branch_cost
-
-/* Enum through the target specific extra va_list types. Please, do not
- iterate the base va_list type name. */
-#define TARGET_ENUM_VA_LIST(IDX, PNAME, PTYPE) \
- (!TARGET_64BIT ? 0 : ix86_enum_va_list (IDX, PNAME, PTYPE))
-
-/* Cost of any scalar operation, excluding load and store. */
-#undef TARG_SCALAR_STMT_COST
-#define TARG_SCALAR_STMT_COST ix86_cost->scalar_stmt_cost
-
-/* Cost of scalar load. */
-#undef TARG_SCALAR_LOAD_COST
-#define TARG_SCALAR_LOAD_COST ix86_cost->scalar_load_cost
-
-/* Cost of scalar store. */
-#undef TARG_SCALAR_STORE_COST
-#define TARG_SCALAR_STORE_COST ix86_cost->scalar_store_cost
-
-/* Cost of any vector operation, excluding load, store or vector to scalar
- operation. */
-#undef TARG_VEC_STMT_COST
-#define TARG_VEC_STMT_COST ix86_cost->vec_stmt_cost
-
-/* Cost of vector to scalar operation. */
-#undef TARG_VEC_TO_SCALAR_COST
-#define TARG_VEC_TO_SCALAR_COST ix86_cost->vec_to_scalar_cost
-
-/* Cost of scalar to vector operation. */
-#undef TARG_SCALAR_TO_VEC_COST
-#define TARG_SCALAR_TO_VEC_COST ix86_cost->scalar_to_vec_cost
-
-/* Cost of aligned vector load. */
-#undef TARG_VEC_LOAD_COST
-#define TARG_VEC_LOAD_COST ix86_cost->vec_align_load_cost
-
-/* Cost of misaligned vector load. */
-#undef TARG_VEC_UNALIGNED_LOAD_COST
-#define TARG_VEC_UNALIGNED_LOAD_COST ix86_cost->vec_unalign_load_cost
-
-/* Cost of vector store. */
-#undef TARG_VEC_STORE_COST
-#define TARG_VEC_STORE_COST ix86_cost->vec_store_cost
-
-/* Cost of conditional taken branch for vectorizer cost model. */
-#undef TARG_COND_TAKEN_BRANCH_COST
-#define TARG_COND_TAKEN_BRANCH_COST ix86_cost->cond_taken_branch_cost
-
-/* Cost of conditional not taken branch for vectorizer cost model. */
-#undef TARG_COND_NOT_TAKEN_BRANCH_COST
-#define TARG_COND_NOT_TAKEN_BRANCH_COST ix86_cost->cond_not_taken_branch_cost
+extern void debug_ready_dispatch (void);
+extern void debug_dispatch_window (int);
/*
Local variables: