/* Definitions of target machine for GNU compiler for IA-32.
Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
- 2001, 2002 Free Software Foundation, Inc.
+ 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GNU CC.
/* configure can arrange to make this 2, to force a 486. */
#ifndef TARGET_CPU_DEFAULT
+#ifdef TARGET_64BIT_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_DEFAULT_k8
+#else
#define TARGET_CPU_DEFAULT 0
#endif
+#endif
/* Masks for the -m switches */
#define MASK_80387 0x00000001 /* Hardware floating point */
#define MASK_MMX 0x00002000 /* Support MMX regs/builtins */
#define MASK_SSE 0x00004000 /* Support SSE regs/builtins */
#define MASK_SSE2 0x00008000 /* Support SSE2 regs/builtins */
-#define MASK_3DNOW 0x00010000 /* Support 3Dnow builtins */
-#define MASK_3DNOW_A 0x00020000 /* Support Athlon 3Dnow builtins */
-#define MASK_128BIT_LONG_DOUBLE 0x00040000 /* long double size is 128bit */
-#define MASK_64BIT 0x00080000 /* Produce 64bit code */
-#define MASK_MS_BITFIELD_LAYOUT 0x00100000 /* Use native (MS) bitfield layout */
+#define MASK_PNI 0x00010000 /* Support PNI regs/builtins */
+#define MASK_3DNOW 0x00020000 /* Support 3Dnow builtins */
+#define MASK_3DNOW_A 0x00040000 /* Support Athlon 3Dnow builtins */
+#define MASK_128BIT_LONG_DOUBLE 0x00080000 /* long double size is 128bit */
+#define MASK_64BIT 0x00100000 /* Produce 64bit code */
+#define MASK_MS_BITFIELD_LAYOUT 0x00200000 /* Use native (MS) bitfield layout */
+#define MASK_TLS_DIRECT_SEG_REFS 0x00400000 /* Avoid adding %gs:0 */
/* Unused: 0x03800000 */
#endif
#endif
-#define TARGET_386 (ix86_cpu == PROCESSOR_I386)
-#define TARGET_486 (ix86_cpu == PROCESSOR_I486)
-#define TARGET_PENTIUM (ix86_cpu == PROCESSOR_PENTIUM)
-#define TARGET_PENTIUMPRO (ix86_cpu == PROCESSOR_PENTIUMPRO)
-#define TARGET_K6 (ix86_cpu == PROCESSOR_K6)
-#define TARGET_ATHLON (ix86_cpu == PROCESSOR_ATHLON)
-#define TARGET_PENTIUM4 (ix86_cpu == PROCESSOR_PENTIUM4)
-#define TARGET_K8 (ix86_cpu == PROCESSOR_K8)
+/* Avoid adding %gs:0 in TLS references; use %gs:address directly. */
+#define TARGET_TLS_DIRECT_SEG_REFS (target_flags & MASK_TLS_DIRECT_SEG_REFS)
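+/* An illustrative example (not part of the original header): with direct
+   segment references a local-exec TLS load can be a single instruction,
+       movl %gs:x@ntpoff, %eax
+   while -mno-tls-direct-seg-refs first materializes the thread pointer:
+       movl %gs:0, %eax
+       movl x@ntpoff(%eax), %eax  */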
+
+#define TARGET_386 (ix86_tune == PROCESSOR_I386)
+#define TARGET_486 (ix86_tune == PROCESSOR_I486)
+#define TARGET_PENTIUM (ix86_tune == PROCESSOR_PENTIUM)
+#define TARGET_PENTIUMPRO (ix86_tune == PROCESSOR_PENTIUMPRO)
+#define TARGET_K6 (ix86_tune == PROCESSOR_K6)
+#define TARGET_ATHLON (ix86_tune == PROCESSOR_ATHLON)
+#define TARGET_PENTIUM4 (ix86_tune == PROCESSOR_PENTIUM4)
+#define TARGET_K8 (ix86_tune == PROCESSOR_K8)
#define TARGET_ATHLON_K8 (TARGET_K8 || TARGET_ATHLON)
-#define CPUMASK (1 << ix86_cpu)
+#define TUNEMASK (1 << ix86_tune)
extern const int x86_use_leave, x86_push_memory, x86_zero_extend_with_and;
extern const int x86_use_bit_test, x86_cmove, x86_deep_branch;
extern const int x86_branch_hints, x86_unroll_strlen;
extern const int x86_sse_partial_reg_dependency, x86_sse_partial_regs;
extern const int x86_sse_typeless_stores, x86_sse_load0_by_pxor;
extern const int x86_use_ffreep, x86_sse_partial_regs_for_cvtsd2ss;
+extern const int x86_inter_unit_moves;
extern int x86_prefetch_sse;
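+/* Each x86_* tuning variable above is defined in i386.c as a bitmask over
+   processor_type values; a sketch of the idiom (the actual values live in
+   i386.c):
+       const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
+   The TARGET_* macros below AND such a mask with TUNEMASK, the bit for the
+   processor currently being tuned for.  */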
-#define TARGET_USE_LEAVE (x86_use_leave & CPUMASK)
-#define TARGET_PUSH_MEMORY (x86_push_memory & CPUMASK)
-#define TARGET_ZERO_EXTEND_WITH_AND (x86_zero_extend_with_and & CPUMASK)
-#define TARGET_USE_BIT_TEST (x86_use_bit_test & CPUMASK)
-#define TARGET_UNROLL_STRLEN (x86_unroll_strlen & CPUMASK)
+#define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
+#define TARGET_PUSH_MEMORY (x86_push_memory & TUNEMASK)
+#define TARGET_ZERO_EXTEND_WITH_AND (x86_zero_extend_with_and & TUNEMASK)
+#define TARGET_USE_BIT_TEST (x86_use_bit_test & TUNEMASK)
+#define TARGET_UNROLL_STRLEN (x86_unroll_strlen & TUNEMASK)
/* For sane SSE instruction set generation we need the fcomi instruction.  It is
safe to enable all CMOVE instructions. */
#define TARGET_CMOVE ((x86_cmove & (1 << ix86_arch)) || TARGET_SSE)
-#define TARGET_DEEP_BRANCH_PREDICTION (x86_deep_branch & CPUMASK)
-#define TARGET_BRANCH_PREDICTION_HINTS (x86_branch_hints & CPUMASK)
-#define TARGET_DOUBLE_WITH_ADD (x86_double_with_add & CPUMASK)
-#define TARGET_USE_SAHF ((x86_use_sahf & CPUMASK) && !TARGET_64BIT)
-#define TARGET_MOVX (x86_movx & CPUMASK)
-#define TARGET_PARTIAL_REG_STALL (x86_partial_reg_stall & CPUMASK)
-#define TARGET_USE_LOOP (x86_use_loop & CPUMASK)
-#define TARGET_USE_FIOP (x86_use_fiop & CPUMASK)
-#define TARGET_USE_MOV0 (x86_use_mov0 & CPUMASK)
-#define TARGET_USE_CLTD (x86_use_cltd & CPUMASK)
-#define TARGET_SPLIT_LONG_MOVES (x86_split_long_moves & CPUMASK)
-#define TARGET_READ_MODIFY_WRITE (x86_read_modify_write & CPUMASK)
-#define TARGET_READ_MODIFY (x86_read_modify & CPUMASK)
-#define TARGET_PROMOTE_QImode (x86_promote_QImode & CPUMASK)
-#define TARGET_FAST_PREFIX (x86_fast_prefix & CPUMASK)
-#define TARGET_SINGLE_STRINGOP (x86_single_stringop & CPUMASK)
-#define TARGET_QIMODE_MATH (x86_qimode_math & CPUMASK)
-#define TARGET_HIMODE_MATH (x86_himode_math & CPUMASK)
-#define TARGET_PROMOTE_QI_REGS (x86_promote_qi_regs & CPUMASK)
-#define TARGET_PROMOTE_HI_REGS (x86_promote_hi_regs & CPUMASK)
-#define TARGET_ADD_ESP_4 (x86_add_esp_4 & CPUMASK)
-#define TARGET_ADD_ESP_8 (x86_add_esp_8 & CPUMASK)
-#define TARGET_SUB_ESP_4 (x86_sub_esp_4 & CPUMASK)
-#define TARGET_SUB_ESP_8 (x86_sub_esp_8 & CPUMASK)
-#define TARGET_INTEGER_DFMODE_MOVES (x86_integer_DFmode_moves & CPUMASK)
-#define TARGET_PARTIAL_REG_DEPENDENCY (x86_partial_reg_dependency & CPUMASK)
+#define TARGET_DEEP_BRANCH_PREDICTION (x86_deep_branch & TUNEMASK)
+#define TARGET_BRANCH_PREDICTION_HINTS (x86_branch_hints & TUNEMASK)
+#define TARGET_DOUBLE_WITH_ADD (x86_double_with_add & TUNEMASK)
+#define TARGET_USE_SAHF ((x86_use_sahf & TUNEMASK) && !TARGET_64BIT)
+#define TARGET_MOVX (x86_movx & TUNEMASK)
+#define TARGET_PARTIAL_REG_STALL (x86_partial_reg_stall & TUNEMASK)
+#define TARGET_USE_LOOP (x86_use_loop & TUNEMASK)
+#define TARGET_USE_FIOP (x86_use_fiop & TUNEMASK)
+#define TARGET_USE_MOV0 (x86_use_mov0 & TUNEMASK)
+#define TARGET_USE_CLTD (x86_use_cltd & TUNEMASK)
+#define TARGET_SPLIT_LONG_MOVES (x86_split_long_moves & TUNEMASK)
+#define TARGET_READ_MODIFY_WRITE (x86_read_modify_write & TUNEMASK)
+#define TARGET_READ_MODIFY (x86_read_modify & TUNEMASK)
+#define TARGET_PROMOTE_QImode (x86_promote_QImode & TUNEMASK)
+#define TARGET_FAST_PREFIX (x86_fast_prefix & TUNEMASK)
+#define TARGET_SINGLE_STRINGOP (x86_single_stringop & TUNEMASK)
+#define TARGET_QIMODE_MATH (x86_qimode_math & TUNEMASK)
+#define TARGET_HIMODE_MATH (x86_himode_math & TUNEMASK)
+#define TARGET_PROMOTE_QI_REGS (x86_promote_qi_regs & TUNEMASK)
+#define TARGET_PROMOTE_HI_REGS (x86_promote_hi_regs & TUNEMASK)
+#define TARGET_ADD_ESP_4 (x86_add_esp_4 & TUNEMASK)
+#define TARGET_ADD_ESP_8 (x86_add_esp_8 & TUNEMASK)
+#define TARGET_SUB_ESP_4 (x86_sub_esp_4 & TUNEMASK)
+#define TARGET_SUB_ESP_8 (x86_sub_esp_8 & TUNEMASK)
+#define TARGET_INTEGER_DFMODE_MOVES (x86_integer_DFmode_moves & TUNEMASK)
+#define TARGET_PARTIAL_REG_DEPENDENCY (x86_partial_reg_dependency & TUNEMASK)
#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \
- (x86_sse_partial_reg_dependency & CPUMASK)
-#define TARGET_SSE_PARTIAL_REGS (x86_sse_partial_regs & CPUMASK)
+ (x86_sse_partial_reg_dependency & TUNEMASK)
+#define TARGET_SSE_PARTIAL_REGS (x86_sse_partial_regs & TUNEMASK)
#define TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS \
- (x86_sse_partial_regs_for_cvtsd2ss & CPUMASK)
-#define TARGET_SSE_TYPELESS_STORES (x86_sse_typeless_stores & CPUMASK)
-#define TARGET_SSE_TYPELESS_LOAD0 (x86_sse_typeless_load0 & CPUMASK)
-#define TARGET_SSE_LOAD0_BY_PXOR (x86_sse_load0_by_pxor & CPUMASK)
-#define TARGET_MEMORY_MISMATCH_STALL (x86_memory_mismatch_stall & CPUMASK)
-#define TARGET_PROLOGUE_USING_MOVE (x86_prologue_using_move & CPUMASK)
-#define TARGET_EPILOGUE_USING_MOVE (x86_epilogue_using_move & CPUMASK)
-#define TARGET_DECOMPOSE_LEA (x86_decompose_lea & CPUMASK)
+ (x86_sse_partial_regs_for_cvtsd2ss & TUNEMASK)
+#define TARGET_SSE_TYPELESS_STORES (x86_sse_typeless_stores & TUNEMASK)
+#define TARGET_SSE_TYPELESS_LOAD0 (x86_sse_typeless_load0 & TUNEMASK)
+#define TARGET_SSE_LOAD0_BY_PXOR (x86_sse_load0_by_pxor & TUNEMASK)
+#define TARGET_MEMORY_MISMATCH_STALL (x86_memory_mismatch_stall & TUNEMASK)
+#define TARGET_PROLOGUE_USING_MOVE (x86_prologue_using_move & TUNEMASK)
+#define TARGET_EPILOGUE_USING_MOVE (x86_epilogue_using_move & TUNEMASK)
+#define TARGET_DECOMPOSE_LEA (x86_decompose_lea & TUNEMASK)
#define TARGET_PREFETCH_SSE (x86_prefetch_sse)
-#define TARGET_SHIFT1 (x86_shift1 & CPUMASK)
-#define TARGET_USE_FFREEP (x86_use_ffreep & CPUMASK)
-#define TARGET_REP_MOVL_OPTIMAL (x86_rep_movl_optimal & CPUMASK)
+#define TARGET_SHIFT1 (x86_shift1 & TUNEMASK)
+#define TARGET_USE_FFREEP (x86_use_ffreep & TUNEMASK)
+#define TARGET_REP_MOVL_OPTIMAL (x86_rep_movl_optimal & TUNEMASK)
+#define TARGET_INTER_UNIT_MOVES (x86_inter_unit_moves & TUNEMASK)
#define TARGET_STACK_PROBE (target_flags & MASK_STACK_PROBE)
#define ASSEMBLER_DIALECT (ix86_asm_dialect)
-#define TARGET_SSE ((target_flags & (MASK_SSE | MASK_SSE2)) != 0)
+#define TARGET_SSE ((target_flags & MASK_SSE) != 0)
#define TARGET_SSE2 ((target_flags & MASK_SSE2) != 0)
+#define TARGET_PNI ((target_flags & MASK_PNI) != 0)
#define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0)
#define TARGET_MIX_SSE_I387 ((ix86_fpmath & FPMATH_SSE) \
&& (ix86_fpmath & FPMATH_387))
N_("Support MMX, SSE and SSE2 built-in functions and code generation") }, \
{ "no-sse2", -MASK_SSE2, \
N_("Do not support MMX, SSE and SSE2 built-in functions and code generation") }, \
+ { "pni", MASK_PNI, \
+ N_("Support MMX, SSE, SSE2 and PNI built-in functions and code generation") },\
+ { "no-pni", -MASK_PNI, \
+ N_("Do not support MMX, SSE, SSE2 and PNI built-in functions and code generation") },\
{ "128bit-long-double", MASK_128BIT_LONG_DOUBLE, \
N_("sizeof(long double) is 16") }, \
{ "96bit-long-double", -MASK_128BIT_LONG_DOUBLE, \
N_("Use red-zone in the x86-64 code") }, \
{ "no-red-zone", MASK_NO_RED_ZONE, \
N_("Do not use red-zone in the x86-64 code") }, \
+ { "tls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS, \
+ N_("Use direct references against %gs when accessing tls data") }, \
+ { "no-tls-direct-seg-refs", -MASK_TLS_DIRECT_SEG_REFS, \
+ N_("Do not use direct references against %gs when accessing tls data") }, \
SUBTARGET_SWITCHES \
- { "", TARGET_DEFAULT | TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_DEFAULT, 0 }}
+ { "", \
+ TARGET_DEFAULT | TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_DEFAULT \
+ | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT, 0 }}
#ifndef TARGET_64BIT_DEFAULT
#define TARGET_64BIT_DEFAULT 0
#endif
+#ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT
+#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0
+#endif
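+/* A subtarget that wants %gs-relative TLS references by default could, for
+   example, provide
+     #define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT MASK_TLS_DIRECT_SEG_REFS
+   before this point; the switch table above then ORs it into the default
+   target_flags.  */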
/* Once GDB has been enhanced to deal with functions without frame
pointers, we can change this to allow for elimination of
option if the fixed part matches. The actual option name is made
by appending `-m' to the specified name. */
#define TARGET_OPTIONS \
-{ { "cpu=", &ix86_cpu_string, \
- N_("Schedule code for given CPU")}, \
+{ { "tune=", &ix86_tune_string, \
+ N_("Schedule code for given CPU"), 0}, \
{ "fpmath=", &ix86_fpmath_string, \
- N_("Generate floating point mathematics using given instruction set")},\
+ N_("Generate floating point mathematics using given instruction set"), 0},\
{ "arch=", &ix86_arch_string, \
- N_("Generate code for given CPU")}, \
+ N_("Generate code for given CPU"), 0}, \
{ "regparm=", &ix86_regparm_string, \
- N_("Number of registers used to pass integer arguments") }, \
+ N_("Number of registers used to pass integer arguments"), 0},\
{ "align-loops=", &ix86_align_loops_string, \
- N_("Loop code aligned to this power of 2") }, \
+ N_("Loop code aligned to this power of 2"), 0}, \
{ "align-jumps=", &ix86_align_jumps_string, \
- N_("Jump targets are aligned to this power of 2") }, \
+ N_("Jump targets are aligned to this power of 2"), 0}, \
{ "align-functions=", &ix86_align_funcs_string, \
- N_("Function starts are aligned to this power of 2") }, \
+ N_("Function starts are aligned to this power of 2"), 0}, \
{ "preferred-stack-boundary=", \
&ix86_preferred_stack_boundary_string, \
- N_("Attempt to keep stack aligned to this power of 2") }, \
+ N_("Attempt to keep stack aligned to this power of 2"), 0}, \
{ "branch-cost=", &ix86_branch_cost_string, \
- N_("Branches are this expensive (1-5, arbitrary units)") }, \
+ N_("Branches are this expensive (1-5, arbitrary units)"), 0},\
{ "cmodel=", &ix86_cmodel_string, \
- N_("Use given x86-64 code model") }, \
+ N_("Use given x86-64 code model"), 0}, \
{ "debug-arg", &ix86_debug_arg_string, \
- "" /* Undocumented. */ }, \
+ "" /* Undocumented. */, 0}, \
{ "debug-addr", &ix86_debug_addr_string, \
- "" /* Undocumented. */ }, \
+ "" /* Undocumented. */, 0}, \
{ "asm=", &ix86_asm_string, \
- N_("Use given assembler dialect") }, \
+ N_("Use given assembler dialect"), 0}, \
{ "tls-dialect=", &ix86_tls_dialect_string, \
- N_("Use given thread-local storage dialect") }, \
+ N_("Use given thread-local storage dialect"), 0}, \
SUBTARGET_OPTIONS \
}
#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) \
optimization_options ((LEVEL), (SIZE))
+/* Support for configure-time defaults of some command line options. */
+#define OPTION_DEFAULT_SPECS \
+ {"arch", "%{!march=*:-march=%(VALUE)}"}, \
+ {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
+ {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }
+
/* Specs for the compiler proper */
#ifndef CC1_CPU_SPEC
#define CC1_CPU_SPEC "\
-%{!mcpu*: \
-%{m386:-mcpu=i386 \
-%n`-m386' is deprecated. Use `-march=i386' or `-mcpu=i386' instead.\n} \
-%{m486:-mcpu=i486 \
-%n`-m486' is deprecated. Use `-march=i486' or `-mcpu=i486' instead.\n} \
-%{mpentium:-mcpu=pentium \
-%n`-mpentium' is deprecated. Use `-march=pentium' or `-mcpu=pentium' instead.\n} \
-%{mpentiumpro:-mcpu=pentiumpro \
-%n`-mpentiumpro' is deprecated. Use `-march=pentiumpro' or `-mcpu=pentiumpro' instead.\n}} \
+%{!mtune*: \
+%{m386:-mtune=i386 \
+%n`-m386' is deprecated. Use `-march=i386' or `-mtune=i386' instead.\n} \
+%{m486:-mtune=i486 \
+%n`-m486' is deprecated. Use `-march=i486' or `-mtune=i486' instead.\n} \
+%{mpentium:-mtune=pentium \
+%n`-mpentium' is deprecated. Use `-march=pentium' or `-mtune=pentium' instead.\n} \
+%{mpentiumpro:-mtune=pentiumpro \
+%n`-mpentiumpro' is deprecated. Use `-march=pentiumpro' or `-mtune=pentiumpro' instead.\n} \
+%{mcpu=*:-mtune=%* \
+%n`-mcpu=' is deprecated. Use `-mtune=' or `-march=' instead.\n}} \
+%<mcpu=* \
%{mintel-syntax:-masm=intel \
%n`-mintel-syntax' is deprecated. Use `-masm=intel' instead.\n} \
%{mno-intel-syntax:-masm=att \
do \
{ \
size_t arch_len = strlen (ix86_arch_string); \
- size_t cpu_len = strlen (ix86_cpu_string); \
+ size_t tune_len = strlen (ix86_tune_string); \
int last_arch_char = ix86_arch_string[arch_len - 1]; \
- int last_cpu_char = ix86_cpu_string[cpu_len - 1]; \
+ int last_tune_char = ix86_tune_string[tune_len - 1]; \
\
if (TARGET_64BIT) \
{ \
builtin_assert ("cpu=x86_64"); \
- builtin_assert ("machine=x86_64"); \
+ builtin_define ("__amd64"); \
+ builtin_define ("__amd64__"); \
builtin_define ("__x86_64"); \
builtin_define ("__x86_64__"); \
+ builtin_define ("__amd64"); \
+ builtin_define ("__amd64__"); \
} \
else \
{ \
builtin_define_std ("i386"); \
} \
\
- /* Built-ins based on -mcpu= (or -march= if no \
- CPU given). */ \
+ /* Built-ins based on -mtune= (or -march= if no \
+ -mtune= given). */ \
if (TARGET_386) \
builtin_define ("__tune_i386__"); \
else if (TARGET_486) \
{ \
builtin_define ("__tune_i586__"); \
builtin_define ("__tune_pentium__"); \
- if (last_cpu_char == 'x') \
+ if (last_tune_char == 'x') \
builtin_define ("__tune_pentium_mmx__"); \
} \
else if (TARGET_PENTIUMPRO) \
{ \
builtin_define ("__tune_i686__"); \
builtin_define ("__tune_pentiumpro__"); \
- switch (last_cpu_char) \
+ switch (last_tune_char) \
{ \
case '3': \
builtin_define ("__tune_pentium3__"); \
else if (TARGET_K6) \
{ \
builtin_define ("__tune_k6__"); \
- if (last_cpu_char == '2') \
+ if (last_tune_char == '2') \
builtin_define ("__tune_k6_2__"); \
- else if (last_cpu_char == '3') \
+ else if (last_tune_char == '3') \
builtin_define ("__tune_k6_3__"); \
} \
else if (TARGET_ATHLON) \
{ \
builtin_define ("__tune_athlon__"); \
/* Only plain "athlon" lacks SSE. */ \
- if (last_cpu_char != 'n') \
+ if (last_tune_char != 'n') \
builtin_define ("__tune_athlon_sse__"); \
} \
else if (TARGET_K8) \
builtin_define ("__SSE__"); \
if (TARGET_SSE2) \
builtin_define ("__SSE2__"); \
+ if (TARGET_PNI) \
+ builtin_define ("__PNI__"); \
if (TARGET_SSE_MATH && TARGET_SSE) \
builtin_define ("__SSE_MATH__"); \
if (TARGET_SSE_MATH && TARGET_SSE2) \
the rounding precision is indeterminate, since either may be chosen
apparently at random. */
#define TARGET_FLT_EVAL_METHOD \
- (TARGET_MIX_SSE_I387 ? -1 : TARGET_SSE_MATH ? 1 : 2)
+ (TARGET_MIX_SSE_I387 ? -1 : TARGET_SSE_MATH ? 0 : 2)
#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
&& (TARGET_64BIT || !TARGET_PARTIAL_REG_STALL)) \
|| ((MODE1) == DImode && TARGET_64BIT)) \
&& ((MODE2) == HImode || (MODE2) == SImode \
- || ((MODE1) == QImode \
+ || ((MODE2) == QImode \
&& (TARGET_64BIT || !TARGET_PARTIAL_REG_STALL)) \
|| ((MODE2) == DImode && TARGET_64BIT))))
+/* It is possible to write patterns to move flags; but until someone
+   does it, avoid CCmode copies.  */
+#define AVOID_CCMODE_COPIES
/* Specify the modes required to caller save a given hard regno.
We do this on i386 to prevent flags from being saved at all.
#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
(CC_REGNO_P (REGNO) ? VOIDmode \
: (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \
- : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS)) \
+ : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), false)\
: (MODE) == HImode && !TARGET_PARTIAL_REG_STALL ? SImode \
: (MODE) == QImode && (REGNO) >= 4 && !TARGET_64BIT ? SImode \
: (MODE))
should always be returned in memory. You should instead use
`DEFAULT_PCC_STRUCT_RETURN' to indicate this. */
+int ix86_return_in_memory (tree type);
#define RETURN_IN_MEMORY(TYPE) \
ix86_return_in_memory (TYPE)
+/* This is overridden by <cygwin.h>. */
+#define MS_AGGREGATE_RETURN 0
+
\f
/* Define the classes of registers for register constraints in the
machine description. Also define ranges of constants.
|| ((CLASS) == SIREG) \
|| ((CLASS) == DIREG))
+/* Return a class of registers that cannot change FROM mode to TO mode.
+
+   x87 registers can't do subreg at all, as all values are reformatted to
+   extended precision.  XMM registers do not support subregs at the nonzero
+   offsets (4, 8 and 12) that would otherwise be valid for integer registers.
+   Since we can't determine these, prohibit all nonparadoxical subregs that
+   change size.  */
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ (GET_MODE_SIZE (TO) < GET_MODE_SIZE (FROM) \
+ ? reg_classes_intersect_p (FLOAT_SSE_REGS, (CLASS)) \
+ || MAYBE_MMX_CLASS_P (CLASS) \
+ : GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
+ ? reg_classes_intersect_p (FLOAT_REGS, (CLASS)) : 0)
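+/* As a concrete illustration: (subreg:SI (reg:DF ...) 0) is rejected for
+   classes that may contain x87, SSE or MMX registers (the narrowing case),
+   a widening subreg is rejected only for x87 classes, and same-size mode
+   changes are always allowed.  */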
+
/* A C statement that adds to CLOBBERS any hard regs the port wishes
to automatically clobber for all asms.
definition that is usually appropriate, refer to expr.h for additional
documentation. If `REG_PARM_STACK_SPACE' is defined, the argument will be
computed in the stack and then loaded into a register. */
-#define MUST_PASS_IN_STACK(MODE, TYPE) \
- ((TYPE) != 0 \
- && (TREE_CODE (TYPE_SIZE (TYPE)) != INTEGER_CST \
- || TREE_ADDRESSABLE (TYPE) \
- || ((MODE) == TImode) \
- || ((MODE) == BLKmode \
- && ! ((TYPE) != 0 \
- && TREE_CODE (TYPE_SIZE (TYPE)) == INTEGER_CST \
- && 0 == (int_size_in_bytes (TYPE) \
- % (PARM_BOUNDARY / BITS_PER_UNIT))) \
- && (FUNCTION_ARG_PADDING (MODE, TYPE) \
- == (BYTES_BIG_ENDIAN ? upward : downward)))))
+#define MUST_PASS_IN_STACK(MODE, TYPE) ix86_must_pass_in_stack ((MODE), (TYPE))
/* Value is the number of bytes of arguments automatically
popped when returning from a subroutine call.
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is 0. */
-#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
- init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME))
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL) \
+ init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL))
/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) 0
+/* A C expression that indicates when an argument must be passed by
+ reference. If nonzero for an argument, a copy of that argument is
+ made in memory and a pointer to the argument is passed instead of
+ the argument itself. The pointer is passed in whatever way is
+ appropriate for passing a pointer to that type. */
+
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
+ function_arg_pass_by_reference(&CUM, MODE, TYPE, NAMED)
+
/* Perform any needed actions needed for a function that is receiving a
variable number of arguments.
#define EXPAND_BUILTIN_VA_ARG(VALIST, TYPE) \
ix86_va_arg ((VALIST), (TYPE))
-/* This macro is invoked at the end of compilation. It is used here to
- output code for -fpic that will load the return address into %ebx. */
-
-#undef ASM_FILE_END
-#define ASM_FILE_END(FILE) ix86_asm_file_end (FILE)
+#define TARGET_ASM_FILE_END ix86_file_end
+#define NEED_INDICATE_EXEC_STACK 0
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. */
IX86_BUILTIN_CVTPI2PS,
IX86_BUILTIN_CVTPS2PI,
IX86_BUILTIN_CVTSI2SS,
+ IX86_BUILTIN_CVTSI642SS,
IX86_BUILTIN_CVTSS2SI,
+ IX86_BUILTIN_CVTSS2SI64,
IX86_BUILTIN_CVTTPS2PI,
IX86_BUILTIN_CVTTSS2SI,
+ IX86_BUILTIN_CVTTSS2SI64,
IX86_BUILTIN_MAXPS,
IX86_BUILTIN_MAXSS,
IX86_BUILTIN_PADDB,
IX86_BUILTIN_PADDW,
IX86_BUILTIN_PADDD,
+ IX86_BUILTIN_PADDQ,
IX86_BUILTIN_PADDSB,
IX86_BUILTIN_PADDSW,
IX86_BUILTIN_PADDUSB,
IX86_BUILTIN_PSUBB,
IX86_BUILTIN_PSUBW,
IX86_BUILTIN_PSUBD,
+ IX86_BUILTIN_PSUBQ,
IX86_BUILTIN_PSUBSB,
IX86_BUILTIN_PSUBSW,
IX86_BUILTIN_PSUBUSB,
IX86_BUILTIN_CVTPI2PD,
IX86_BUILTIN_CVTSI2SD,
+ IX86_BUILTIN_CVTSI642SD,
IX86_BUILTIN_CVTSD2SI,
+ IX86_BUILTIN_CVTSD2SI64,
IX86_BUILTIN_CVTSD2SS,
IX86_BUILTIN_CVTSS2SD,
IX86_BUILTIN_CVTTSD2SI,
+ IX86_BUILTIN_CVTTSD2SI64,
IX86_BUILTIN_CVTPS2DQ,
IX86_BUILTIN_CVTPS2PD,
IX86_BUILTIN_MFENCE,
IX86_BUILTIN_LFENCE,
+ /* Prescott New Instructions. */
+ IX86_BUILTIN_ADDSUBPS,
+ IX86_BUILTIN_HADDPS,
+ IX86_BUILTIN_HSUBPS,
+ IX86_BUILTIN_MOVSHDUP,
+ IX86_BUILTIN_MOVSLDUP,
+ IX86_BUILTIN_ADDSUBPD,
+ IX86_BUILTIN_HADDPD,
+ IX86_BUILTIN_HSUBPD,
+ IX86_BUILTIN_LOADDDUP,
+ IX86_BUILTIN_MOVDDUP,
+ IX86_BUILTIN_LDDQU,
+
+ IX86_BUILTIN_MONITOR,
+ IX86_BUILTIN_MWAIT,
+
IX86_BUILTIN_MAX
};
\f
-#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
-#define TARGET_STRIP_NAME_ENCODING ix86_strip_name_encoding
-
-#define ASM_OUTPUT_LABELREF(FILE,NAME) \
- do { \
- const char *xname = (NAME); \
- if (xname[0] == '%') \
- xname += 2; \
- if (xname[0] == '*') \
- xname += 1; \
- else \
- fputs (user_label_prefix, FILE); \
- fputs (xname, FILE); \
- } while (0)
-\f
/* Max number of args passed in registers. If this is more than 3, we will
have problems with ebx (register #4), since it is a call-saved register and
is also used as the pic register in ELF. So for now, don't allow more than
is done just by pretending it is already truncated. */
#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
-/* We assume that the store-condition-codes instructions store 0 for false
- and some other value for true. This is the value stored for true. */
-
-#define STORE_FLAG_VALUE 1
-
/* When a prototype says `char' or `short', really pass an `int'.
(The 386 can't easily push less than an int.) */
so give the MEM rtx a byte's mode. */
#define FUNCTION_MODE QImode
\f
-/* A part of a C `switch' statement that describes the relative costs
- of constant RTL expressions. It must contain `case' labels for
- expression codes `const_int', `const', `symbol_ref', `label_ref'
- and `const_double'. Each case must ultimately reach a `return'
- statement to return the relative cost of the use of that kind of
- constant value in an expression. The cost may depend on the
- precise value of the constant, which is available for examination
- in X, and the rtx code of the expression in which it is contained,
- found in OUTER_CODE.
-
- CODE is the expression code--redundant, since it can be obtained
- with `GET_CODE (X)'. */
-
-#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
- case CONST_INT: \
- case CONST: \
- case LABEL_REF: \
- case SYMBOL_REF: \
- if (TARGET_64BIT && !x86_64_sign_extended_value (RTX)) \
- return 3; \
- if (TARGET_64BIT && !x86_64_zero_extended_value (RTX)) \
- return 2; \
- return flag_pic && SYMBOLIC_CONST (RTX) ? 1 : 0; \
- \
- case CONST_DOUBLE: \
- if (GET_MODE (RTX) == VOIDmode) \
- return 0; \
- switch (standard_80387_constant_p (RTX)) \
- { \
- case 1: /* 0.0 */ \
- return 1; \
- case 2: /* 1.0 */ \
- return 2; \
- default: \
- /* Start with (MEM (SYMBOL_REF)), since that's where \
- it'll probably end up. Add a penalty for size. */ \
- return (COSTS_N_INSNS (1) + (flag_pic != 0) \
- + (GET_MODE (RTX) == SFmode ? 0 \
- : GET_MODE (RTX) == DFmode ? 1 : 2)); \
- }
-
-/* Delete the definition here when TOPLEVEL_COSTS_N_INSNS gets added to cse.c */
-#define TOPLEVEL_COSTS_N_INSNS(N) \
- do { total = COSTS_N_INSNS (N); goto egress_rtx_costs; } while (0)
-
-/* Return index of given mode in mult and division cost tables. */
-#define MODE_INDEX(mode) \
- ((mode) == QImode ? 0 \
- : (mode) == HImode ? 1 \
- : (mode) == SImode ? 2 \
- : (mode) == DImode ? 3 \
- : 4)
-
-/* Like `CONST_COSTS' but applies to nonconstant RTL expressions.
- This can be used, for example, to indicate how costly a multiply
- instruction is. In writing this macro, you can use the construct
- `COSTS_N_INSNS (N)' to specify a cost equal to N fast
- instructions. OUTER_CODE is the code of the expression in which X
- is contained.
-
- This macro is optional; do not define it if the default cost
- assumptions are adequate for the target machine. */
-
-#define RTX_COSTS(X, CODE, OUTER_CODE) \
- case ZERO_EXTEND: \
- /* The zero extensions is often completely free on x86_64, so make \
- it as cheap as possible. */ \
- if (TARGET_64BIT && GET_MODE (X) == DImode \
- && GET_MODE (XEXP (X, 0)) == SImode) \
- { \
- total = 1; goto egress_rtx_costs; \
- } \
- else \
- TOPLEVEL_COSTS_N_INSNS (TARGET_ZERO_EXTEND_WITH_AND ? \
- ix86_cost->add : ix86_cost->movzx); \
- break; \
- case SIGN_EXTEND: \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->movsx); \
- break; \
- case ASHIFT: \
- if (GET_CODE (XEXP (X, 1)) == CONST_INT \
- && (GET_MODE (XEXP (X, 0)) != DImode || TARGET_64BIT)) \
- { \
- HOST_WIDE_INT value = INTVAL (XEXP (X, 1)); \
- if (value == 1) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->add); \
- if ((value == 2 || value == 3) \
- && !TARGET_DECOMPOSE_LEA \
- && ix86_cost->lea <= ix86_cost->shift_const) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->lea); \
- } \
- /* fall through */ \
- \
- case ROTATE: \
- case ASHIFTRT: \
- case LSHIFTRT: \
- case ROTATERT: \
- if (!TARGET_64BIT && GET_MODE (XEXP (X, 0)) == DImode) \
- { \
- if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
- { \
- if (INTVAL (XEXP (X, 1)) > 32) \
- TOPLEVEL_COSTS_N_INSNS(ix86_cost->shift_const + 2); \
- else \
- TOPLEVEL_COSTS_N_INSNS(ix86_cost->shift_const * 2); \
- } \
- else \
- { \
- if (GET_CODE (XEXP (X, 1)) == AND) \
- TOPLEVEL_COSTS_N_INSNS(ix86_cost->shift_var * 2); \
- else \
- TOPLEVEL_COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2); \
- } \
- } \
- else \
- { \
- if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->shift_const); \
- else \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->shift_var); \
- } \
- break; \
- \
- case MULT: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->fmul); \
- else if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
- { \
- unsigned HOST_WIDE_INT value = INTVAL (XEXP (X, 1)); \
- int nbits = 0; \
- \
- while (value != 0) \
- { \
- nbits++; \
- value >>= 1; \
- } \
- \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->mult_init \
- [MODE_INDEX (GET_MODE (X))] \
- + nbits * ix86_cost->mult_bit); \
- } \
- else /* This is arbitrary */ \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->mult_init \
- [MODE_INDEX (GET_MODE (X))] \
- + 7 * ix86_cost->mult_bit); \
- \
- case DIV: \
- case UDIV: \
- case MOD: \
- case UMOD: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->fdiv); \
- else \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->divide \
- [MODE_INDEX (GET_MODE (X))]); \
- break; \
- \
- case PLUS: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->fadd); \
- else if (!TARGET_DECOMPOSE_LEA \
- && INTEGRAL_MODE_P (GET_MODE (X)) \
- && GET_MODE_BITSIZE (GET_MODE (X)) <= GET_MODE_BITSIZE (Pmode)) \
- { \
- if (GET_CODE (XEXP (X, 0)) == PLUS \
- && GET_CODE (XEXP (XEXP (X, 0), 0)) == MULT \
- && GET_CODE (XEXP (XEXP (XEXP (X, 0), 0), 1)) == CONST_INT \
- && CONSTANT_P (XEXP (X, 1))) \
- { \
- HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (X, 0), 0), 1));\
- if (val == 2 || val == 4 || val == 8) \
- { \
- return (COSTS_N_INSNS (ix86_cost->lea) \
- + rtx_cost (XEXP (XEXP (X, 0), 1), \
- (OUTER_CODE)) \
- + rtx_cost (XEXP (XEXP (XEXP (X, 0), 0), 0), \
- (OUTER_CODE)) \
- + rtx_cost (XEXP (X, 1), (OUTER_CODE))); \
- } \
- } \
- else if (GET_CODE (XEXP (X, 0)) == MULT \
- && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT) \
- { \
- HOST_WIDE_INT val = INTVAL (XEXP (XEXP (X, 0), 1)); \
- if (val == 2 || val == 4 || val == 8) \
- { \
- return (COSTS_N_INSNS (ix86_cost->lea) \
- + rtx_cost (XEXP (XEXP (X, 0), 0), \
- (OUTER_CODE)) \
- + rtx_cost (XEXP (X, 1), (OUTER_CODE))); \
- } \
- } \
- else if (GET_CODE (XEXP (X, 0)) == PLUS) \
- { \
- return (COSTS_N_INSNS (ix86_cost->lea) \
- + rtx_cost (XEXP (XEXP (X, 0), 0), (OUTER_CODE)) \
- + rtx_cost (XEXP (XEXP (X, 0), 1), (OUTER_CODE)) \
- + rtx_cost (XEXP (X, 1), (OUTER_CODE))); \
- } \
- } \
- /* fall through */ \
- \
- case MINUS: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->fadd); \
- /* fall through */ \
- \
- case AND: \
- case IOR: \
- case XOR: \
- if (!TARGET_64BIT && GET_MODE (X) == DImode) \
- return (COSTS_N_INSNS (ix86_cost->add) * 2 \
- + (rtx_cost (XEXP (X, 0), (OUTER_CODE)) \
- << (GET_MODE (XEXP (X, 0)) != DImode)) \
- + (rtx_cost (XEXP (X, 1), (OUTER_CODE)) \
- << (GET_MODE (XEXP (X, 1)) != DImode))); \
- /* fall through */ \
- \
- case NEG: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->fchs); \
- /* fall through */ \
- \
- case NOT: \
- if (!TARGET_64BIT && GET_MODE (X) == DImode) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->add * 2); \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->add); \
- \
- case FLOAT_EXTEND: \
- if (!TARGET_SSE_MATH \
- || !VALID_SSE_REG_MODE (GET_MODE (X))) \
- TOPLEVEL_COSTS_N_INSNS (0); \
- break; \
- \
- case ABS: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->fabs); \
- break; \
- \
- case SQRT: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- TOPLEVEL_COSTS_N_INSNS (ix86_cost->fsqrt); \
- break; \
- \
- egress_rtx_costs: \
- break;
-
-
-/* An expression giving the cost of an addressing mode that contains
- ADDRESS. If not defined, the cost is computed from the ADDRESS
- expression and the `CONST_COSTS' values.
-
- For most CISC machines, the default cost is a good approximation
- of the true cost of the addressing mode. However, on RISC
- machines, all instructions normally have the same length and
- execution time. Hence all addresses will have equal costs.
-
- In cases where more than one form of an address is known, the form
- with the lowest cost will be used. If multiple forms have the
- same, lowest, cost, the one that is the most complex will be used.
-
- For example, suppose an address that is equal to the sum of a
- register and a constant is used twice in the same basic block.
- When this macro is not defined, the address will be computed in a
- register and memory references will be indirect through that
- register. On machines where the cost of the addressing mode
- containing the sum is no higher than that of a simple indirect
- reference, this will produce an additional instruction and
- possibly require an additional register. Proper specification of
- this macro eliminates this overhead for such machines.
-
- Similar use of this macro is made in strength reduction of loops.
-
- ADDRESS need not be valid as an address. In such a case, the cost
- is not relevant and can be any value; invalid addresses need not be
- assigned a different cost.
-
- On machines where an address involving more than one register is as
- cheap as an address computation involving only one register,
- defining `ADDRESS_COST' to reflect this can cause two registers to
- be live over a region of code where only one would have been if
- `ADDRESS_COST' were not defined in that manner. This effect should
- be considered in the definition of this macro. Equivalent costs
- should probably only be given to addresses with different numbers
- of registers on machines with lots of registers.
-
- This macro will normally either not be defined or be defined as a
- constant.
-
- For i386, it is better to use a complex address than let gcc copy
- the address into a reg and make a new pseudo. But not if the address
- requires to two regs - that would mean more pseudos with longer
- lifetimes. */
-
-#define ADDRESS_COST(RTX) \
- ix86_address_cost (RTX)
-
/* A C expression for the cost of moving data from a register in class FROM to
one in class TO. The classes are expressed using the enumeration values
such as `GENERAL_REGS'. A value of 2 is the default; other values are
/* #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 0 */
-/* Define this macro to inhibit strength reduction of memory
- addresses. (On some machines, such strength reduction seems to do
- harm rather than good.) */
-
-/* #define DONT_REDUCE_ADDR */
-
/* Define this macro if it is as good or better to call a constant
function address than to call an address kept in a register.
#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE, X) \
i386_dwarf_output_addr_const ((FILE), (X))
-/* Either simplify a location expression, or return the original. */
-
-#define ASM_SIMPLIFY_DWARF_ADDR(X) \
- i386_simplify_dwarf_addr (X)
-
/* Emit a dtp-relative reference to a TLS variable. */
#ifdef HAVE_AS_TLS
{"const0_operand", {CONST_INT, CONST_DOUBLE}}, \
{"const1_operand", {CONST_INT}}, \
{"const248_operand", {CONST_INT}}, \
+ {"const_0_to_3_operand", {CONST_INT}}, \
+ {"const_0_to_7_operand", {CONST_INT}}, \
+ {"const_0_to_15_operand", {CONST_INT}}, \
+ {"const_0_to_255_operand", {CONST_INT}}, \
{"incdec_operand", {CONST_INT}}, \
{"mmx_reg_operand", {REG}}, \
{"reg_no_sp_operand", {SUBREG, REG}}, \
{"ix86_comparison_operator", {EQ, NE, LE, LT, GE, GT, LEU, LTU, GEU, \
GTU, UNORDERED, ORDERED, UNLE, UNLT, \
UNGE, UNGT, LTGT, UNEQ }}, \
+ {"ix86_carry_flag_operator", {LTU, LT, UNLT, GT, UNGT, LE, UNLE, \
+ GE, UNGE, LTGT, UNEQ}}, \
{"cmp_fp_expander_operand", {CONST_DOUBLE, SUBREG, REG, MEM}}, \
{"ext_register_operand", {SUBREG, REG}}, \
{"binary_fp_operator", {PLUS, MINUS, MULT, DIV}}, \
{"fp_register_operand", {REG}}, \
{"register_and_not_fp_reg_operand", {REG}}, \
{"zero_extended_scalar_load_operand", {MEM}}, \
+ {"vector_move_operand", {CONST_VECTOR, SUBREG, REG, MEM}}, \
+ {"no_seg_address_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \
+ LABEL_REF, SUBREG, REG, MEM, PLUS, MULT}},
/* A list of predicates that do special things with modes, and so
should not elicit warnings for VOIDmode match_operand. */
PROCESSOR_max
};
-extern enum processor_type ix86_cpu;
-extern const char *ix86_cpu_string;
+extern enum processor_type ix86_tune;
+extern const char *ix86_tune_string;
extern enum processor_type ix86_arch;
extern const char *ix86_arch_string;
/* Define this macro if the port needs extra instructions inserted
for mode switching in an optimizing compilation. */
-#define OPTIMIZE_MODE_SWITCHING(ENTITY) 1
+#define OPTIMIZE_MODE_SWITCHING(ENTITY) ix86_optimize_mode_switching
/* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as
initializer for an array of integers. Each initializer element N
((SRC) < FIRST_STACK_REG || (SRC) > LAST_STACK_REG)
\f
-#define MACHINE_DEPENDENT_REORG(X) x86_machine_dependent_reorg(X)
-
#define DLL_IMPORT_EXPORT_PREFIX '#'
#define FASTCALL_PREFIX '@'
+\f
+struct machine_function GTY(())
+{
+ struct stack_local_entry *stack_locals;
+ const char *some_ld_name;
+ int save_varrargs_registers;
+ int accesses_prev_frame;
+ int optimize_mode_switching;
+ /* Set by ix86_compute_frame_layout and used by prologue/epilogue expander to
+ determine the style used. */
+ int use_fast_prologue_epilogue;
+ /* Number of saved registers USE_FAST_PROLOGUE_EPILOGUE has been computed
+ for. */
+ int use_fast_prologue_epilogue_nregs;
+};
+
+#define ix86_stack_locals (cfun->machine->stack_locals)
+#define ix86_save_varrargs_registers (cfun->machine->save_varrargs_registers)
+#define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching)
+
+/* Control behavior of x86_file_start. */
+#define X86_FILE_START_VERSION_DIRECTIVE false
+#define X86_FILE_START_FLTUSED false
/*
Local variables: