/* Subroutines used for code generation on IA-32.
Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
- 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of GCC.
/* Processor costs (relative to an add) */
+/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
+#define COSTS_N_BYTES(N) ((N) * 2)
+
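+/* Sanity check on the scale (illustration only): with COSTS_N_INSNS (N)
+   equal to (N) * 4, COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), so a
+   2-byte addition costs the same when optimizing for size as a
+   single-cycle insn does when optimizing for speed, keeping the two
+   scales directly comparable.  */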
static const
struct processor_costs size_cost = { /* costs for tuning for size */
- 2, /* cost of an add instruction */
- 3, /* cost of a lea instruction */
- 2, /* variable shift costs */
- 3, /* constant shift costs */
- {3, 3, 3, 3, 5}, /* cost of starting a multiply */
+ COSTS_N_BYTES (2), /* cost of an add instruction */
+ COSTS_N_BYTES (3), /* cost of a lea instruction */
+ COSTS_N_BYTES (2), /* variable shift costs */
+ COSTS_N_BYTES (3), /* constant shift costs */
+ {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
+ COSTS_N_BYTES (3), /* HI */
+ COSTS_N_BYTES (3), /* SI */
+ COSTS_N_BYTES (3), /* DI */
+ COSTS_N_BYTES (5)}, /* other */
0, /* cost of multiply per each bit set */
- {3, 3, 3, 3, 5}, /* cost of a divide/mod */
- 3, /* cost of movsx */
- 3, /* cost of movzx */
+ {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
+ COSTS_N_BYTES (3), /* HI */
+ COSTS_N_BYTES (3), /* SI */
+ COSTS_N_BYTES (3), /* DI */
+ COSTS_N_BYTES (5)}, /* other */
+ COSTS_N_BYTES (3), /* cost of movsx */
+ COSTS_N_BYTES (3), /* cost of movzx */
0, /* "large" insn */
2, /* MOVE_RATIO */
2, /* cost for loading QImode using movzbl */
3, /* MMX or SSE register to integer */
0, /* size of prefetch block */
0, /* number of parallel prefetches */
- 1, /* Branch cost */
- 2, /* cost of FADD and FSUB insns. */
- 2, /* cost of FMUL instruction. */
- 2, /* cost of FDIV instruction. */
- 2, /* cost of FABS instruction. */
- 2, /* cost of FCHS instruction. */
- 2, /* cost of FSQRT instruction. */
+ 2, /* Branch cost */
+ COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
+ COSTS_N_BYTES (2), /* cost of FMUL instruction. */
+ COSTS_N_BYTES (2), /* cost of FDIV instruction. */
+ COSTS_N_BYTES (2), /* cost of FABS instruction. */
+ COSTS_N_BYTES (2), /* cost of FCHS instruction. */
+ COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
};
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = { /* 386 specific costs */
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 3, /* variable shift costs */
- 2, /* constant shift costs */
- {6, 6, 6, 6, 6}, /* cost of starting a multiply */
- 1, /* cost of multiply per each bit set */
- {23, 23, 23, 23, 23}, /* cost of a divide/mod */
- 3, /* cost of movsx */
- 2, /* cost of movzx */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (3), /* variable shift costs */
+ COSTS_N_INSNS (2), /* constant shift costs */
+ {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (6), /* HI */
+ COSTS_N_INSNS (6), /* SI */
+ COSTS_N_INSNS (6), /* DI */
+ COSTS_N_INSNS (6)}, /* other */
+ COSTS_N_INSNS (1), /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (23), /* HI */
+ COSTS_N_INSNS (23), /* SI */
+ COSTS_N_INSNS (23), /* DI */
+ COSTS_N_INSNS (23)}, /* other */
+ COSTS_N_INSNS (3), /* cost of movsx */
+ COSTS_N_INSNS (2), /* cost of movzx */
15, /* "large" insn */
3, /* MOVE_RATIO */
4, /* cost for loading QImode using movzbl */
0, /* size of prefetch block */
0, /* number of parallel prefetches */
1, /* Branch cost */
- 23, /* cost of FADD and FSUB insns. */
- 27, /* cost of FMUL instruction. */
- 88, /* cost of FDIV instruction. */
- 22, /* cost of FABS instruction. */
- 24, /* cost of FCHS instruction. */
- 122, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (27), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (88), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (22), /* cost of FABS instruction. */
+ COSTS_N_INSNS (24), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
};
static const
struct processor_costs i486_cost = { /* 486 specific costs */
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 3, /* variable shift costs */
- 2, /* constant shift costs */
- {12, 12, 12, 12, 12}, /* cost of starting a multiply */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (3), /* variable shift costs */
+ COSTS_N_INSNS (2), /* constant shift costs */
+ {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (12), /* HI */
+ COSTS_N_INSNS (12), /* SI */
+ COSTS_N_INSNS (12), /* DI */
+ COSTS_N_INSNS (12)}, /* other */
1, /* cost of multiply per each bit set */
- {40, 40, 40, 40, 40}, /* cost of a divide/mod */
- 3, /* cost of movsx */
- 2, /* cost of movzx */
+ {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (40), /* HI */
+ COSTS_N_INSNS (40), /* SI */
+ COSTS_N_INSNS (40), /* DI */
+ COSTS_N_INSNS (40)}, /* other */
+ COSTS_N_INSNS (3), /* cost of movsx */
+ COSTS_N_INSNS (2), /* cost of movzx */
15, /* "large" insn */
3, /* MOVE_RATIO */
4, /* cost for loading QImode using movzbl */
0, /* size of prefetch block */
0, /* number of parallel prefetches */
1, /* Branch cost */
- 8, /* cost of FADD and FSUB insns. */
- 16, /* cost of FMUL instruction. */
- 73, /* cost of FDIV instruction. */
- 3, /* cost of FABS instruction. */
- 3, /* cost of FCHS instruction. */
- 83, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (16), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (73), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (3), /* cost of FABS instruction. */
+ COSTS_N_INSNS (3), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
};
static const
struct processor_costs pentium_cost = {
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 4, /* variable shift costs */
- 1, /* constant shift costs */
- {11, 11, 11, 11, 11}, /* cost of starting a multiply */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (4), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (11), /* HI */
+ COSTS_N_INSNS (11), /* SI */
+ COSTS_N_INSNS (11), /* DI */
+ COSTS_N_INSNS (11)}, /* other */
0, /* cost of multiply per each bit set */
- {25, 25, 25, 25, 25}, /* cost of a divide/mod */
- 3, /* cost of movsx */
- 2, /* cost of movzx */
+ {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (25), /* HI */
+ COSTS_N_INSNS (25), /* SI */
+ COSTS_N_INSNS (25), /* DI */
+ COSTS_N_INSNS (25)}, /* other */
+ COSTS_N_INSNS (3), /* cost of movsx */
+ COSTS_N_INSNS (2), /* cost of movzx */
8, /* "large" insn */
6, /* MOVE_RATIO */
6, /* cost for loading QImode using movzbl */
0, /* size of prefetch block */
0, /* number of parallel prefetches */
2, /* Branch cost */
- 3, /* cost of FADD and FSUB insns. */
- 3, /* cost of FMUL instruction. */
- 39, /* cost of FDIV instruction. */
- 1, /* cost of FABS instruction. */
- 1, /* cost of FCHS instruction. */
- 70, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (3), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (39), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (1), /* cost of FABS instruction. */
+ COSTS_N_INSNS (1), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
};
static const
struct processor_costs pentiumpro_cost = {
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 1, /* variable shift costs */
- 1, /* constant shift costs */
- {4, 4, 4, 4, 4}, /* cost of starting a multiply */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (4), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (4)}, /* other */
0, /* cost of multiply per each bit set */
- {17, 17, 17, 17, 17}, /* cost of a divide/mod */
- 1, /* cost of movsx */
- 1, /* cost of movzx */
+ {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (17), /* HI */
+ COSTS_N_INSNS (17), /* SI */
+ COSTS_N_INSNS (17), /* DI */
+ COSTS_N_INSNS (17)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
8, /* "large" insn */
6, /* MOVE_RATIO */
2, /* cost for loading QImode using movzbl */
32, /* size of prefetch block */
6, /* number of parallel prefetches */
2, /* Branch cost */
- 3, /* cost of FADD and FSUB insns. */
- 5, /* cost of FMUL instruction. */
- 56, /* cost of FDIV instruction. */
- 2, /* cost of FABS instruction. */
- 2, /* cost of FCHS instruction. */
- 56, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (5), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (56), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
};
static const
struct processor_costs k6_cost = {
- 1, /* cost of an add instruction */
- 2, /* cost of a lea instruction */
- 1, /* variable shift costs */
- 1, /* constant shift costs */
- {3, 3, 3, 3, 3}, /* cost of starting a multiply */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (2), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (3), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (3), /* DI */
+ COSTS_N_INSNS (3)}, /* other */
0, /* cost of multiply per each bit set */
- {18, 18, 18, 18, 18}, /* cost of a divide/mod */
- 2, /* cost of movsx */
- 2, /* cost of movzx */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (18), /* HI */
+ COSTS_N_INSNS (18), /* SI */
+ COSTS_N_INSNS (18), /* DI */
+ COSTS_N_INSNS (18)}, /* other */
+ COSTS_N_INSNS (2), /* cost of movsx */
+ COSTS_N_INSNS (2), /* cost of movzx */
8, /* "large" insn */
4, /* MOVE_RATIO */
3, /* cost for loading QImode using movzbl */
32, /* size of prefetch block */
1, /* number of parallel prefetches */
1, /* Branch cost */
- 2, /* cost of FADD and FSUB insns. */
- 2, /* cost of FMUL instruction. */
- 56, /* cost of FDIV instruction. */
- 2, /* cost of FABS instruction. */
- 2, /* cost of FCHS instruction. */
- 56, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (2), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (56), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
};
static const
struct processor_costs athlon_cost = {
- 1, /* cost of an add instruction */
- 2, /* cost of a lea instruction */
- 1, /* variable shift costs */
- 1, /* constant shift costs */
- {5, 5, 5, 5, 5}, /* cost of starting a multiply */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (2), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (5), /* HI */
+ COSTS_N_INSNS (5), /* SI */
+ COSTS_N_INSNS (5), /* DI */
+ COSTS_N_INSNS (5)}, /* other */
0, /* cost of multiply per each bit set */
- {18, 26, 42, 74, 74}, /* cost of a divide/mod */
- 1, /* cost of movsx */
- 1, /* cost of movzx */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
8, /* "large" insn */
9, /* MOVE_RATIO */
4, /* cost for loading QImode using movzbl */
64, /* size of prefetch block */
6, /* number of parallel prefetches */
5, /* Branch cost */
- 4, /* cost of FADD and FSUB insns. */
- 4, /* cost of FMUL instruction. */
- 24, /* cost of FDIV instruction. */
- 2, /* cost of FABS instruction. */
- 2, /* cost of FCHS instruction. */
- 35, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (4), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (24), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
};
static const
struct processor_costs k8_cost = {
- 1, /* cost of an add instruction */
- 2, /* cost of a lea instruction */
- 1, /* variable shift costs */
- 1, /* constant shift costs */
- {3, 4, 3, 4, 5}, /* cost of starting a multiply */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (2), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (5)}, /* other */
0, /* cost of multiply per each bit set */
- {18, 26, 42, 74, 74}, /* cost of a divide/mod */
- 1, /* cost of movsx */
- 1, /* cost of movzx */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
8, /* "large" insn */
9, /* MOVE_RATIO */
4, /* cost for loading QImode using movzbl */
64, /* size of prefetch block */
6, /* number of parallel prefetches */
5, /* Branch cost */
- 4, /* cost of FADD and FSUB insns. */
- 4, /* cost of FMUL instruction. */
- 19, /* cost of FDIV instruction. */
- 2, /* cost of FABS instruction. */
- 2, /* cost of FCHS instruction. */
- 35, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (4), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (19), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
};
static const
struct processor_costs pentium4_cost = {
- 1, /* cost of an add instruction */
- 3, /* cost of a lea instruction */
- 4, /* variable shift costs */
- 4, /* constant shift costs */
- {15, 15, 15, 15, 15}, /* cost of starting a multiply */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (3), /* cost of a lea instruction */
+ COSTS_N_INSNS (4), /* variable shift costs */
+ COSTS_N_INSNS (4), /* constant shift costs */
+ {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (15), /* HI */
+ COSTS_N_INSNS (15), /* SI */
+ COSTS_N_INSNS (15), /* DI */
+ COSTS_N_INSNS (15)}, /* other */
0, /* cost of multiply per each bit set */
- {56, 56, 56, 56, 56}, /* cost of a divide/mod */
- 1, /* cost of movsx */
- 1, /* cost of movzx */
+ {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (56), /* HI */
+ COSTS_N_INSNS (56), /* SI */
+ COSTS_N_INSNS (56), /* DI */
+ COSTS_N_INSNS (56)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
16, /* "large" insn */
6, /* MOVE_RATIO */
2, /* cost for loading QImode using movzbl */
64, /* size of prefetch block */
6, /* number of parallel prefetches */
2, /* Branch cost */
- 5, /* cost of FADD and FSUB insns. */
- 7, /* cost of FMUL instruction. */
- 43, /* cost of FDIV instruction. */
- 2, /* cost of FABS instruction. */
- 2, /* cost of FCHS instruction. */
- 43, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (7), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (43), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
};
static const
struct processor_costs nocona_cost = {
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 1, /* variable shift costs */
- 1, /* constant shift costs */
- {10, 10, 10, 10, 10}, /* cost of starting a multiply */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (10), /* HI */
+ COSTS_N_INSNS (10), /* SI */
+ COSTS_N_INSNS (10), /* DI */
+ COSTS_N_INSNS (10)}, /* other */
0, /* cost of multiply per each bit set */
- {66, 66, 66, 66, 66}, /* cost of a divide/mod */
- 1, /* cost of movsx */
- 1, /* cost of movzx */
+ {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (66), /* HI */
+ COSTS_N_INSNS (66), /* SI */
+ COSTS_N_INSNS (66), /* DI */
+ COSTS_N_INSNS (66)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
16, /* "large" insn */
17, /* MOVE_RATIO */
4, /* cost for loading QImode using movzbl */
128, /* size of prefetch block */
8, /* number of parallel prefetches */
1, /* Branch cost */
- 6, /* cost of FADD and FSUB insns. */
- 8, /* cost of FMUL instruction. */
- 40, /* cost of FDIV instruction. */
- 3, /* cost of FABS instruction. */
- 3, /* cost of FCHS instruction. */
- 44, /* cost of FSQRT instruction. */
+ COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (8), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (40), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (3), /* cost of FABS instruction. */
+ COSTS_N_INSNS (3), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
+};
+
+/* Generic64 should produce code tuned for Nocona and K8. */
+static const
+struct processor_costs generic64_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ /* On all chips taken into consideration, lea is 2 cycles or more.  With
+ this cost, however, our current implementation of synth_mult results in
+ the use of unnecessary temporary registers, causing regressions on several
+ SPECfp benchmarks (see the note after this table).  */
+ COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (2)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 17, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {4, 4, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {4, 4, 4}, /* cost of storing integer registers */
+ 4, /* cost of reg,reg fld/fst */
+ {12, 12, 12}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {6, 6, 8}, /* cost of loading integer registers */
+ 2, /* cost of moving MMX register */
+ {8, 8}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {8, 8}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {8, 8, 8}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {8, 8, 8}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 5, /* MMX or SSE register to integer */
+ 64, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
+ value is increased to a perhaps more appropriate value of 5.  */
+ 3, /* Branch cost */
+ COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (8), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (20), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (8), /* cost of FABS instruction. */
+ COSTS_N_INSNS (8), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
+};
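+
+/* Under the COSTS_N_INSNS-is-(N)*4 assumption noted earlier, the lea entry
+   COSTS_N_INSNS (1) + 1 above evaluates to 5: just above an add (4) but
+   well below two insns (8).  That small bias is enough to keep synth_mult
+   from choosing lea-based sequences that tie up extra temporary registers,
+   without pricing lea as a genuinely slow instruction.  */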
+
+/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
+static const
+struct processor_costs generic32_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (2)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 17, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {4, 4, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {4, 4, 4}, /* cost of storing integer registers */
+ 4, /* cost of reg,reg fld/fst */
+ {12, 12, 12}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {6, 6, 8}, /* cost of loading integer registers */
+ 2, /* cost of moving MMX register */
+ {8, 8}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {8, 8}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {8, 8, 8}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {8, 8, 8}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 5, /* MMX or SSE register to integer */
+ 64, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ 3, /* Branch cost */
+ COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (8), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (20), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (8), /* cost of FABS instruction. */
+ COSTS_N_INSNS (8), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
};
const struct processor_costs *ix86_cost = &pentium_cost;
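/* A minimal sketch of how these tables are consulted (an illustration only;
   field names follow the struct processor_costs declaration, and ix86_cost
   is repointed during option processing, e.g. at &size_cost when optimizing
   for size).  A multiply by a constant with nbits set bits is priced
   roughly as

     ix86_cost->mult_init[mode_index] + nbits * ix86_cost->mult_bit

   where mode_index selects the QI/HI/SI/DI/other row of the table.  */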
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
-
-const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
-const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
+#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
+#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
+#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
+
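+/* Each x86_* word below is a bitmask over tunings; a minimal sketch of how
+   they are tested, assuming the TUNEMASK/TARGET_* convention this port uses
+   in i386.h:
+
+     #define TUNEMASK (1 << ix86_tune)
+     #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
+
+   so OR-ing m_GENERIC into a word simply opts both generic tunings in.  */
+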
+/* Generic instruction choice should be a common subset of the supported
+   CPUs (PPro/PENT4/NOCONA/Athlon/K8). */
+
+/* Leave does not affect Nocona SPEC2000 results negatively, so enabling it
+   for Generic64 seems like a good code size tradeoff.  We can't enable it
+   for 32-bit generic because it does not work well with PPro based chips. */
+const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8 | m_GENERIC64;
+const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
const int x86_zero_extend_with_and = m_486 | m_PENT;
-const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
+const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
-const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
-const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
+const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_GENERIC;
+const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_fisttp = m_NOCONA;
const int x86_3dnow_a = m_ATHLON_K8;
-const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
+const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
/* Branch hints were put in P4 based on simulation results.  But after P4
   was made, no performance benefit was observed with branch hints.  It
   also increases the code size.  As a result, icc never generates branch
   hints. */
const int x86_branch_hints = 0;
-const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
+const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC32; /* m_GENERIC | m_ATHLON_K8? */
+/* We probably ought to watch for partial register stalls on the Generic32
+   compilation setting as well.  However, in the current implementation
+   partial register stalls are not eliminated very well: they can be
+   introduced via subregs synthesized by combine and can happen in
+   caller/callee saving sequences.
+   Because this option pays back little on PPro based chips and conflicts
+   with the partial register dependencies used by Athlon/P4 based chips, it
+   is better to leave it off for generic32 for now. */
const int x86_partial_reg_stall = m_PPRO;
const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
-const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
+const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_GENERIC);
const int x86_use_mov0 = m_K6;
-const int x86_use_cltd = ~(m_PENT | m_K6);
+const int x86_use_cltd = ~(m_PENT | m_K6 | m_GENERIC);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
-const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
+const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_GENERIC; /* m_PENT4 ? */
const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
+/* On PPro this flag is meant to avoid partial register stalls.  Just like
+   x86_partial_reg_stall, this option might be considered for Generic32 if
+   our scheme for avoiding partial stalls were more effective. */
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
-const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
-const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
-const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
-const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
-const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
-const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
-const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
-const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
-const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
-const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
-const int x86_decompose_lea = m_PENT4 | m_NOCONA;
+const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC);
+const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC;
+const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
+const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
const int x86_shift1 = ~m_486;
-const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
-const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
+const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
+/* In the Generic model we have a conflict between PPro/Pentium4 based
+   chips, which treat 128-bit SSE registers as single units, and K8 based
+   chips, which divide SSE registers into two 64-bit halves.
+   x86_sse_partial_reg_dependency promotes all store destinations to 128
+   bits to allow register renaming on 128-bit SSE units, but that usually
+   costs one extra micro-op on 64-bit SSE units.  Experimental results show
+   that disabling this option on P4 brings over a 20% SPECfp regression,
+   while enabling it on K8 brings a roughly 2.4% regression that can be
+   partly masked by careful scheduling of moves. */
+const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC;
/* Set for machines where the type and dependencies are resolved on SSE
register parts instead of whole registers, so we may maintain just
lower part of scalar values in proper format leaving the upper part
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
+const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_GENERIC);
/* ??? Allowing interunit moves makes it all too easy for the compiler to put
   integer data in xmm registers, which results in pretty abysmal code. */
const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
-const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
+const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC32;
/* Some CPU cores are not able to predict more than 4 branch instructions in
   a 16-byte window. */
-const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
-const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
+const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT | m_GENERIC;
const int x86_use_bt = m_ATHLON_K8;
/* Compare and exchange was added for 80486. */
const int x86_cmpxchg = ~m_386;
+/* Compare and exchange 8 bytes was added for Pentium. */
+const int x86_cmpxchg8b = ~(m_386 | m_486);
+/* Compare and exchange 16 bytes was added for Nocona. */
+const int x86_cmpxchg16b = m_NOCONA;
/* Exchange and add was added for 80486. */
const int x86_xadd = ~m_386;
+const int x86_pad_returns = m_ATHLON_K8 | m_GENERIC;
/* In case the average insn count for single function invocation is
lower than this constant, emit fast (but longer) prologue and
enum cmodel ix86_cmodel;
/* Asm dialect. */
enum asm_dialect ix86_asm_dialect = ASM_ATT;
-/* TLS dialext. */
+/* TLS dialect. */
enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
/* Which unit we are generating floating point math for. */
/* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
char internal_label_prefix[16];
int internal_label_prefix_len;
+
+/* Table for BUILT_IN_NORMAL to BUILT_IN_MD mapping. */
+static GTY(()) tree ix86_builtin_function_variants[(int) END_BUILTINS];
\f
static bool ix86_handle_option (size_t, const char *, int);
static void output_pic_addr_const (FILE *, rtx, int);
static int ix86_adjust_cost (rtx, rtx, rtx, int);
static int ia32_multipass_dfa_lookahead (void);
static void ix86_init_mmx_sse_builtins (void);
+static void ix86_init_sse_abi_builtins (void);
static rtx x86_this_parameter (tree);
static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
tree, int *, int);
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
+static bool ix86_scalar_mode_supported_p (enum machine_mode);
static bool ix86_vector_mode_supported_p (enum machine_mode);
static int ix86_address_cost (rtx);
tree, bool);
static void ix86_init_builtins (void);
static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
+static rtx ix86_expand_library_builtin (tree, rtx, rtx, enum machine_mode, int);
static const char *ix86_mangle_fundamental_type (tree);
static tree ix86_stack_protect_fail (void);
+static rtx ix86_internal_arg_pointer (void);
+static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
/* This function is only used on Solaris. */
static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
-static void x86_64_elf_select_section (tree decl, int reloc,
- unsigned HOST_WIDE_INT align)
- ATTRIBUTE_UNUSED;
+static section *x86_64_elf_select_section (tree decl, int reloc,
+ unsigned HOST_WIDE_INT align)
+ ATTRIBUTE_UNUSED;
\f
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin
+#undef TARGET_EXPAND_LIBRARY_BUILTIN
+#define TARGET_EXPAND_LIBRARY_BUILTIN ix86_expand_library_builtin
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
+#undef TARGET_INTERNAL_ARG_POINTER
+#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
+#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
+#define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
+
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
{&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
{&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
{&k8_cost, 0, 0, 16, 7, 16, 7, 16},
- {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
+ {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
+ {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
+ {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
};
static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
| PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
{"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
| PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
+ {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
+ {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
};
int const pta_size = ARRAY_SIZE (processor_alias_table);
flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
}
- if (!ix86_tune_string && ix86_arch_string)
- ix86_tune_string = ix86_arch_string;
- if (!ix86_tune_string)
+ /* Need to check -mtune=generic first. */
+ if (ix86_tune_string)
+ {
+ if (!strcmp (ix86_tune_string, "generic")
+ || !strcmp (ix86_tune_string, "i686"))
+ {
+ if (TARGET_64BIT)
+ ix86_tune_string = "generic64";
+ else
+ ix86_tune_string = "generic32";
+ }
+ else if (!strncmp (ix86_tune_string, "generic", 7))
+ error ("bad value (%s) for -mtune= switch", ix86_tune_string);
+ }
+ else
{
- ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
- ix86_tune_defaulted = 1;
+ if (ix86_arch_string)
+ ix86_tune_string = ix86_arch_string;
+ if (!ix86_tune_string)
+ {
+ ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
+ ix86_tune_defaulted = 1;
+ }
+
+ /* ix86_tune_string is set to ix86_arch_string or defaulted. We
+ need to use a sensible tune option. */
+ if (!strcmp (ix86_tune_string, "generic")
+ || !strcmp (ix86_tune_string, "x86-64")
+ || !strcmp (ix86_tune_string, "i686"))
+ {
+ if (TARGET_64BIT)
+ ix86_tune_string = "generic64";
+ else
+ ix86_tune_string = "generic32";
+ }
}
+ if (!strcmp (ix86_tune_string, "x86-64"))
+ warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
+ "-mtune=generic instead as appropriate.");
+
if (!ix86_arch_string)
ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
+ if (!strcmp (ix86_arch_string, "generic"))
+ error ("generic CPU can be used only for -mtune= switch");
+ if (!strncmp (ix86_arch_string, "generic", 7))
+ error ("bad value (%s) for -march= switch", ix86_arch_string);
if (ix86_cmodel_string != 0)
{
}
if (ix86_asm_string != 0)
{
- if (!strcmp (ix86_asm_string, "intel"))
+ if (! TARGET_MACHO
+ && !strcmp (ix86_asm_string, "intel"))
ix86_asm_dialect = ASM_INTEL;
else if (!strcmp (ix86_asm_string, "att"))
ix86_asm_dialect = ASM_ATT;
The default of 128 bits is for Pentium III's SSE __m128, but we
don't want additional code to keep the stack aligned when
optimizing for code size. */
- ix86_preferred_stack_boundary = (optimize_size
- ? TARGET_64BIT ? 128 : 32
- : 128);
+ ix86_preferred_stack_boundary = ((TARGET_64BIT || TARGET_MACHO || !optimize_size)
+ ? 128 : 32);
if (ix86_preferred_stack_boundary_string)
{
i = atoi (ix86_preferred_stack_boundary_string);
}
/* Validate -mbranch-cost= value, or provide default. */
- ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
+ ix86_branch_cost = ix86_cost->branch_cost;
if (ix86_branch_cost_string)
{
i = atoi (ix86_branch_cost_string);
{
if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
ix86_tls_dialect = TLS_DIALECT_GNU;
+ else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
+ ix86_tls_dialect = TLS_DIALECT_GNU2;
else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
ix86_tls_dialect = TLS_DIALECT_SUN;
else
&& ! TARGET_SSE)
error ("-msseregparm used without SSE enabled");
+ /* Accept -msselibm only if SSE2 support is enabled. */
+ if (TARGET_SSELIBM
+ && ! TARGET_SSE2)
+ error ("-msselibm used without SSE2 enabled");
+
+ /* Reject -msselibm on 64-bit targets. */
+ if (TARGET_SSELIBM
+ && TARGET_64BIT)
+ error ("-msselibm used on a 64bit target");
+
ix86_fpmath = TARGET_FPMATH_DEFAULT;
if (ix86_fpmath_string != 0)
&& !optimize_size)
target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
+ /* ??? Unwind info is not correct around the CFG unless either a frame
+ pointer is present or M_A_O_A is set. Fixing this requires rewriting
+ unwind info generation to be aware of the CFG and propagating states
+ around edges. */
+ if ((flag_unwind_tables || flag_asynchronous_unwind_tables
+ || flag_exceptions || flag_non_call_exceptions)
+ && flag_omit_frame_pointer
+ && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
+ {
+ if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
+ warning (0, "unwind tables currently require either a frame pointer "
+ "or -maccumulate-outgoing-args for correctness");
+ target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
+ }
+
/* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
{
char *p;
RELOC indicates whether forming the initial value of DECL requires
link-time relocations. */
-static void
+static section *
x86_64_elf_select_section (tree decl, int reloc,
- unsigned HOST_WIDE_INT align)
+ unsigned HOST_WIDE_INT align)
{
if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
&& ix86_in_large_data_p (decl))
{
const char *sname = NULL;
+ unsigned int flags = SECTION_WRITE;
switch (categorize_decl_for_section (decl, reloc, flag_pic))
{
case SECCAT_DATA:
break;
case SECCAT_BSS:
sname = ".lbss";
+ flags |= SECTION_BSS;
break;
case SECCAT_RODATA:
case SECCAT_RODATA_MERGE_STR:
case SECCAT_RODATA_MERGE_STR_INIT:
case SECCAT_RODATA_MERGE_CONST:
sname = ".lrodata";
+ flags = 0;
break;
case SECCAT_SRODATA:
case SECCAT_SDATA:
}
if (sname)
{
- named_section (decl, sname, reloc);
- return;
+ /* We might get called with string constants, but get_named_section
+ doesn't like them as they are not DECLs. Also, we need to set
+ flags in that case. */
+ if (!DECL_P (decl))
+ return get_section (sname, flags, NULL);
+ return get_named_section (decl, sname, reloc);
}
}
- default_elf_select_section (decl, reloc, align);
+ return default_elf_select_section (decl, reloc, align);
}
/* Build up a unique section name, expressed as a
{
if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
&& size > (unsigned int)ix86_section_threshold)
- named_section (decl, ".lbss", 0);
+ switch_to_section (get_named_section (decl, ".lbss", 0));
else
- bss_section ();
+ switch_to_section (bss_section);
ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
#ifdef ASM_DECLARE_OBJECT_NAME
last_assemble_variable_decl = decl;
{
tree func;
rtx a, b;
- bool one_void, one_reg;
/* If we are generating position-independent code, we cannot sibcall
optimize any indirect call, or a direct call to a global function,
as the PLT requires %ebx be live. */
- if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
+ if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
return false;
if (decl)
a = ix86_function_value (TREE_TYPE (exp), func, false);
b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
cfun->decl, false);
- one_void = (VOID_TYPE_P (TREE_TYPE (exp))
- || VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))));
- one_reg = ((REG_P (a) && !STACK_REG_P (a))
- || (REG_P (b) && !STACK_REG_P (b)));
- if (!(one_void && one_reg)
- && !rtx_equal_p (a, b))
+ if (STACK_REG_P (a) || STACK_REG_P (b))
+ {
+ if (!rtx_equal_p (a, b))
+ return false;
+ }
+ else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
+ ;
+ else if (!rtx_equal_p (a, b))
return false;
/* If this call is indirect, we'll need to be able to use a call-clobbered
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
/* Dllimport'd functions are also called indirectly. */
- if (decl && lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl))
+ if (decl && DECL_DLLIMPORT_P (decl)
&& ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
return false;
#endif
+ /* If we force-aligned the stack, then sibcalling would unalign the
+ stack, which may break the called function. */
+ if (cfun->machine->force_align_arg_pointer)
+ return false;
+
/* Otherwise okay. That also includes certain types of indirect calls. */
return true;
}
struct cgraph_local_info *i = cgraph_local_info (decl);
if (i && i->local)
{
+ int local_regparm, globals = 0, regno;
+
+ /* Make sure no regparm register is taken by a global register
+ variable. */
+ for (local_regparm = 0; local_regparm < 3; local_regparm++)
+ if (global_regs[local_regparm])
+ break;
/* We can't use regparm(3) for nested functions as these use
static chain pointer in third argument. */
- if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
- regparm = 2;
- else
- regparm = 3;
+ if (local_regparm == 3
+ && decl_function_context (decl)
+ && !DECL_NO_STATIC_CHAIN (decl))
+ local_regparm = 2;
+ /* Each global register variable increases register pressure, so the
+ more global register variables there are, the less the regparm
+ optimization can be used, unless requested by the user explicitly. */
+ for (regno = 0; regno < 6; regno++)
+ if (global_regs[regno])
+ globals++;
+ local_regparm
+ = globals < local_regparm ? local_regparm - globals : 0;
+
+ if (local_regparm > regparm)
+ regparm = local_regparm;
}
}
}
misaligned integers. */
if (DECL_BIT_FIELD (field))
{
- for (i = int_bit_position (field) / 8 / 8;
- i < (int_bit_position (field)
+ for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
+ i < ((int_bit_position (field) + (bit_offset % 64))
+ tree_low_cst (DECL_SIZE (field), 0)
+ 63) / 8 / 8; i++)
classes[i] =
/* Classification of atomic types. */
switch (mode)
{
+ case SDmode:
+ case DDmode:
+ classes[0] = X86_64_SSE_CLASS;
+ return 1;
+ case TDmode:
+ classes[0] = X86_64_SSE_CLASS;
+ classes[1] = X86_64_SSEUP_CLASS;
+ return 2;
case DImode:
case SImode:
case HImode:
/* Just for use if some languages pass arrays by value. */
if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
return true;
+ break;
default:
gcc_unreachable ();
}
}
-/* Return false iff type is returned in memory. */
+/* Return true iff type is returned in memory. */
int
ix86_return_in_memory (tree type)
{
if (mode == XFmode)
return 0;
+ if (mode == TDmode)
+ return 1;
+
if (size > 12)
return 1;
return 0;
case DFmode:
case DCmode:
case TFmode:
+ case SDmode:
+ case DDmode:
+ case TDmode:
return gen_rtx_REG (mode, FIRST_SSE_REG);
case XFmode:
case XCmode:
if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
return FIRST_SSE_REG;
+ /* Decimal floating point values can go in %eax, unlike other float modes. */
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return 0;
+
/* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
- if (GET_MODE_CLASS (mode) != MODE_FLOAT || !TARGET_FLOAT_RETURNS_IN_80387)
+ if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
return 0;
/* Floating point return values in %st(0), except for local functions when
{
mem = gen_rtx_MEM (Pmode,
plus_constant (save_area, i * UNITS_PER_WORD));
+ MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
emit_move_insn (mem, gen_rtx_REG (Pmode,
x86_64_int_parameter_registers[i]));
plus_constant (save_area,
8 * REGPARM_MAX + 127)));
mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
+ MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, set);
set_mem_align (mem, BITS_PER_WORD);
f_sav = TREE_CHAIN (f_ovf);
valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
- gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
- fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
- sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
/* Count number of gp and fp argument registers used. */
words = current_function_args_info.words;
if (cfun->va_list_gpr_size)
{
- t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
- build_int_cst (NULL_TREE, n_gpr * 8));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
+ build_int_cst (NULL_TREE, n_gpr * 8));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
if (cfun->va_list_fpr_size)
{
- t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
- build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
+ build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Find the overflow area. */
t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
if (words != 0)
- t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
- build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
- t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ t = build2 (PLUS_EXPR, TREE_TYPE (ovf), t,
+ build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Find the register save area.
Prologue of the function save it right above stack frame. */
t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
- t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
f_sav = TREE_CHAIN (f_ovf);
valist = build_va_arg_indirect_ref (valist);
- gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
- fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
- sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
if (indirect_p)
(REGPARM_MAX - needed_intregs + 1) * 8);
t = build2 (GE_EXPR, boolean_type_node, gpr, t);
t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
- t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
+ t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
gimplify_and_add (t, pre_p);
}
if (needed_sseregs)
+ REGPARM_MAX * 8);
t = build2 (GE_EXPR, boolean_type_node, fpr, t);
t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
- t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
+ t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
gimplify_and_add (t, pre_p);
}
/* ... otherwise out of the overflow area. */
/* Care for on-stack alignment if needed. */
- if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
+ if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
+ || integer_zerop (TYPE_SIZE (type)))
t = ovf;
else
{
HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
- t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
- build_int_cst (TREE_TYPE (ovf), align - 1));
- t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
- build_int_cst (TREE_TYPE (t), -align));
+ t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
+ build_int_cst (TREE_TYPE (ovf), align - 1));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), -align));
}
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
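/* For instance, with a 16-byte argument boundary (align == 16) and an
   overflow pointer of 0x1004, the computation above yields
   (0x1004 + 15) & -16 == 0x1010, the next 16-byte aligned slot. */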
the frame pointer by default. Turn it back on now if we've not
got a leaf function. */
if (TARGET_OMIT_LEAF_FRAME_POINTER
- && (!current_function_is_leaf))
+ && (!current_function_is_leaf
+ || ix86_current_function_calls_tls_descriptor))
return 1;
if (current_function_profile)
cfun->machine->accesses_prev_frame = 1;
}
\f
-#if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
+#if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
get_pc_thunk_name (name, regno);
+#if TARGET_MACHO
+ if (TARGET_MACHO)
+ {
+ switch_to_section (darwin_sections[text_coal_section]);
+ fputs ("\t.weak_definition\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputs ("\n\t.private_extern\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputs ("\n", asm_out_file);
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+ }
+ else
+#endif
if (USE_HIDDEN_LINKONCE)
{
tree decl;
DECL_ONE_ONLY (decl) = 1;
(*targetm.asm_out.unique_section) (decl, 0);
- named_section (decl, NULL, 0);
+ switch_to_section (get_named_section (decl, NULL, 0));
(*targetm.asm_out.globalize_label) (asm_out_file, name);
fputs ("\t.hidden\t", asm_out_file);
}
else
{
- text_section ();
+ switch_to_section (text_section);
ASM_OUTPUT_LABEL (asm_out_file, name);
}
/* Emit code for the SET_GOT patterns. */
const char *
-output_set_got (rtx dest)
+output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
{
rtx xops[3];
if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
{
- xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
+ xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
if (!flag_pic)
output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
output_asm_insn ("call\t%a2", xops);
#if TARGET_MACHO
- /* Output the "canonical" label name ("Lxx$pb") here too. This
- is what will be referred to by the Mach-O PIC subsystem. */
- ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
+ /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
+ is what will be referenced by the Mach-O PIC subsystem. */
+ if (!label)
+ ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
#endif
+
(*targetm.asm_out.internal_label) (asm_out_file, "L",
CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
xops[2] = gen_rtx_MEM (QImode, xops[2]);
output_asm_insn ("call\t%X2", xops);
+ /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
+ is what will be referenced by the Mach-O PIC subsystem. */
+#if TARGET_MACHO
+ if (!label)
+ ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
+ else
+ targetm.asm_out.internal_label (asm_out_file, "L",
+ CODE_LABEL_NUMBER (label));
+#endif
}
+ if (TARGET_MACHO)
+ return "";
+
if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
- else if (!TARGET_MACHO)
+ else
output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
return "";
static unsigned int
ix86_select_alt_pic_regnum (void)
{
- if (current_function_is_leaf && !current_function_profile)
+ if (current_function_is_leaf && !current_function_profile
+ && !ix86_current_function_calls_tls_descriptor)
{
int i;
for (i = 2; i >= 0; --i)
}
}
+ if (cfun->machine->force_align_arg_pointer
+ && regno == REGNO (cfun->machine->force_align_arg_pointer))
+ return 1;
+
return (regs_ever_live[regno]
&& !call_used_regs[regno]
&& !fixed_regs[regno]
expander assumes that last current_function_outgoing_args_size
of stack frame are unused. */
if (ACCUMULATE_OUTGOING_ARGS
- && (!current_function_is_leaf || current_function_calls_alloca))
+ && (!current_function_is_leaf || current_function_calls_alloca
+ || ix86_current_function_calls_tls_descriptor))
{
offset += current_function_outgoing_args_size;
frame->outgoing_arguments_size = current_function_outgoing_args_size;
/* Align stack boundary. Only needed if we're calling another function
or using alloca. */
- if (!current_function_is_leaf || current_function_calls_alloca)
+ if (!current_function_is_leaf || current_function_calls_alloca
+ || ix86_current_function_calls_tls_descriptor)
frame->padding2 = ((offset + preferred_alignment - 1)
& -preferred_alignment) - offset;
else
frame->save_regs_using_mov = false;
if (TARGET_RED_ZONE && current_function_sp_is_unchanging
- && current_function_is_leaf)
+ && current_function_is_leaf
+ && !ix86_current_function_calls_tls_descriptor)
{
frame->red_zone_size = frame->to_allocate;
if (frame->save_regs_using_mov)
static void
ix86_emit_save_regs (void)
{
- int regno;
+ unsigned int regno;
rtx insn;
- for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
+ for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
if (ix86_save_reg (regno, true))
{
insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
static void
ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
{
- int regno;
+ unsigned int regno;
rtx insn;
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
RTX_FRAME_RELATED_P (insn) = 1;
}
+/* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
+
+static rtx
+ix86_internal_arg_pointer (void)
+{
+ if (FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
+ && DECL_NAME (current_function_decl)
+ && MAIN_NAME_P (DECL_NAME (current_function_decl))
+ && DECL_FILE_SCOPE_P (current_function_decl))
+ {
+ cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
+ return copy_to_reg (cfun->machine->force_align_arg_pointer);
+ }
+ else
+ return virtual_incoming_args_rtx;
+}
+
+/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
+ This is called from dwarf2out.c to emit call frame instructions
+ for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
+static void
+ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
+{
+ rtx unspec = SET_SRC (pattern);
+ gcc_assert (GET_CODE (unspec) == UNSPEC);
+
+ switch (index)
+ {
+ case UNSPEC_REG_SAVE:
+ dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
+ SET_DEST (pattern));
+ break;
+ case UNSPEC_DEF_CFA:
+ dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
+ INTVAL (XVECEXP (unspec, 0, 0)));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
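+/* For example, the UNSPEC_DEF_CFA note constructed in the prologue below
+   has the shape
+
+     (set (reg) (unspec [(const_int 0)] UNSPEC_DEF_CFA))
+
+   from which the hook above recovers the new CFA as that register plus
+   the constant and hands it to dwarf2out_def_cfa. */
+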
/* Expand the prologue into a bunch of separate insns. */
void
ix86_compute_frame_layout (&frame);
+ if (cfun->machine->force_align_arg_pointer)
+ {
+ rtx x, y;
+
+ /* Grab the argument pointer. */
+ x = plus_constant (stack_pointer_rtx, 4);
+ y = cfun->machine->force_align_arg_pointer;
+ insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* The unwind info consists of two parts: install the fafp as the cfa,
+ and record the fafp as the "save register" of the stack pointer.
+ The latter is there so that the unwinder can see where it should
+ restore the stack pointer across the "and" insn. */
+ x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
+ x = gen_rtx_SET (VOIDmode, y, x);
+ RTX_FRAME_RELATED_P (x) = 1;
+ y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
+ UNSPEC_REG_SAVE);
+ y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
+ RTX_FRAME_RELATED_P (y) = 1;
+ x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
+ x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
+ REG_NOTES (insn) = x;
+
+ /* Align the stack. */
+ emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-16)));
+
+ /* And here we cheat like madmen with the unwind info. We force the
+ cfa register back to sp+4, which is exactly what it was at the
+ start of the function. Re-pushing the return address leaves the
+ return address at the same spot relative to the cfa, and thus is
+ correct with respect to the unwind info. */
+ x = cfun->machine->force_align_arg_pointer;
+ x = gen_frame_mem (Pmode, plus_constant (x, -4));
+ insn = emit_insn (gen_push (x));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ x = GEN_INT (4);
+ x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
+ x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
+ x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
+ REG_NOTES (insn) = x;
+ }
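+
+ /* Roughly, with the fafp in hard register 2 (%ecx), the insns
+ emitted above correspond to
+ leal 4(%esp), %ecx
+ andl $-16, %esp
+ pushl -4(%ecx)
+ (a sketch in AT&T syntax, not literal output). */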
+
/* Note: AT&T enter does NOT have reversed args. Enter is probably
slower on all targets. Also sdb doesn't like it. */
}
}
+ if (cfun->machine->force_align_arg_pointer)
+ {
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ cfun->machine->force_align_arg_pointer,
+ GEN_INT (-4)));
+ }
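+ /* This sets sp = fafp - 4, pointing back at the re-pushed return
+ address (roughly "leal -4(%ecx), %esp" in the sketch above). */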
+
/* Sibcall epilogues don't want a return instruction. */
if (style == 0)
return;
return TARGET_64BIT;
case UNSPEC_TPOFF:
case UNSPEC_NTPOFF:
- return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
+ x = XVECEXP (x, 0, 0);
+ return (GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
case UNSPEC_DTPOFF:
- return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
+ x = XVECEXP (x, 0, 0);
+ return (GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
default:
return false;
}
/* We must have drilled down to a symbol. */
- if (!symbolic_operand (x, Pmode))
+ if (GET_CODE (x) == LABEL_REF)
+ return true;
+ if (GET_CODE (x) != SYMBOL_REF)
return false;
/* FALLTHRU */
case SYMBOL_REF:
/* TLS symbols are never valid. */
- if (tls_symbolic_operand (x, Pmode))
+ if (SYMBOL_REF_TLS_MODEL (x))
return false;
break;
case UNSPEC_GOTOFF:
return TARGET_64BIT;
case UNSPEC_TPOFF:
- return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
+ x = XVECEXP (inner, 0, 0);
+ return (GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
default:
return false;
}
when they are not dynamic symbols. */
if (TARGET_64BIT)
{
- /* TLS references should always be enclosed in UNSPEC. */
- if (tls_symbolic_operand (disp, GET_MODE (disp)))
- return 0;
- if (GET_CODE (disp) == SYMBOL_REF
- && !SYMBOL_REF_FAR_ADDR_P (disp)
- && SYMBOL_REF_LOCAL_P (disp))
- return 1;
- if (GET_CODE (disp) == LABEL_REF)
- return 1;
- if (GET_CODE (disp) == CONST
- && GET_CODE (XEXP (disp, 0)) == PLUS)
+ rtx op0 = disp, op1;
+
+ switch (GET_CODE (disp))
{
- rtx op0 = XEXP (XEXP (disp, 0), 0);
- rtx op1 = XEXP (XEXP (disp, 0), 1);
+ case LABEL_REF:
+ return true;
+
+ case CONST:
+ if (GET_CODE (XEXP (disp, 0)) != PLUS)
+ break;
+ op0 = XEXP (XEXP (disp, 0), 0);
+ op1 = XEXP (XEXP (disp, 0), 1);
+ if (GET_CODE (op1) != CONST_INT
+ || INTVAL (op1) >= 16*1024*1024
+ || INTVAL (op1) < -16*1024*1024)
+ break;
+ if (GET_CODE (op0) == LABEL_REF)
+ return true;
+ if (GET_CODE (op0) != SYMBOL_REF)
+ break;
+ /* FALLTHRU */
+ case SYMBOL_REF:
/* TLS references should always be enclosed in UNSPEC. */
- if (tls_symbolic_operand (op0, GET_MODE (op0)))
- return 0;
- if (((GET_CODE (op0) == SYMBOL_REF
- && !SYMBOL_REF_FAR_ADDR_P (op0)
- && SYMBOL_REF_LOCAL_P (op0))
- || GET_CODE (op0) == LABEL_REF)
- && GET_CODE (op1) == CONST_INT
- && INTVAL (op1) < 16*1024*1024
- && INTVAL (op1) >= -16*1024*1024)
- return 1;
+ if (SYMBOL_REF_TLS_MODEL (op0))
+ return false;
+ if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
+ return true;
+ break;
+
+ default:
+ break;
}
}
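+ /* As a sketch: a local, non-TLS "sym" or "sym + 8" is accepted here
+ as a RIP-relative displacement, while "sym + 32*1024*1024" breaks
+ out of the switch, since the addend must stay within +/- 16MB. */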
if (GET_CODE (disp) != CONST)
case UNSPEC_INDNTPOFF:
if (saw_plus)
return false;
- return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
+ disp = XVECEXP (disp, 0, 0);
+ return (GET_CODE (disp) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
case UNSPEC_NTPOFF:
- return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
+ disp = XVECEXP (disp, 0, 0);
+ return (GET_CODE (disp) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
case UNSPEC_DTPOFF:
- return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
+ disp = XVECEXP (disp, 0, 0);
+ return (GET_CODE (disp) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
}
return 0;
}
else
{
- if (GET_CODE (addr) == CONST)
+ if (GET_CODE (addr) == CONST_INT
+ && !x86_64_immediate_operand (addr, VOIDmode))
+ {
+ if (reg)
+ {
+ emit_move_insn (reg, addr);
+ new = reg;
+ }
+ else
+ new = force_reg (Pmode, addr);
+ }
+ else if (GET_CODE (addr) == CONST)
{
addr = XEXP (addr, 0);
{
if (INTVAL (op1) < -16*1024*1024
|| INTVAL (op1) >= 16*1024*1024)
- new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
+ {
+ if (!x86_64_immediate_operand (op1, Pmode))
+ op1 = force_reg (Pmode, op1);
+ new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
+ }
}
}
else
static rtx
legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
{
- rtx dest, base, off, pic;
+ rtx dest, base, off, pic, tp;
int type;
switch (model)
{
case TLS_MODEL_GLOBAL_DYNAMIC:
dest = gen_reg_rtx (Pmode);
- if (TARGET_64BIT)
+ tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
+
+ if (TARGET_64BIT && ! TARGET_GNU2_TLS)
{
rtx rax = gen_rtx_REG (Pmode, 0), insns;
emit_libcall_block (insns, dest, rax, x);
}
+ else if (TARGET_64BIT && TARGET_GNU2_TLS)
+ emit_insn (gen_tls_global_dynamic_64 (dest, x));
else
emit_insn (gen_tls_global_dynamic_32 (dest, x));
+
+ if (TARGET_GNU2_TLS)
+ {
+ dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
+
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
+ }
break;
case TLS_MODEL_LOCAL_DYNAMIC:
base = gen_reg_rtx (Pmode);
- if (TARGET_64BIT)
+ tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
+
+ if (TARGET_64BIT && ! TARGET_GNU2_TLS)
{
rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
emit_libcall_block (insns, base, rax, note);
}
+ else if (TARGET_64BIT && TARGET_GNU2_TLS)
+ emit_insn (gen_tls_local_dynamic_base_64 (base));
else
emit_insn (gen_tls_local_dynamic_base_32 (base));
+ if (TARGET_GNU2_TLS)
+ {
+ rtx x = ix86_tls_module_base ();
+
+ base = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, base));
+
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
+ }
+
off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
off = gen_rtx_CONST (Pmode, off);
- return gen_rtx_PLUS (Pmode, base, off);
+ dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
+ break;
case TLS_MODEL_INITIAL_EXEC:
if (TARGET_64BIT)
if (reload_in_progress)
regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
pic = pic_offset_table_rtx;
- type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
+ type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
}
- else if (!TARGET_GNU_TLS)
+ else if (!TARGET_ANY_GNU_TLS)
{
pic = gen_reg_rtx (Pmode);
emit_insn (gen_set_got (pic));
off = gen_const_mem (Pmode, off);
set_mem_alias_set (off, ix86_GOT_alias_set ());
- if (TARGET_64BIT || TARGET_GNU_TLS)
+ if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
{
base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
off = force_reg (Pmode, off);
case TLS_MODEL_LOCAL_EXEC:
off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
- (TARGET_64BIT || TARGET_GNU_TLS)
+ (TARGET_64BIT || TARGET_ANY_GNU_TLS)
? UNSPEC_NTPOFF : UNSPEC_TPOFF);
off = gen_rtx_CONST (Pmode, off);
- if (TARGET_64BIT || TARGET_GNU_TLS)
+ if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
{
base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
return gen_rtx_PLUS (Pmode, base, off);
break;
case SYMBOL_REF:
- assemble_name (file, XSTR (x, 0));
+ output_addr_const (file, x);
if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
fputs ("@PLT", file);
break;
/* In the name of slightly smaller debug output, and to cater to
general assembler lossage, recognize PIC+GOTOFF and turn it back
- into a direct symbol reference. */
+ into a direct symbol reference.
+
+ On Darwin, this is necessary to avoid a crash, because Darwin
+ has a different PIC label for each routine but the DWARF debugging
+ information is not associated with any particular routine, so it's
+ necessary to remove references to the PIC label from RTL stored by
+ the DWARF output code. */
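+ /* For instance (a sketch), a GOTOFF reference such as
+ (plus (reg ebx) (const (plus (unspec [(symbol_ref "x")] UNSPEC_GOTOFF)
+ (const_int 4))))
+ comes back as (plus (symbol_ref "x") (const_int 4)). */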
static rtx
ix86_delegitimize_address (rtx orig_x)
{
- rtx x = orig_x, y;
+ rtx x = orig_x;
+ /* reg_addend is NULL or a multiple of some register. */
+ rtx reg_addend = NULL_RTX;
+ /* const_addend is NULL or a const_int. */
+ rtx const_addend = NULL_RTX;
+ /* This is the result, or NULL. */
+ rtx result = NULL_RTX;
if (GET_CODE (x) == MEM)
x = XEXP (x, 0);
if (GET_CODE (XEXP (x, 0)) == REG
&& REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
/* %ebx + GOT/GOTOFF */
- y = NULL;
+ ;
else if (GET_CODE (XEXP (x, 0)) == PLUS)
{
/* %ebx + %reg * scale + GOT/GOTOFF */
- y = XEXP (x, 0);
- if (GET_CODE (XEXP (y, 0)) == REG
- && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
- y = XEXP (y, 1);
- else if (GET_CODE (XEXP (y, 1)) == REG
- && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
- y = XEXP (y, 0);
+ reg_addend = XEXP (x, 0);
+ if (GET_CODE (XEXP (reg_addend, 0)) == REG
+ && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
+ reg_addend = XEXP (reg_addend, 1);
+ else if (GET_CODE (XEXP (reg_addend, 1)) == REG
+ && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
+ reg_addend = XEXP (reg_addend, 0);
else
return orig_x;
- if (GET_CODE (y) != REG
- && GET_CODE (y) != MULT
- && GET_CODE (y) != ASHIFT)
+ if (GET_CODE (reg_addend) != REG
+ && GET_CODE (reg_addend) != MULT
+ && GET_CODE (reg_addend) != ASHIFT)
return orig_x;
}
else
return orig_x;
x = XEXP (XEXP (x, 1), 0);
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ const_addend = XEXP (x, 1);
+ x = XEXP (x, 0);
+ }
+
if (GET_CODE (x) == UNSPEC
&& ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
|| (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
- {
- if (y)
- return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
- return XVECEXP (x, 0, 0);
- }
+ result = XVECEXP (x, 0, 0);
- if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == UNSPEC
- && GET_CODE (XEXP (x, 1)) == CONST_INT
- && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
- || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
- && GET_CODE (orig_x) != MEM)))
- {
- x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
- if (y)
- return gen_rtx_PLUS (Pmode, y, x);
- return x;
- }
+ if (TARGET_MACHO && darwin_local_data_pic (x)
+ && GET_CODE (orig_x) != MEM)
+ result = XEXP (x, 0);
- return orig_x;
+ if (! result)
+ return orig_x;
+
+ if (const_addend)
+ result = gen_rtx_PLUS (Pmode, result, const_addend);
+ if (reg_addend)
+ result = gen_rtx_PLUS (Pmode, reg_addend, result);
+ return result;
}
\f
static void
rtx x = *px;
if (GET_CODE (x) == SYMBOL_REF
- && local_dynamic_symbolic_operand (x, Pmode))
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
{
cfun->machine->some_ld_name = XSTR (x, 0);
return 1;
output_addr_const (file, disp);
/* Use one byte shorter RIP relative addressing for 64bit mode. */
- if (TARGET_64BIT
- && ((GET_CODE (disp) == SYMBOL_REF
- && ! tls_symbolic_operand (disp, GET_MODE (disp)))
- || GET_CODE (disp) == LABEL_REF
- || (GET_CODE (disp) == CONST
- && GET_CODE (XEXP (disp, 0)) == PLUS
- && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
- || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
- && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
- fputs ("(%rip)", file);
+ if (TARGET_64BIT)
+ {
+ if (GET_CODE (disp) == CONST
+ && GET_CODE (XEXP (disp, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
+ disp = XEXP (XEXP (disp, 0), 0);
+ if (GET_CODE (disp) == LABEL_REF
+ || (GET_CODE (disp) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (disp) == 0))
+ fputs ("(%rip)", file);
+ }
}
else
{
enum machine_mode
ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
{
- if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
+ if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
return ix86_fp_compare_mode (code);
switch (code)
{
int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
/* All of the unordered compare instructions only work on registers.
- The same is true of the fcomi compare instructions. The same is
- true of the XFmode compare instructions if not comparing with
- zero (ftst insn is used in this case). */
+ The same is true of the fcomi compare instructions. The XFmode
+ compare instructions require registers except when comparing
+ against zero or when converting operand 1 from fixed point to
+ floating point. */
if (!is_sse
&& (fpcmp_mode == CCFPUmode
|| (op_mode == XFmode
&& ! (standard_80387_constant_p (op0) == 1
- || standard_80387_constant_p (op1) == 1))
+ || standard_80387_constant_p (op1) == 1)
+ && GET_CODE (op1) != FLOAT)
|| ix86_use_fcomi_compare (code)))
{
op0 = force_reg (op_mode, op0);
ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
ix86_compare_emitted = NULL_RTX;
}
- else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
+ else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
second_test, bypass_test);
else
f = ggc_alloc_cleared (sizeof (struct machine_function));
f->use_fast_prologue_epilogue_nregs = -1;
+ f->tls_descriptor_call_expanded_p = 0;
return f;
}
if (!ix86_tls_symbol)
{
ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
- (TARGET_GNU_TLS && !TARGET_64BIT)
+ (TARGET_ANY_GNU_TLS
+ && !TARGET_64BIT)
? "___tls_get_addr"
: "__tls_get_addr");
}
return ix86_tls_symbol;
}
+
+/* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
+
+static GTY(()) rtx ix86_tls_module_base_symbol;
+rtx
+ix86_tls_module_base (void)
+{
+ if (!ix86_tls_module_base_symbol)
+ {
+ ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
+ "_TLS_MODULE_BASE_");
+ SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
+ |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
+ }
+
+ return ix86_tls_module_base_symbol;
+}
\f
/* Calculate the length of the memory address in the instruction
encoding. Does not include the one-byte modrm, opcode, or prefix. */
/* Find the length of the displacement constant. */
if (disp)
{
- if (GET_CODE (disp) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
- && base)
+ if (base && satisfies_constraint_K (disp))
len = 1;
else
len = 4;
if (CONSTANT_P (recog_data.operand[i]))
{
gcc_assert (!len);
- if (shortform
- && GET_CODE (recog_data.operand[i]) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
+ if (shortform && satisfies_constraint_K (recog_data.operand[i]))
len = 1;
else
{
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_NOCONA:
+ case PROCESSOR_GENERIC32:
+ case PROCESSOR_GENERIC64:
return 3;
default:
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
+ case PROCESSOR_GENERIC32:
+ case PROCESSOR_GENERIC64:
memory = get_attr_memory (insn);
/* Show ability of reorder buffer to hide latency of load by executing
int
ix86_data_alignment (tree type, int align)
{
+ int max_align = optimize_size ? BITS_PER_WORD : 256;
+
if (AGGREGATE_TYPE_P (type)
- && TYPE_SIZE (type)
- && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
- || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
- return 256;
+ && TYPE_SIZE (type)
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
+ || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
+ && align < max_align)
+ align = max_align;
/* x86-64 ABI requires arrays greater than 16 bytes to be aligned
to 16byte boundary. */
IX86_BUILTIN_CMPNGEPS,
IX86_BUILTIN_CMPORDPS,
IX86_BUILTIN_CMPUNORDPS,
- IX86_BUILTIN_CMPNEPS,
IX86_BUILTIN_CMPEQSS,
IX86_BUILTIN_CMPLTSS,
IX86_BUILTIN_CMPLESS,
IX86_BUILTIN_CMPNGESS,
IX86_BUILTIN_CMPORDSS,
IX86_BUILTIN_CMPUNORDSS,
- IX86_BUILTIN_CMPNESS,
IX86_BUILTIN_COMIEQSS,
IX86_BUILTIN_COMILTSS,
IX86_BUILTIN_VEC_SET_V8HI,
IX86_BUILTIN_VEC_SET_V4HI,
+ /* SSE2 ABI functions. */
+ IX86_BUILTIN_SSE2_ACOS,
+ IX86_BUILTIN_SSE2_ACOSF,
+ IX86_BUILTIN_SSE2_ASIN,
+ IX86_BUILTIN_SSE2_ASINF,
+ IX86_BUILTIN_SSE2_ATAN,
+ IX86_BUILTIN_SSE2_ATANF,
+ IX86_BUILTIN_SSE2_ATAN2,
+ IX86_BUILTIN_SSE2_ATAN2F,
+ IX86_BUILTIN_SSE2_COS,
+ IX86_BUILTIN_SSE2_COSF,
+ IX86_BUILTIN_SSE2_EXP,
+ IX86_BUILTIN_SSE2_EXPF,
+ IX86_BUILTIN_SSE2_LOG10,
+ IX86_BUILTIN_SSE2_LOG10F,
+ IX86_BUILTIN_SSE2_LOG,
+ IX86_BUILTIN_SSE2_LOGF,
+ IX86_BUILTIN_SSE2_SIN,
+ IX86_BUILTIN_SSE2_SINF,
+ IX86_BUILTIN_SSE2_TAN,
+ IX86_BUILTIN_SSE2_TANF,
+
IX86_BUILTIN_MAX
};
{ MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
{ MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
{ MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
- { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
+ { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
{ MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
{ MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
{ MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
- { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
+ { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
{ MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
{ MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
{
if (TARGET_MMX)
ix86_init_mmx_sse_builtins ();
+ if (TARGET_SSE2)
+ ix86_init_sse_abi_builtins ();
}
/* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
= build_function_type_list (integer_type_node,
V2DF_type_node, V2DF_type_node, NULL_TREE);
- tree ti_ftype_ti_ti
- = build_function_type_list (intTI_type_node,
- intTI_type_node, intTI_type_node, NULL_TREE);
tree void_ftype_pcvoid
= build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
tree v4sf_ftype_v4si
(*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
}
- float128_type = make_node (REAL_TYPE);
- TYPE_PRECISION (float128_type) = 128;
- layout_type (float128_type);
- (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
+ if (TARGET_64BIT)
+ {
+ float128_type = make_node (REAL_TYPE);
+ TYPE_PRECISION (float128_type) = 128;
+ layout_type (float128_type);
+ (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
+ }
/* Add all builtins that are more or less simple operations on two
operands. */
case V2DFmode:
type = v2df_ftype_v2df_v2df;
break;
- case TImode:
- type = ti_ftype_ti_ti;
- break;
case V4SFmode:
type = v4sf_ftype_v4sf_v4sf;
break;
def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
- def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
/* comi/ucomi insns. */
def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
ftype, IX86_BUILTIN_VEC_SET_V4HI);
}
+#undef def_builtin
+
+/* Set up all the SSE ABI builtins that we may use to override
+ the normal builtins. */
+static void
+ix86_init_sse_abi_builtins (void)
+{
+ tree dbl, flt, dbl2, flt2;
+
+ /* Bail out in case the template definitions are not available. */
+ if (! built_in_decls [BUILT_IN_SIN]
+ || ! built_in_decls [BUILT_IN_SINF]
+ || ! built_in_decls [BUILT_IN_ATAN2]
+ || ! built_in_decls [BUILT_IN_ATAN2F])
+ return;
+
+ /* Build the function types as variants of the existing ones. */
+ dbl = build_variant_type_copy (TREE_TYPE (built_in_decls [BUILT_IN_SIN]));
+ TYPE_ATTRIBUTES (dbl)
+ = tree_cons (get_identifier ("sseregparm"),
+ NULL_TREE, TYPE_ATTRIBUTES (dbl));
+ flt = build_variant_type_copy (TREE_TYPE (built_in_decls [BUILT_IN_SINF]));
+ TYPE_ATTRIBUTES (flt)
+ = tree_cons (get_identifier ("sseregparm"),
+ NULL_TREE, TYPE_ATTRIBUTES (flt));
+ dbl2 = build_variant_type_copy (TREE_TYPE (built_in_decls [BUILT_IN_ATAN2]));
+ TYPE_ATTRIBUTES (dbl2)
+ = tree_cons (get_identifier ("sseregparm"),
+ NULL_TREE, TYPE_ATTRIBUTES (dbl2));
+ flt2 = build_variant_type_copy (TREE_TYPE (built_in_decls [BUILT_IN_ATAN2F]));
+ TYPE_ATTRIBUTES (flt2)
+ = tree_cons (get_identifier ("sseregparm"),
+ NULL_TREE, TYPE_ATTRIBUTES (flt2));
+
+#define def_builtin(capname, name, type) \
+ ix86_builtin_function_variants [BUILT_IN_ ## capname] \
+ = lang_hooks.builtin_function ("__builtin_sse2_" # name, type, \
+ IX86_BUILTIN_SSE2_ ## capname, \
+ BUILT_IN_NORMAL, \
+ "__libm_sse2_" # name, NULL_TREE)
+
+ def_builtin (ACOS, acos, dbl);
+ def_builtin (ACOSF, acosf, flt);
+ def_builtin (ASIN, asin, dbl);
+ def_builtin (ASINF, asinf, flt);
+ def_builtin (ATAN, atan, dbl);
+ def_builtin (ATANF, atanf, flt);
+ def_builtin (ATAN2, atan2, dbl2);
+ def_builtin (ATAN2F, atan2f, flt2);
+ def_builtin (COS, cos, dbl);
+ def_builtin (COSF, cosf, flt);
+ def_builtin (EXP, exp, dbl);
+ def_builtin (EXPF, expf, flt);
+ def_builtin (LOG10, log10, dbl);
+ def_builtin (LOG10F, log10f, flt);
+ def_builtin (LOG, log, dbl);
+ def_builtin (LOGF, logf, flt);
+ def_builtin (SIN, sin, dbl);
+ def_builtin (SINF, sinf, flt);
+ def_builtin (TAN, tan, dbl);
+ def_builtin (TANF, tanf, flt);
+
+#undef def_builtin
+}
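+
+/* Sketch of the intended effect: with TARGET_SSELIBM enabled, a call
+   to e.g. cos can be redirected (see ix86_expand_library_builtin
+   below) to __builtin_sse2_cos and thus to __libm_sse2_cos, whose
+   sseregparm attribute makes the argument arrive in an SSE register.  */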
/* Errors in the source file can cause expand_expr to return const0_rtx
where we expect a vector. To avoid crashing, use one of the vector
rtx pat, xops[3];
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
enum machine_mode mode1 = insn_data[icode].operand[2].mode;
rtx pat;
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
enum machine_mode mode0 = insn_data[icode].operand[0].mode;
enum machine_mode mode1 = insn_data[icode].operand[1].mode;
{
rtx pat;
tree arg0 = TREE_VALUE (arglist);
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
{
rtx pat;
tree arg0 = TREE_VALUE (arglist);
- rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op1, op0 = expand_normal (arg0);
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
rtx pat;
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
rtx op2;
enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
rtx pat;
tree arg0 = TREE_VALUE (arglist);
tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
rtx op2;
enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
{
- rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
+ rtx x = expand_normal (TREE_VALUE (arglist));
RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
}
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
elt = get_element_number (TREE_TYPE (arg0), arg1);
tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
arg1 = TREE_VALUE (arglist);
arg2 = TREE_VALUE (TREE_CHAIN (arglist));
arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
mode0 = insn_data[icode].operand[0].mode;
mode1 = insn_data[icode].operand[1].mode;
mode2 = insn_data[icode].operand[2].mode;
: CODE_FOR_sse2_loadlpd);
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
tmode = insn_data[icode].operand[0].mode;
mode0 = insn_data[icode].operand[1].mode;
mode1 = insn_data[icode].operand[2].mode;
: CODE_FOR_sse_storelps);
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
mode0 = insn_data[icode].operand[0].mode;
mode1 = insn_data[icode].operand[1].mode;
return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
case IX86_BUILTIN_LDMXCSR:
- op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (TREE_VALUE (arglist));
target = assign_386_stack_local (SImode, SLOT_TEMP);
emit_move_insn (target, op0);
emit_insn (gen_sse_ldmxcsr (target));
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
tmode = insn_data[icode].operand[0].mode;
mode0 = insn_data[icode].operand[1].mode;
mode1 = insn_data[icode].operand[2].mode;
: CODE_FOR_mmx_pshufw);
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
tmode = insn_data[icode].operand[0].mode;
mode1 = insn_data[icode].operand[1].mode;
mode2 = insn_data[icode].operand[2].mode;
: CODE_FOR_sse2_lshrti3);
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
tmode = insn_data[icode].operand[0].mode;
mode1 = insn_data[icode].operand[1].mode;
mode2 = insn_data[icode].operand[2].mode;
case IX86_BUILTIN_CLFLUSH:
arg0 = TREE_VALUE (arglist);
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
icode = CODE_FOR_sse2_clflush;
if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
op0 = copy_to_mode_reg (Pmode, op0);
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
if (!REG_P (op0))
op0 = copy_to_mode_reg (SImode, op0);
if (!REG_P (op1))
case IX86_BUILTIN_MWAIT:
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
- op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
if (!REG_P (op0))
op0 = copy_to_mode_reg (SImode, op0);
if (!REG_P (op1))
gcc_unreachable ();
}
+/* Expand an expression EXP that calls a built-in library function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+ix86_expand_library_builtin (tree exp, rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore)
+{
+ enum built_in_function fncode;
+ tree fndecl, newfn, call;
+
+ /* Try expanding builtin math functions to the SSE2 ABI variants. */
+ if (!TARGET_SSELIBM)
+ return NULL_RTX;
+
+ fncode = builtin_mathfn_code (exp);
+ if (!ix86_builtin_function_variants [(int)fncode])
+ return NULL_RTX;
+
+ fndecl = get_callee_fndecl (exp);
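+ /* If the function has already been given RTL (e.g. the user supplied
+ a definition of their own), leave the call alone. */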
+ if (DECL_RTL_SET_P (fndecl))
+ return NULL_RTX;
+
+ /* Build the redirected call and expand it. */
+ newfn = ix86_builtin_function_variants [(int)fncode];
+ call = build_function_call_expr (newfn, TREE_OPERAND (exp, 1));
+ return expand_call (call, target, ignore);
+}
+
/* Store OPERAND to the memory after reload is completed. This means
that we can't easily use assign_stack_local. */
rtx
}
break;
case HImode:
- /* It is better to store HImodes as SImodes. */
- if (!TARGET_PARTIAL_REG_STALL)
- operand = gen_lowpart (SImode, operand);
+ /* Store HImodes as SImodes. */
+ operand = gen_lowpart (SImode, operand);
/* FALLTHRU */
case SImode:
emit_insn (
if (mode == DImode || TARGET_64BIT)
size = 8;
- else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
- size = 2;
else
size = 4;
/* Use LEA to deallocate stack space. In peephole2 it will be converted
&& GET_MODE (XEXP (x, 0)) == SImode)
*total = 1;
else if (TARGET_ZERO_EXTEND_WITH_AND)
- *total = COSTS_N_INSNS (ix86_cost->add);
+ *total = ix86_cost->add;
else
- *total = COSTS_N_INSNS (ix86_cost->movzx);
+ *total = ix86_cost->movzx;
return false;
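+ /* Note: the processor cost tables now store values already scaled by
+ COSTS_N_INSNS (or COSTS_N_BYTES when tuning for size), so they are
+ used directly here rather than being wrapped again. */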
case SIGN_EXTEND:
- *total = COSTS_N_INSNS (ix86_cost->movsx);
+ *total = ix86_cost->movsx;
return false;
case ASHIFT:
HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
if (value == 1)
{
- *total = COSTS_N_INSNS (ix86_cost->add);
+ *total = ix86_cost->add;
return false;
}
if ((value == 2 || value == 3)
&& ix86_cost->lea <= ix86_cost->shift_const)
{
- *total = COSTS_N_INSNS (ix86_cost->lea);
+ *total = ix86_cost->lea;
return false;
}
}
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
{
if (INTVAL (XEXP (x, 1)) > 32)
- *total = COSTS_N_INSNS(ix86_cost->shift_const + 2);
+ *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
else
- *total = COSTS_N_INSNS(ix86_cost->shift_const * 2);
+ *total = ix86_cost->shift_const * 2;
}
else
{
if (GET_CODE (XEXP (x, 1)) == AND)
- *total = COSTS_N_INSNS(ix86_cost->shift_var * 2);
+ *total = ix86_cost->shift_var * 2;
else
- *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2);
+ *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
}
}
else
{
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- *total = COSTS_N_INSNS (ix86_cost->shift_const);
+ *total = ix86_cost->shift_const;
else
- *total = COSTS_N_INSNS (ix86_cost->shift_var);
+ *total = ix86_cost->shift_var;
}
return false;
case MULT:
if (FLOAT_MODE_P (mode))
{
- *total = COSTS_N_INSNS (ix86_cost->fmul);
+ *total = ix86_cost->fmul;
return false;
}
else
op0 = XEXP (op0, 0), mode = GET_MODE (op0);
}
- *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
- + nbits * ix86_cost->mult_bit)
- + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
+ *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
+ + nbits * ix86_cost->mult_bit
+ + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
return true;
}
case MOD:
case UMOD:
if (FLOAT_MODE_P (mode))
- *total = COSTS_N_INSNS (ix86_cost->fdiv);
+ *total = ix86_cost->fdiv;
else
- *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
+ *total = ix86_cost->divide[MODE_INDEX (mode)];
return false;
case PLUS:
if (FLOAT_MODE_P (mode))
- *total = COSTS_N_INSNS (ix86_cost->fadd);
+ *total = ix86_cost->fadd;
else if (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
{
HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
if (val == 2 || val == 4 || val == 8)
{
- *total = COSTS_N_INSNS (ix86_cost->lea);
+ *total = ix86_cost->lea;
*total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
*total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
outer_code);
HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
if (val == 2 || val == 4 || val == 8)
{
- *total = COSTS_N_INSNS (ix86_cost->lea);
+ *total = ix86_cost->lea;
*total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
*total += rtx_cost (XEXP (x, 1), outer_code);
return true;
}
else if (GET_CODE (XEXP (x, 0)) == PLUS)
{
- *total = COSTS_N_INSNS (ix86_cost->lea);
+ *total = ix86_cost->lea;
*total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
*total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
*total += rtx_cost (XEXP (x, 1), outer_code);
case MINUS:
if (FLOAT_MODE_P (mode))
{
- *total = COSTS_N_INSNS (ix86_cost->fadd);
+ *total = ix86_cost->fadd;
return false;
}
/* FALLTHRU */
case XOR:
if (!TARGET_64BIT && mode == DImode)
{
- *total = (COSTS_N_INSNS (ix86_cost->add) * 2
+ *total = (ix86_cost->add * 2
+ (rtx_cost (XEXP (x, 0), outer_code)
<< (GET_MODE (XEXP (x, 0)) != DImode))
+ (rtx_cost (XEXP (x, 1), outer_code)
case NEG:
if (FLOAT_MODE_P (mode))
{
- *total = COSTS_N_INSNS (ix86_cost->fchs);
+ *total = ix86_cost->fchs;
return false;
}
/* FALLTHRU */
case NOT:
if (!TARGET_64BIT && mode == DImode)
- *total = COSTS_N_INSNS (ix86_cost->add * 2);
+ *total = ix86_cost->add * 2;
else
- *total = COSTS_N_INSNS (ix86_cost->add);
+ *total = ix86_cost->add;
return false;
case COMPARE:
{
/* This kind of construct is implemented using test[bwl].
Treat it as if we had an AND. */
- *total = (COSTS_N_INSNS (ix86_cost->add)
+ *total = (ix86_cost->add
+ rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
+ rtx_cost (const1_rtx, outer_code));
return true;
case ABS:
if (FLOAT_MODE_P (mode))
- *total = COSTS_N_INSNS (ix86_cost->fabs);
+ *total = ix86_cost->fabs;
return false;
case SQRT:
if (FLOAT_MODE_P (mode))
- *total = COSTS_N_INSNS (ix86_cost->fsqrt);
+ *total = ix86_cost->fsqrt;
return false;
case UNSPEC:
sprintf (lazy_ptr_name, "L%d$lz", label);
if (MACHOPIC_PURE)
- machopic_picsymbol_stub_section ();
+ switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
else
- machopic_symbol_stub_section ();
+ switch_to_section (darwin_sections[machopic_symbol_stub_section]);
fprintf (file, "%s:\n", stub);
fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
{
fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
- fprintf (file, "\tjmp %%edx\n");
+ fprintf (file, "\tjmp *%%edx\n");
}
else
fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
fprintf (file, "\tjmp dyld_stub_binding_helper\n");
- machopic_lazy_symbol_ptr_section ();
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
fprintf (file, "%s:\n", lazy_ptr_name);
fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
fprintf (file, "\t.long %s\n", binder_name);
}
+
+void
+darwin_x86_file_end (void)
+{
+ darwin_file_end ();
+ ix86_file_end ();
+}
#endif /* TARGET_MACHO */
/* Order the registers for register allocator. */
#endif /* TARGET_MACHO */
{
tmp = gen_rtx_REG (SImode, 2 /* ECX */);
- output_set_got (tmp);
+ output_set_got (tmp, NULL_RTX);
xops[1] = tmp;
output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
static void
ix86_reorg (void)
{
- if (TARGET_ATHLON_K8 && optimize && !optimize_size)
+ if (TARGET_PAD_RETURNS && optimize && !optimize_size)
ix86_pad_returns ();
if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
ix86_avoid_jump_misspredicts ();
const_vec = copy_rtx (vals);
XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
+ const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
switch (mode)
{
emit_insn (fn (dest, tmp2, tmp3));
}
\f
+/* Target hook for scalar_mode_supported_p. */
+static bool
+ix86_scalar_mode_supported_p (enum machine_mode mode)
+{
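+ /* All decimal float modes (SDmode, DDmode, TDmode) are accepted here,
+ in addition to whatever the default hook supports. */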
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return true;
+ else
+ return default_scalar_mode_supported_p (mode);
+}
+
/* Implements target hook vector_mode_supported_p. */
static bool
ix86_vector_mode_supported_p (enum machine_mode mode)
emit_label (label2);
}
-/* Solaris named-section hook. Parameters are as for
- named_section_real. */
+/* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
static void
i386_solaris_elf_named_section (const char *name, unsigned int flags,