/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "c-common.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "tree-gimple.h"
#include "dwarf2.h"
#include "tm-constrs.h"
#include "params.h"

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)				\
  ((mode) == QImode ? 0					\
   : (mode) == HImode ? 1				\
   : (mode) == SImode ? 2				\
   : (mode) == DImode ? 3				\
   : 4)
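
/* Illustrative sketch (not part of the port, never compiled): the rtx
   cost hooks consult the tables below via MODE_INDEX.  The field names
   follow the processor_costs layout initialized in this file; treat the
   exact expression and the helper name as assumptions, not the verbatim
   rtx_costs code.  */
#if 0
static int
example_mult_cost (enum machine_mode mode, int bits_set)
{
  /* Start-up cost of a multiply in MODE, plus a per-set-bit cost.  */
  return ix86_cost->mult_init[MODE_INDEX (mode)]
	 + bits_set * ix86_cost->mult_bit;
}
#endif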

/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
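
/* How the stringop tables below read (a sketch, inferred from the K8 and
   PentiumPro comments further down): the leading algorithm is used when
   the block size is unknown at compile time, and each following
   {max, alg} pair selects ALG for sizes up to MAX bytes, with -1 meaning
   "no upper bound".  So {libcall, {{6, loop}, {-1, rep_prefix_4_byte}}}
   would mean: call the library for unknown sizes, use an inline loop up
   to 6 bytes, and a rep-prefixed 4-byte move for anything larger.  The
   two entries per operation apparently correspond to 32-bit and 64-bit
   code generation; DUMMY_STRINGOP_ALGS fills the unused slot.  */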

static const
struct processor_costs size_cost = {	/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/*				 HI */
   COSTS_N_BYTES (3),			/*				 SI */
   COSTS_N_BYTES (3),			/*				 DI */
   COSTS_N_BYTES (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/*			    HI */
   COSTS_N_BYTES (3),			/*			    SI */
   COSTS_N_BYTES (3),			/*			    DI */
   COSTS_N_BYTES (5)},			/*			    other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/*				 HI */
   COSTS_N_INSNS (6),			/*				 SI */
   COSTS_N_INSNS (6),			/*				 DI */
   COSTS_N_INSNS (6)},			/*			      other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/*			    HI */
   COSTS_N_INSNS (23),			/*			    SI */
   COSTS_N_INSNS (23),			/*			    DI */
   COSTS_N_INSNS (23)},			/*			    other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS}
};

static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/*				 HI */
   COSTS_N_INSNS (12),			/*				 SI */
   COSTS_N_INSNS (12),			/*				 DI */
   COSTS_N_INSNS (12)},			/*			      other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/*			    HI */
   COSTS_N_INSNS (40),			/*			    SI */
   COSTS_N_INSNS (40),			/*			    DI */
   COSTS_N_INSNS (40)},			/*			    other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS}
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/*				 HI */
   COSTS_N_INSNS (11),			/*				 SI */
   COSTS_N_INSNS (11),			/*				 DI */
   COSTS_N_INSNS (11)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/*			    HI */
   COSTS_N_INSNS (25),			/*			    SI */
   COSTS_N_INSNS (25),			/*			    DI */
   COSTS_N_INSNS (25)},			/*			    other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS}
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (4),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (4)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/*			    HI */
   COSTS_N_INSNS (17),			/*			    SI */
   COSTS_N_INSNS (17),			/*			    DI */
   COSTS_N_INSNS (17)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time in
     the CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (7),			/*				 SI */
   COSTS_N_INSNS (7),			/*				 DI */
   COSTS_N_INSNS (7)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/*			    HI */
   COSTS_N_INSNS (39),			/*			    SI */
   COSTS_N_INSNS (39),			/*			    DI */
   COSTS_N_INSNS (39)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (3),			/*				 DI */
   COSTS_N_INSNS (3)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/*			    HI */
   COSTS_N_INSNS (18),			/*			    SI */
   COSTS_N_INSNS (18),			/*			    DI */
   COSTS_N_INSNS (18)},			/*			    other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/*				 HI */
   COSTS_N_INSNS (5),			/*				 SI */
   COSTS_N_INSNS (5),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) than K8 does.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};

static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/*			    HI */
   COSTS_N_INSNS (51),			/*			    SI */
   COSTS_N_INSNS (83),			/*			    DI */
   COSTS_N_INSNS (83)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg	Double	FSTORE 4
					    MOVD reg32, xmmreg	Double	FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg	Double	FADD 3
					    MOVD reg32, xmmreg	Double	FADD 3  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/*				 HI */
   COSTS_N_INSNS (15),			/*				 SI */
   COSTS_N_INSNS (15),			/*				 DI */
   COSTS_N_INSNS (15)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/*			    HI */
   COSTS_N_INSNS (56),			/*			    SI */
   COSTS_N_INSNS (56),			/*			    DI */
   COSTS_N_INSNS (56)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/*				 HI */
   COSTS_N_INSNS (10),			/*				 SI */
   COSTS_N_INSNS (10),			/*				 DI */
   COSTS_N_INSNS (10)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/*			    HI */
   COSTS_N_INSNS (66),			/*			    SI */
   COSTS_N_INSNS (66),			/*			    DI */
   COSTS_N_INSNS (66)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};

static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (3),			/*				 DI */
   COSTS_N_INSNS (3)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),			/*			    HI */
   COSTS_N_INSNS (22),			/*			    SI */
   COSTS_N_INSNS (22),			/*			    DI */
   COSTS_N_INSNS (22)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {6, 6, 6},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {6, 6},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {6, 6, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};

/* Generic64 should produce code tuned for Nocona and K8.  */
static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  /* On all chips taken into consideration, lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     the use of unnecessary temporary registers, causing a regression on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (2)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when
     this value is increased to the perhaps more appropriate value of 5.  */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};

/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (2)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			    other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};

const struct processor_costs *ix86_cost = &pentium_cost;

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_ATHLON_K8_AMDFAM10 (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
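
/* Sketch of how these masks are consumed (an assumption; the real TARGET_*
   wrapper macros live in i386.h, not here): each array entry below is
   tested against the bit for the active -mtune processor.  Never
   compiled.  */
#if 0
  /* True iff the tuning target wants "leave" in epilogues.  */
  int use_leave = (ix86_tune_features[X86_TUNE_USE_LEAVE]
		   & (1u << ix86_tune)) != 0;
#endif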

/* Feature tests against the various tunings.  */
unsigned int ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling it for Generic64 seems like a good code-size
     tradeoff.  We can't enable it for 32-bit generic because it does not
     work well with PPro-based chips.  */
  m_386 | m_K6_GEODE | m_ATHLON_K8_AMDFAM10 | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_ATHLON_K8_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */

  /* X86_TUNE_USE_BIT_TEST */

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_PPRO | m_ATHLON_K8_AMDFAM10 | m_K6 | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_PPRO | m_K6_GEODE | m_ATHLON_K8_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put into the P4
     based on simulation results, but after the P4 shipped no performance
     benefit was observed from branch hints, and they increase code size.
     As a result, icc never generates branch hints.  */

  /* X86_TUNE_DOUBLE_WITH_ADD */

  /* X86_TUNE_USE_SAHF */
  m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_ATHLON_K8_AMDFAM10 | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,

  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on the Generic32 compilation setting as well.  However,
     in the current implementation partial register stalls are not
     eliminated very well - they can be introduced via subregs synthesized
     by combine, and can happen in caller/callee saving sequences.  Because
     this option pays back little on PPro-based chips and conflicts with the
     partial register dependencies used by Athlon/P4-based chips, it is
     better to leave it off for generic32 for now.  */
  m_PPRO,

  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_ATHLON_K8_AMDFAM10 | m_PENT | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */

  /* X86_TUNE_SPLIT_LONG_MOVES */

  /* X86_TUNE_READ_MODIFY_WRITE */

  /* X86_TUNE_READ_MODIFY */

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8_AMDFAM10 | m_CORE2
  | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */

  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL, this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls were more effective.  */

  /* X86_TUNE_PROMOTE_QI_REGS */

  /* X86_TUNE_PROMOTE_HI_REGS */

  /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
  m_ATHLON_K8_AMDFAM10 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ADD_ESP_8 */
  m_ATHLON_K8_AMDFAM10 | m_PPRO | m_K6_GEODE | m_386
  | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_4 */
  m_ATHLON_K8_AMDFAM10 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_8 */
  m_ATHLON_K8_AMDFAM10 | m_PPRO | m_386 | m_486
  | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies.  */
  ~(m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here between PPro/Pentium4-based chips, which treat 128-bit
     SSE registers as single units, and K8-based chips, which divide SSE
     registers into two 64-bit halves.  This knob promotes all store
     destinations to 128 bits so as to allow register renaming on 128-bit
     SSE units, but usually results in one extra microop on 64-bit SSE
     units.  Experimental results show that disabling this option on P4
     brings over 20% SPECfp regression, while enabling it on K8 brings
     roughly a 2.4% regression that can be partly masked by careful
     scheduling of moves.  */
  m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,

  /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and
     dependencies are resolved on SSE register parts instead of whole
     registers, so we may maintain just the lower part of scalar values
     in the proper format, leaving the upper part undefined.  */

  /* X86_TUNE_SSE_TYPELESS_STORES */
  m_ATHLON_K8_AMDFAM10,

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */

  /* X86_TUNE_USE_FFREEP */
  m_ATHLON_K8_AMDFAM10,

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_ATHLON_K8_AMDFAM10 | m_GENERIC),

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16-byte window.  */
  m_PPRO | m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_ATHLON_K8_AMDFAM10 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_BT */
  m_ATHLON_K8_AMDFAM10,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC),

  /* X86_TUNE_PAD_RETURNS */
  m_ATHLON_K8_AMDFAM10 | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHORTEN_X87_SSE */

  /* X86_TUNE_AVOID_VECTOR_DECODE */

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiply, but the 386 and 486 do HImode multiply
     faster.  */
  ~(m_386 | m_486),

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory
     takes the vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant takes the vector
     path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
     than via a MOV.  */
  m_PENT,

  /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
     but is one byte longer.  */
  m_PENT,

  /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
     operand that cannot be represented using a modRM byte.  The XOR
     replacement is long decoded, so this split helps here as well.  */
  m_K6
};

/* Feature tests against the various architecture variations.  */
unsigned int ix86_arch_features[X86_ARCH_LAST] = {
  /* X86_ARCH_CMOVE */
  m_PPRO | m_GEODE | m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA,

  /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486.  */
  ~m_386,

  /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium.  */
  ~(m_386 | m_486),

  /* X86_ARCH_XADD: Exchange and add was added for 80486.  */
  ~m_386,

  /* X86_ARCH_BSWAP: Byteswap was added for 80486.  */
  ~m_386
};

static const unsigned int x86_accumulate_outgoing_args
  = m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;

static const unsigned int x86_arch_always_fancy_math_387
  = m_PENT | m_PPRO | m_ATHLON_K8_AMDFAM10 | m_PENT4
    | m_NOCONA | m_CORE2 | m_GENERIC;

static enum stringop_alg stringop_alg = no_stringop;

/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20

/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;

/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  /* FP registers */
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer */
  NON_Q_REGS,
  /* flags, fpsr, fpcr, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS
};
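
/* For example, REGNO_REG_CLASS (1) yields DREG, since regno 1 is %edx in
   the map above.  (REGNO_REG_CLASS is the i386.h wrapper named in the
   comment that precedes this array.)  */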

/* The "default" register map used in 32bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,		/* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1	/* extended SSE registers */
};

static int const x86_64_int_parameter_registers[6] =
{
  5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
  FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
};

static int const x86_64_ms_abi_int_parameter_registers[4] =
{
  2 /*RCX*/, 1 /*RDX*/,
  FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
};

static int const x86_64_int_return_registers[4] =
{
  0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
};

/* The "default" register map used in 64bit mode.  */

int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,		/* general regs */
  33, 34, 35, 36, 37, 38, 39, 40,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  17, 18, 19, 20, 21, 22, 23, 24,	/* SSE */
  41, 42, 43, 44, 45, 46, 47, 48,	/* MMX */
  8, 9, 10, 11, 12, 13, 14, 15,		/* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32	/* extended SSE registers */
};

/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8  for %eip    (no gcc equivalent)
	9  for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to under-
   stand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers.
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,		/* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,	/* fp regs */
  -1, 9, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1	/* extended SSE registers */
};

/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

rtx ix86_compare_op0 = NULL_RTX;
rtx ix86_compare_op1 = NULL_RTX;
rtx ix86_compare_emitted = NULL_RTX;

/* Size of the register save area.  */
#define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)

/* Define the structure for the machine field in struct function.  */

struct stack_local_entry GTY(())
{
  unsigned short mode;
  unsigned short n;
  rtx rtl;
  struct stack_local_entry *next;
};

/* Structure describing stack frame layout.
   Stack grows downward:

   [arguments]
					      <- ARG_POINTER
   saved pc

   saved frame pointer if frame_pointer_needed
					      <- HARD_FRAME_POINTER
   [saved regs]

   [padding1]		\
			 )
   [va_arg registers]	 > to_allocate
   [frame]		 )		      <- FRAME_POINTER
   [padding2]		/
  */
struct ix86_frame
{
  int nregs;
  int padding1;
  int va_arg_size;
  HOST_WIDE_INT frame;
  int padding2;
  int outgoing_arguments_size;
  int red_zone_size;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;
};

/* Code model option.  */
enum cmodel ix86_cmodel;
/* Asm dialect.  */
enum asm_dialect ix86_asm_dialect = ASM_ATT;
/* TLS dialect.  */
enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;

/* Which unit we are generating floating point math for.  */
enum fpmath_unit ix86_fpmath;

/* Which CPU are we scheduling for.  */
enum processor_type ix86_tune;

/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* True if the SSE prefetch instruction is not a NOOP.  */
int x86_prefetch_sse;

/* ix86_regparm_string as a number.  */
static int ix86_regparm;

/* -mstackrealign option.  */
extern int ix86_force_align_arg_pointer;
static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c.  */
int ix86_branch_cost;

/* Variables which are this size or smaller are put in the data/bss
   or ldata/lbss sections.  */

int ix86_section_threshold = 65536;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;

/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the psABI, with the exception
   of the SSESF and SSEDF classes, which are basically the SSE class,
   except that gcc will use SFmode or DFmode moves instead of DImode to
   avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (the upper half does contain padding).  */
enum x86_64_reg_class
  {
    X86_64_NO_CLASS,
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_SSE_CLASS,
    X86_64_SSESF_CLASS,
    X86_64_SSEDF_CLASS,
    X86_64_SSEUP_CLASS,
    X86_64_X87_CLASS,
    X86_64_X87UP_CLASS,
    X86_64_COMPLEX_X87_CLASS,
    X86_64_MEMORY_CLASS
  };
static const char * const x86_64_reg_class_name[] =
{
  "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
  "sseup", "x87", "x87up", "cplx87", "no"
};

#define MAX_CLASSES 4
1529 /* Table of constants used by fldpi, fldln2, etc. */
1530 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1531 static bool ext_80387_constants_init = 0;
1534 static struct machine_function * ix86_init_machine_status (void);
1535 static rtx ix86_function_value (tree, tree, bool);
1536 static int ix86_function_regparm (tree, tree);
1537 static void ix86_compute_frame_layout (struct ix86_frame *);
1538 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1542 /* The svr4 ABI for the i386 says that records and unions are returned in memory. */
1544 #ifndef DEFAULT_PCC_STRUCT_RETURN
1545 #define DEFAULT_PCC_STRUCT_RETURN 1
1548 /* Implement TARGET_HANDLE_OPTION. */
1551 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1558 target_flags &= ~MASK_3DNOW_A;
1559 target_flags_explicit |= MASK_3DNOW_A;
1566 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1567 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1574 target_flags &= ~(MASK_SSE2 | MASK_SSE3 | MASK_SSE4A);
1575 target_flags_explicit |= MASK_SSE2 | MASK_SSE3 | MASK_SSE4A;
1582 target_flags &= ~(MASK_SSE3 | MASK_SSE4A);
1583 target_flags_explicit |= MASK_SSE3 | MASK_SSE4A;
1590 target_flags &= ~MASK_SSE4A;
1591 target_flags_explicit |= MASK_SSE4A;
1600 /* Sometimes certain combinations of command options do not make
1601 sense on a particular target machine. You can define a macro
1602 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1603 defined, is executed once just after all the command options have been parsed.
1606 Don't use this macro to turn on various extra optimizations for
1607 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1610 override_options (void)
1613 int ix86_tune_defaulted = 0;
1614 unsigned int ix86_arch_mask, ix86_tune_mask;
1616 /* Comes from final.c -- no real reason to change it. */
1617 #define MAX_CODE_ALIGN 16
1621 const struct processor_costs *cost; /* Processor costs */
1622 const int target_enable; /* Target flags to enable. */
1623 const int target_disable; /* Target flags to disable. */
1624 const int align_loop; /* Default alignments. */
1625 const int align_loop_max_skip;
1626 const int align_jump;
1627 const int align_jump_max_skip;
1628 const int align_func;
1630 const processor_target_table[PROCESSOR_max] =
1632 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1633 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1634 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1635 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1636 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1637 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1638 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1639 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1640 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1641 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1642 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1643 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1644 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16},
1645 {&amdfam10_cost, 0, 0, 32, 7, 32, 7, 32}
1648 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1651 const char *const name; /* processor name or nickname. */
1652 const enum processor_type processor;
1653 const enum pta_flags
1659 PTA_PREFETCH_SSE = 1 << 4,
1661 PTA_3DNOW_A = 1 << 6,
1665 PTA_POPCNT = 1 << 10,
1667 PTA_SSE4A = 1 << 12,
1668 PTA_NO_SAHF = 1 << 13
1671 const processor_alias_table[] =
1673 {"i386", PROCESSOR_I386, 0},
1674 {"i486", PROCESSOR_I486, 0},
1675 {"i586", PROCESSOR_PENTIUM, 0},
1676 {"pentium", PROCESSOR_PENTIUM, 0},
1677 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1678 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1679 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1680 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1681 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1682 {"i686", PROCESSOR_PENTIUMPRO, 0},
1683 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1684 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1685 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1686 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1687 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1688 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1689 | PTA_MMX | PTA_PREFETCH_SSE},
1690 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1691 | PTA_MMX | PTA_PREFETCH_SSE},
1692 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1693 | PTA_MMX | PTA_PREFETCH_SSE},
1694 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1695 | PTA_MMX | PTA_PREFETCH_SSE
1696 | PTA_CX16 | PTA_NO_SAHF},
1697 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3
1698 | PTA_64BIT | PTA_MMX
1699 | PTA_PREFETCH_SSE | PTA_CX16},
1700 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1702 {"k6", PROCESSOR_K6, PTA_MMX},
1703 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1704 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1705 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1707 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1708 | PTA_3DNOW | PTA_3DNOW_A},
1709 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1710 | PTA_3DNOW_A | PTA_SSE},
1711 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1712 | PTA_3DNOW_A | PTA_SSE},
1713 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1714 | PTA_3DNOW_A | PTA_SSE},
1715 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1716 | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
1717 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1718 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2
1720 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1721 | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
1722 | PTA_SSE2 | PTA_NO_SAHF},
1723 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1724 | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
1725 | PTA_SSE2 | PTA_NO_SAHF},
1726 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1727 | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
1728 | PTA_SSE2 | PTA_NO_SAHF},
1729 {"amdfam10", PROCESSOR_AMDFAM10, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1730 | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
1731 | PTA_SSE2 | PTA_SSE3 | PTA_POPCNT
1732 | PTA_ABM | PTA_SSE4A | PTA_CX16},
1733 {"barcelona", PROCESSOR_AMDFAM10, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1734 | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
1735 | PTA_SSE2 | PTA_SSE3 | PTA_POPCNT
1736 | PTA_ABM | PTA_SSE4A | PTA_CX16},
1737 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1738 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1741 int const pta_size = ARRAY_SIZE (processor_alias_table);
1743 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1744 SUBTARGET_OVERRIDE_OPTIONS;
1747 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1748 SUBSUBTARGET_OVERRIDE_OPTIONS;
1751 /* -fPIC is the default for x86_64. */
1752 if (TARGET_MACHO && TARGET_64BIT)
1755 /* Set the default values for switches whose default depends on TARGET_64BIT
1756 in case they weren't overwritten by command line options. */
1759 /* Mach-O doesn't support omitting the frame pointer for now. */
1760 if (flag_omit_frame_pointer == 2)
1761 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
1762 if (flag_asynchronous_unwind_tables == 2)
1763 flag_asynchronous_unwind_tables = 1;
1764 if (flag_pcc_struct_return == 2)
1765 flag_pcc_struct_return = 0;
1769 if (flag_omit_frame_pointer == 2)
1770 flag_omit_frame_pointer = 0;
1771 if (flag_asynchronous_unwind_tables == 2)
1772 flag_asynchronous_unwind_tables = 0;
1773 if (flag_pcc_struct_return == 2)
1774 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1777 /* Need to check -mtune=generic first. */
1778 if (ix86_tune_string)
1780 if (!strcmp (ix86_tune_string, "generic")
1781 || !strcmp (ix86_tune_string, "i686")
1782 /* As special support for cross compilers we read -mtune=native
1783 as -mtune=generic. With native compilers we won't see the
1784 -mtune=native, as it was changed by the driver. */
1785 || !strcmp (ix86_tune_string, "native"))
1788 ix86_tune_string = "generic64";
1790 ix86_tune_string = "generic32";
1792 else if (!strncmp (ix86_tune_string, "generic", 7))
1793 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1797 if (ix86_arch_string)
1798 ix86_tune_string = ix86_arch_string;
1799 if (!ix86_tune_string)
1801 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1802 ix86_tune_defaulted = 1;
1805 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
1806 need to use a sensible tune option. */
1807 if (!strcmp (ix86_tune_string, "generic")
1808 || !strcmp (ix86_tune_string, "x86-64")
1809 || !strcmp (ix86_tune_string, "i686"))
1812 ix86_tune_string = "generic64";
1814 ix86_tune_string = "generic32";
1817 if (ix86_stringop_string)
1819 if (!strcmp (ix86_stringop_string, "rep_byte"))
1820 stringop_alg = rep_prefix_1_byte;
1821 else if (!strcmp (ix86_stringop_string, "libcall"))
1822 stringop_alg = libcall;
1823 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
1824 stringop_alg = rep_prefix_4_byte;
1825 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
1826 stringop_alg = rep_prefix_8_byte;
1827 else if (!strcmp (ix86_stringop_string, "byte_loop"))
1828 stringop_alg = loop_1_byte;
1829 else if (!strcmp (ix86_stringop_string, "loop"))
1830 stringop_alg = loop;
1831 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
1832 stringop_alg = unrolled_loop;
1834 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
1836 if (!strcmp (ix86_tune_string, "x86-64"))
1837 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
1838 "-mtune=generic instead as appropriate.");
1840 if (!ix86_arch_string)
1841 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1842 if (!strcmp (ix86_arch_string, "generic"))
1843 error ("generic CPU can be used only for -mtune= switch");
1844 if (!strncmp (ix86_arch_string, "generic", 7))
1845 error ("bad value (%s) for -march= switch", ix86_arch_string);
1847 if (ix86_cmodel_string != 0)
1849 if (!strcmp (ix86_cmodel_string, "small"))
1850 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1851 else if (!strcmp (ix86_cmodel_string, "medium"))
1852 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1853 else if (!strcmp (ix86_cmodel_string, "large"))
1854 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
1856 error ("code model %s does not support PIC mode", ix86_cmodel_string);
1857 else if (!strcmp (ix86_cmodel_string, "32"))
1858 ix86_cmodel = CM_32;
1859 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1860 ix86_cmodel = CM_KERNEL;
1862 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1866 /* For TARGET_64BIT_MS_ABI, force pic on, in order to enable the
1867 use of rip-relative addressing. This eliminates fixups that
1868 would otherwise be needed if this object is to be placed in a
1869 DLL, and is essentially just as efficient as direct addressing. */
1870 if (TARGET_64BIT_MS_ABI)
1871 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
1872 else if (TARGET_64BIT)
1873 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1875 ix86_cmodel = CM_32;
1877 if (ix86_asm_string != 0)
1880 && !strcmp (ix86_asm_string, "intel"))
1881 ix86_asm_dialect = ASM_INTEL;
1882 else if (!strcmp (ix86_asm_string, "att"))
1883 ix86_asm_dialect = ASM_ATT;
1885 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1887 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1888 error ("code model %qs not supported in the %s bit mode",
1889 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1890 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1891 sorry ("%i-bit mode not compiled in",
1892 (target_flags & MASK_64BIT) ? 64 : 32);
1894 for (i = 0; i < pta_size; i++)
1895 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1897 ix86_arch = processor_alias_table[i].processor;
1898 /* Default cpu tuning to the architecture. */
1899 ix86_tune = ix86_arch;
1900 if (processor_alias_table[i].flags & PTA_MMX
1901 && !(target_flags_explicit & MASK_MMX))
1902 target_flags |= MASK_MMX;
1903 if (processor_alias_table[i].flags & PTA_3DNOW
1904 && !(target_flags_explicit & MASK_3DNOW))
1905 target_flags |= MASK_3DNOW;
1906 if (processor_alias_table[i].flags & PTA_3DNOW_A
1907 && !(target_flags_explicit & MASK_3DNOW_A))
1908 target_flags |= MASK_3DNOW_A;
1909 if (processor_alias_table[i].flags & PTA_SSE
1910 && !(target_flags_explicit & MASK_SSE))
1911 target_flags |= MASK_SSE;
1912 if (processor_alias_table[i].flags & PTA_SSE2
1913 && !(target_flags_explicit & MASK_SSE2))
1914 target_flags |= MASK_SSE2;
1915 if (processor_alias_table[i].flags & PTA_SSE3
1916 && !(target_flags_explicit & MASK_SSE3))
1917 target_flags |= MASK_SSE3;
1918 if (processor_alias_table[i].flags & PTA_SSSE3
1919 && !(target_flags_explicit & MASK_SSSE3))
1920 target_flags |= MASK_SSSE3;
1921 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1922 x86_prefetch_sse = true;
1923 if (processor_alias_table[i].flags & PTA_CX16)
1924 x86_cmpxchg16b = true;
1925 if (processor_alias_table[i].flags & PTA_POPCNT
1926 && !(target_flags_explicit & MASK_POPCNT))
1927 target_flags |= MASK_POPCNT;
1928 if (processor_alias_table[i].flags & PTA_ABM
1929 && !(target_flags_explicit & MASK_ABM))
1930 target_flags |= MASK_ABM;
1931 if (processor_alias_table[i].flags & PTA_SSE4A
1932 && !(target_flags_explicit & MASK_SSE4A))
1933 target_flags |= MASK_SSE4A;
1934 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF)))
1936 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1937 error ("CPU you selected does not support x86-64 "
1943 error ("bad value (%s) for -march= switch", ix86_arch_string);
1945 ix86_arch_mask = 1u << ix86_arch;
1946 for (i = 0; i < X86_ARCH_LAST; ++i)
1947 ix86_arch_features[i] &= ix86_arch_mask;
1949 for (i = 0; i < pta_size; i++)
1950 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1952 ix86_tune = processor_alias_table[i].processor;
1953 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1955 if (ix86_tune_defaulted)
1957 ix86_tune_string = "x86-64";
1958 for (i = 0; i < pta_size; i++)
1959 if (! strcmp (ix86_tune_string,
1960 processor_alias_table[i].name))
1962 ix86_tune = processor_alias_table[i].processor;
1965 error ("CPU you selected does not support x86-64 "
1968 /* Intel CPUs have always interpreted SSE prefetch instructions as
1969 NOPs; so, we can enable SSE prefetch instructions even when
1970 -mtune (rather than -march) points us to a processor that has them.
1971 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1972 higher processors. */
1973 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1974 x86_prefetch_sse = true;
1978 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1980 ix86_tune_mask = 1u << ix86_tune;
1981 for (i = 0; i < X86_TUNE_LAST; ++i)
1982 ix86_tune_features[i] &= ix86_tune_mask;
1985 ix86_cost = &size_cost;
1987 ix86_cost = processor_target_table[ix86_tune].cost;
1988 target_flags |= processor_target_table[ix86_tune].target_enable;
1989 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1991 /* Arrange to set up i386_stack_locals for all functions. */
1992 init_machine_status = ix86_init_machine_status;
1994 /* Validate -mregparm= value. */
1995 if (ix86_regparm_string)
1998 warning (0, "-mregparm is ignored in 64-bit mode");
1999 i = atoi (ix86_regparm_string);
2000 if (i < 0 || i > REGPARM_MAX)
2001 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
2006 ix86_regparm = REGPARM_MAX;
2008 /* If the user has provided any of the -malign-* options,
2009 warn and use that value only if -falign-* is not set.
2010 Remove this code in GCC 3.2 or later. */
2011 if (ix86_align_loops_string)
2013 warning (0, "-malign-loops is obsolete, use -falign-loops");
2014 if (align_loops == 0)
2016 i = atoi (ix86_align_loops_string);
2017 if (i < 0 || i > MAX_CODE_ALIGN)
2018 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2020 align_loops = 1 << i;
2024 if (ix86_align_jumps_string)
2026 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2027 if (align_jumps == 0)
2029 i = atoi (ix86_align_jumps_string);
2030 if (i < 0 || i > MAX_CODE_ALIGN)
2031 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2033 align_jumps = 1 << i;
2037 if (ix86_align_funcs_string)
2039 warning (0, "-malign-functions is obsolete, use -falign-functions");
2040 if (align_functions == 0)
2042 i = atoi (ix86_align_funcs_string);
2043 if (i < 0 || i > MAX_CODE_ALIGN)
2044 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2046 align_functions = 1 << i;
2050 /* Default align_* from the processor table. */
2051 if (align_loops == 0)
2053 align_loops = processor_target_table[ix86_tune].align_loop;
2054 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2056 if (align_jumps == 0)
2058 align_jumps = processor_target_table[ix86_tune].align_jump;
2059 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2061 if (align_functions == 0)
2063 align_functions = processor_target_table[ix86_tune].align_func;
2066 /* Validate -mbranch-cost= value, or provide default. */
2067 ix86_branch_cost = ix86_cost->branch_cost;
2068 if (ix86_branch_cost_string)
2070 i = atoi (ix86_branch_cost_string);
2072 error ("-mbranch-cost=%d is not between 0 and 5", i);
2074 ix86_branch_cost = i;
2076 if (ix86_section_threshold_string)
2078 i = atoi (ix86_section_threshold_string);
2080 error ("-mlarge-data-threshold=%d is negative", i);
2082 ix86_section_threshold = i;
2085 if (ix86_tls_dialect_string)
2087 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2088 ix86_tls_dialect = TLS_DIALECT_GNU;
2089 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2090 ix86_tls_dialect = TLS_DIALECT_GNU2;
2091 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2092 ix86_tls_dialect = TLS_DIALECT_SUN;
2094 error ("bad value (%s) for -mtls-dialect= switch",
2095 ix86_tls_dialect_string);
2098 /* Keep nonleaf frame pointers. */
2099 if (flag_omit_frame_pointer)
2100 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2101 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2102 flag_omit_frame_pointer = 1;
2104 /* If we're doing fast math, we don't care about comparison order
2105 wrt NaNs. This lets us use a shorter comparison sequence. */
2106 if (flag_finite_math_only)
2107 target_flags &= ~MASK_IEEE_FP;
2109 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2110 since the insns won't need emulation. */
2111 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
2112 target_flags &= ~MASK_NO_FANCY_MATH_387;
2114 /* Likewise, if the target doesn't have a 387, or we've specified
2115 software floating point, don't use 387 inline intrinsics. */
2117 target_flags |= MASK_NO_FANCY_MATH_387;
2119 /* Turn on SSE3 builtins for -mssse3. */
2121 target_flags |= MASK_SSE3;
2123 /* Turn on SSE3 builtins for -msse4a. */
2125 target_flags |= MASK_SSE3;
2127 /* Turn on SSE2 builtins for -msse3. */
2129 target_flags |= MASK_SSE2;
2131 /* Turn on SSE builtins for -msse2. */
2133 target_flags |= MASK_SSE;
2135 /* Turn on MMX builtins for -msse. */
2138 target_flags |= MASK_MMX & ~target_flags_explicit;
2139 x86_prefetch_sse = true;
2142 /* Turn on MMX builtins for 3Dnow. */
2144 target_flags |= MASK_MMX;
2146 /* Turn on POPCNT builtins for -mabm. */
2148 target_flags |= MASK_POPCNT;
2153 warning (0, "-mrtd is ignored in 64bit mode");
2155 /* Enable the SSE and MMX builtins by default. Still allow the user to
2156 explicitly disable any of these. In particular, disabling SSE and
2157 MMX for kernel code is extremely useful. */
2159 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | TARGET_SUBTARGET64_DEFAULT)
2160 & ~target_flags_explicit);
2164 /* The i386 ABI does not specify a red zone. It still makes sense to use one
2165 when the programmer takes care to keep the stack from being destroyed. */
2166 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2167 target_flags |= MASK_NO_RED_ZONE;
2170 /* Validate -mpreferred-stack-boundary= value, or provide default.
2171 The default of 128 bits is for Pentium III's SSE __m128. We can't
2172 change it because of optimize_size. Otherwise, we can't mix object
2173 files compiled with -Os and -On. */
2174 ix86_preferred_stack_boundary = 128;
2175 if (ix86_preferred_stack_boundary_string)
2177 i = atoi (ix86_preferred_stack_boundary_string);
2178 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2179 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2180 TARGET_64BIT ? 4 : 2);
2182 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
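/* For example, -mpreferred-stack-boundary=4 gives (1 << 4) * 8 == 128 bits,
   i.e. the default 16-byte alignment.  */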
2185 /* Accept -msseregparm only if at least SSE support is enabled. */
2186 if (TARGET_SSEREGPARM
2188 error ("-msseregparm used without SSE enabled");
2190 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2191 if (ix86_fpmath_string != 0)
2193 if (! strcmp (ix86_fpmath_string, "387"))
2194 ix86_fpmath = FPMATH_387;
2195 else if (! strcmp (ix86_fpmath_string, "sse"))
2199 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2200 ix86_fpmath = FPMATH_387;
2203 ix86_fpmath = FPMATH_SSE;
2205 else if (! strcmp (ix86_fpmath_string, "387,sse")
2206 || ! strcmp (ix86_fpmath_string, "sse,387"))
2210 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2211 ix86_fpmath = FPMATH_387;
2213 else if (!TARGET_80387)
2215 warning (0, "387 instruction set disabled, using SSE arithmetics");
2216 ix86_fpmath = FPMATH_SSE;
2219 ix86_fpmath = FPMATH_SSE | FPMATH_387;
2222 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2225 /* If the i387 is disabled, then do not return values in it. */
2227 target_flags &= ~MASK_FLOAT_RETURNS;
2229 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
2230 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2232 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2234 /* ??? Unwind info is not correct around the CFG unless either a frame
2235 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2236 unwind info generation to be aware of the CFG and propagating states around edges.
2238 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2239 || flag_exceptions || flag_non_call_exceptions)
2240 && flag_omit_frame_pointer
2241 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2243 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2244 warning (0, "unwind tables currently require either a frame pointer "
2245 "or -maccumulate-outgoing-args for correctness");
2246 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2249 /* For sane SSE instruction set generation we need the fcomi instruction.
2250 It is safe to enable all CMOV instructions. */
2254 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2257 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2258 p = strchr (internal_label_prefix, 'X');
2259 internal_label_prefix_len = p - internal_label_prefix;
2263 /* When no scheduling description is available, disable the scheduler passes
2264 so they won't slow down the compilation and make x87 code slower. */
2265 if (!TARGET_SCHEDULE)
2266 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2268 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2269 set_param_value ("simultaneous-prefetches",
2270 ix86_cost->simultaneous_prefetches);
2271 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2272 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2275 /* Return true if this goes in large data/bss. */
2278 ix86_in_large_data_p (tree exp)
2280 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
2283 /* Functions are never large data. */
2284 if (TREE_CODE (exp) == FUNCTION_DECL)
2287 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
2289 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
2290 if (strcmp (section, ".ldata") == 0
2291 || strcmp (section, ".lbss") == 0)
2297 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
2299 /* If this is an incomplete type with size 0, then we can't put it
2300 in data because it might be too big when completed. */
2301 if (!size || size > ix86_section_threshold)
2308 /* Switch to the appropriate section for output of DECL.
2309 DECL is either a `VAR_DECL' node or a constant of some sort.
2310 RELOC indicates whether forming the initial value of DECL requires
2311 link-time relocations. */
2313 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT) ATTRIBUTE_UNUSED;
2317 x86_64_elf_select_section (tree decl, int reloc,
2318 unsigned HOST_WIDE_INT align)
2320 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2321 && ix86_in_large_data_p (decl))
2323 const char *sname = NULL;
2324 unsigned int flags = SECTION_WRITE;
2325 switch (categorize_decl_for_section (decl, reloc))
2330 case SECCAT_DATA_REL:
2331 sname = ".ldata.rel";
2333 case SECCAT_DATA_REL_LOCAL:
2334 sname = ".ldata.rel.local";
2336 case SECCAT_DATA_REL_RO:
2337 sname = ".ldata.rel.ro";
2339 case SECCAT_DATA_REL_RO_LOCAL:
2340 sname = ".ldata.rel.ro.local";
2344 flags |= SECTION_BSS;
2347 case SECCAT_RODATA_MERGE_STR:
2348 case SECCAT_RODATA_MERGE_STR_INIT:
2349 case SECCAT_RODATA_MERGE_CONST:
2353 case SECCAT_SRODATA:
2360 /* We don't split these for the medium model. Place them into
2361 default sections and hope for the best. */
2366 /* We might get called with string constants, but get_named_section
2367 doesn't like them as they are not DECLs. Also, we need to set
2368 flags in that case. */
2370 return get_section (sname, flags, NULL);
2371 return get_named_section (decl, sname, reloc);
2374 return default_elf_select_section (decl, reloc, align);
2377 /* Build up a unique section name, expressed as a
2378 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2379 RELOC indicates whether the initial value of EXP requires
2380 link-time relocations. */
2382 static void ATTRIBUTE_UNUSED
2383 x86_64_elf_unique_section (tree decl, int reloc)
2385 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2386 && ix86_in_large_data_p (decl))
2388 const char *prefix = NULL;
2389 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2390 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2392 switch (categorize_decl_for_section (decl, reloc))
2395 case SECCAT_DATA_REL:
2396 case SECCAT_DATA_REL_LOCAL:
2397 case SECCAT_DATA_REL_RO:
2398 case SECCAT_DATA_REL_RO_LOCAL:
2399 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2402 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2405 case SECCAT_RODATA_MERGE_STR:
2406 case SECCAT_RODATA_MERGE_STR_INIT:
2407 case SECCAT_RODATA_MERGE_CONST:
2408 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2410 case SECCAT_SRODATA:
2417 /* We don't split these for the medium model. Place them into
2418 default sections and hope for the best. */
2426 plen = strlen (prefix);
2428 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2429 name = targetm.strip_name_encoding (name);
2430 nlen = strlen (name);
2432 string = alloca (nlen + plen + 1);
2433 memcpy (string, prefix, plen);
2434 memcpy (string + plen, name, nlen + 1);
2436 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2440 default_unique_section (decl, reloc);
2443 #ifdef COMMON_ASM_OP
2444 /* This says how to output assembler code to declare an
2445 uninitialized external linkage data object.
2447 For medium model x86-64 we need to use the .largecomm opcode for large objects. */
2450 x86_elf_aligned_common (FILE *file,
2451 const char *name, unsigned HOST_WIDE_INT size,
2454 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2455 && size > (unsigned int)ix86_section_threshold)
2456 fprintf (file, ".largecomm\t");
2458 fprintf (file, "%s", COMMON_ASM_OP);
2459 assemble_name (file, name);
2460 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2461 size, align / BITS_PER_UNIT);
2465 /* Utility function for targets to use in implementing
2466 ASM_OUTPUT_ALIGNED_BSS. */
2469 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2470 const char *name, unsigned HOST_WIDE_INT size,
2473 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2474 && size > (unsigned int)ix86_section_threshold)
2475 switch_to_section (get_named_section (decl, ".lbss", 0));
2477 switch_to_section (bss_section);
2478 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2479 #ifdef ASM_DECLARE_OBJECT_NAME
2480 last_assemble_variable_decl = decl;
2481 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2483 /* The standard thing is just to output a label for the object. */
2484 ASM_OUTPUT_LABEL (file, name);
2485 #endif /* ASM_DECLARE_OBJECT_NAME */
2486 ASM_OUTPUT_SKIP (file, size ? size : 1);
2490 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2492 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2493 make the problem with not enough registers even worse. */
2494 #ifdef INSN_SCHEDULING
2496 flag_schedule_insns = 0;
2500 /* The Darwin libraries never set errno, so we might as well
2501 avoid calling them when that's the only reason we would. */
2502 flag_errno_math = 0;
2504 /* The default values of these switches depend on TARGET_64BIT,
2505 which is not known at this moment. Mark these values with 2 and
2506 let the user override them. If there is no command line option
2507 specifying them, we will set the defaults in override_options. */
2509 flag_omit_frame_pointer = 2;
2510 flag_pcc_struct_return = 2;
2511 flag_asynchronous_unwind_tables = 2;
2512 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
2513 SUBTARGET_OPTIMIZATION_OPTIONS;
2517 /* Decide whether we can make a sibling call to a function. DECL is the
2518 declaration of the function being targeted by the call and EXP is the
2519 CALL_EXPR representing the call. */
2522 ix86_function_ok_for_sibcall (tree decl, tree exp)
2527 /* If we are generating position-independent code, we cannot sibcall
2528 optimize any indirect call, or a direct call to a global function,
2529 as the PLT requires %ebx be live. */
2530 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
2537 func = TREE_TYPE (CALL_EXPR_FN (exp));
2538 if (POINTER_TYPE_P (func))
2539 func = TREE_TYPE (func);
2542 /* Check that the return value locations are the same. For instance,
2543 if we are returning floats on the 80387 register stack, we cannot
2544 make a sibcall from a function that doesn't return a float to a
2545 function that does or, conversely, from a function that does return
2546 a float to a function that doesn't; the necessary stack adjustment
2547 would not be executed. This is also the place we notice
2548 differences in the return value ABI. Note that it is ok for one
2549 of the functions to have void return type as long as the return
2550 value of the other is passed in a register. */
2551 a = ix86_function_value (TREE_TYPE (exp), func, false);
2552 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2554 if (STACK_REG_P (a) || STACK_REG_P (b))
2556 if (!rtx_equal_p (a, b))
2559 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2561 else if (!rtx_equal_p (a, b))
2564 /* If this call is indirect, we'll need to be able to use a call-clobbered
2565 register for the address of the target function. Make sure that all
2566 such registers are not used for passing parameters. */
2567 if (!decl && !TARGET_64BIT)
2571 /* We're looking at the CALL_EXPR, we need the type of the function. */
2572 type = CALL_EXPR_FN (exp); /* pointer expression */
2573 type = TREE_TYPE (type); /* pointer type */
2574 type = TREE_TYPE (type); /* function type */
2576 if (ix86_function_regparm (type, NULL) >= 3)
2578 /* ??? Need to count the actual number of registers to be used,
2579 not the possible number of registers. Fix later. */
2584 /* Dllimport'd functions are also called indirectly. */
2585 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
2586 && decl && DECL_DLLIMPORT_P (decl)
2587 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2590 /* If we force-aligned the stack, then sibcalling would unalign the
2591 stack, which may break the called function. */
2592 if (cfun->machine->force_align_arg_pointer)
2595 /* Otherwise okay. That also includes certain types of indirect calls. */
2599 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2600 calling convention attributes;
2601 arguments as in struct attribute_spec.handler. */
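/* An illustrative sketch of what these attributes request (the register
   assignments below are the documented i386 conventions):

     int __attribute__ ((regparm (3))) f (int a, int b, int c);
        // a in %eax, b in %edx, c in %ecx
     int __attribute__ ((fastcall)) g (int a, int b);
        // a in %ecx, b in %edx, the rest on the stack
     int __attribute__ ((stdcall)) h (int a);
        // all arguments on the stack; the callee pops them  */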
2604 ix86_handle_cconv_attribute (tree *node, tree name,
2606 int flags ATTRIBUTE_UNUSED,
2609 if (TREE_CODE (*node) != FUNCTION_TYPE
2610 && TREE_CODE (*node) != METHOD_TYPE
2611 && TREE_CODE (*node) != FIELD_DECL
2612 && TREE_CODE (*node) != TYPE_DECL)
2614 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2615 IDENTIFIER_POINTER (name));
2616 *no_add_attrs = true;
2620 /* Can combine regparm with all attributes but fastcall. */
2621 if (is_attribute_p ("regparm", name))
2625 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2627 error ("fastcall and regparm attributes are not compatible");
2630 cst = TREE_VALUE (args);
2631 if (TREE_CODE (cst) != INTEGER_CST)
2633 warning (OPT_Wattributes,
2634 "%qs attribute requires an integer constant argument",
2635 IDENTIFIER_POINTER (name));
2636 *no_add_attrs = true;
2638 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2640 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2641 IDENTIFIER_POINTER (name), REGPARM_MAX);
2642 *no_add_attrs = true;
2646 && lookup_attribute (ix86_force_align_arg_pointer_string,
2647 TYPE_ATTRIBUTES (*node))
2648 && compare_tree_int (cst, REGPARM_MAX-1))
2650 error ("%s functions limited to %d register parameters",
2651 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
2659 /* Do not warn when emulating the MS ABI. */
2660 if (!TARGET_64BIT_MS_ABI)
2661 warning (OPT_Wattributes, "%qs attribute ignored",
2662 IDENTIFIER_POINTER (name));
2663 *no_add_attrs = true;
2667 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2668 if (is_attribute_p ("fastcall", name))
2670 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2672 error ("fastcall and cdecl attributes are not compatible");
2674 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2676 error ("fastcall and stdcall attributes are not compatible");
2678 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2680 error ("fastcall and regparm attributes are not compatible");
2684 /* Can combine stdcall with fastcall (redundant), regparm and
2686 else if (is_attribute_p ("stdcall", name))
2688 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2690 error ("stdcall and cdecl attributes are not compatible");
2692 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2694 error ("stdcall and fastcall attributes are not compatible");
2698 /* Can combine cdecl with regparm and sseregparm. */
2699 else if (is_attribute_p ("cdecl", name))
2701 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2703 error ("stdcall and cdecl attributes are not compatible");
2705 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2707 error ("fastcall and cdecl attributes are not compatible");
2711 /* Can combine sseregparm with all attributes. */
2716 /* Return 0 if the attributes for two types are incompatible, 1 if they
2717 are compatible, and 2 if they are nearly compatible (which causes a
2718 warning to be generated). */
2721 ix86_comp_type_attributes (tree type1, tree type2)
2723 /* Check for mismatch of non-default calling convention. */
2724 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2726 if (TREE_CODE (type1) != FUNCTION_TYPE)
2729 /* Check for mismatched fastcall/regparm types. */
2730 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2731 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2732 || (ix86_function_regparm (type1, NULL)
2733 != ix86_function_regparm (type2, NULL)))
2736 /* Check for mismatched sseregparm types. */
2737 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2738 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2741 /* Check for mismatched return types (cdecl vs stdcall). */
2742 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2743 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2749 /* Return the regparm value for a function with the indicated TYPE and DECL.
2750 DECL may be NULL when calling function indirectly
2751 or considering a libcall. */
2754 ix86_function_regparm (tree type, tree decl)
2757 int regparm = ix86_regparm;
2762 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2764 return TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2766 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2769 /* Use register calling convention for local functions when possible. */
2770 if (decl && flag_unit_at_a_time && !profile_flag)
2772 struct cgraph_local_info *i = cgraph_local_info (decl);
2775 int local_regparm, globals = 0, regno;
2778 /* Make sure no regparm register is taken by a
2779 global register variable. */
2780 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2781 if (global_regs[local_regparm])
2784 /* We can't use regparm(3) for nested functions as these use the
2785 static chain pointer in the third argument. */
2786 if (local_regparm == 3
2787 && decl_function_context (decl)
2788 && !DECL_NO_STATIC_CHAIN (decl))
2791 /* If the function realigns its stack pointer, the prologue will
2792 clobber %ecx. If we've already generated code for the callee,
2793 the callee DECL_STRUCT_FUNCTION is gone, so we fall back to
2794 scanning the attributes for the self-realigning property. */
2795 f = DECL_STRUCT_FUNCTION (decl);
2796 if (local_regparm == 3
2797 && (f ? !!f->machine->force_align_arg_pointer
2798 : !!lookup_attribute (ix86_force_align_arg_pointer_string,
2799 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
2802 /* Each global register variable increases register pressure, so the
2803 more global register variables there are, the less the regparm
2804 optimization can be used, unless the user explicitly requested it. */
2805 for (regno = 0; regno < 6; regno++)
2806 if (global_regs[regno])
2809 = globals < local_regparm ? local_regparm - globals : 0;
2811 if (local_regparm > regparm)
2812 regparm = local_regparm;
2819 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
2820 DFmode (2) arguments in SSE registers for a function with the
2821 indicated TYPE and DECL. DECL may be NULL when calling function
2822 indirectly or considering a libcall. Otherwise return 0. */
2825 ix86_function_sseregparm (tree type, tree decl)
2827 gcc_assert (!TARGET_64BIT);
2829 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2830 by the sseregparm attribute. */
2831 if (TARGET_SSEREGPARM
2832 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2837 error ("Calling %qD with attribute sseregparm without "
2838 "SSE/SSE2 enabled", decl);
2840 error ("Calling %qT with attribute sseregparm without "
2841 "SSE/SSE2 enabled", type);
2848 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
2849 (and DFmode for SSE2) arguments in SSE registers. */
2850 if (decl && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2852 struct cgraph_local_info *i = cgraph_local_info (decl);
2854 return TARGET_SSE2 ? 2 : 1;
2860 /* Return true if EAX is live at the start of the function. Used by
2861 ix86_expand_prologue to determine if we need special help before
2862 calling allocate_stack_worker. */
2865 ix86_eax_live_at_start_p (void)
2867 /* Cheat. Don't bother working forward from ix86_function_regparm
2868 to the function type to whether an actual argument is located in
2869 eax. Instead just look at cfg info, which is still close enough
2870 to correct at this point. This gives false positives for broken
2871 functions that might use uninitialized data that happens to be
2872 allocated in eax, but who cares? */
2873 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2876 /* Return true if TYPE has a variable argument list. */
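/* For example, for "int f (int, ...)" the TYPE_ARG_TYPES chain does not
   end in void_list_node, so this returns true; for the fixed prototype
   "int f (int)" the chain is terminated by void_list_node and we return
   false.  */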
2879 type_has_variadic_args_p (tree type)
2883 for (t = TYPE_ARG_TYPES (type); t; t = TREE_CHAIN (t))
2884 if (t == void_list_node)
2889 /* Value is the number of bytes of arguments automatically
2890 popped when returning from a subroutine call.
2891 FUNDECL is the declaration node of the function (as a tree),
2892 FUNTYPE is the data type of the function (as a tree),
2893 or for a library call it is an identifier node for the subroutine name.
2894 SIZE is the number of bytes of arguments passed on the stack.
2896 On the 80386, the RTD insn may be used to pop them if the number
2897 of args is fixed, but if the number is variable then the caller
2898 must pop them all. RTD can't be used for library calls now
2899 because the library is compiled with the Unix compiler.
2900 Use of RTD is a selectable option, since it is incompatible with
2901 standard Unix calling sequences. If the option is not selected,
2902 the caller must always pop the args.
2904 The attribute stdcall is equivalent to RTD on a per module basis. */
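/* A worked example (illustrative): a function declared

     int __attribute__ ((stdcall)) f (int a, int b);

   pops its own 8 bytes of arguments, so this hook returns 8 and the
   epilogue ends in "ret $8" rather than a plain "ret".  */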
2907 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2911 /* None of the 64-bit ABIs pop arguments. */
2915 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2917 /* Cdecl functions override -mrtd, and never pop the stack. */
2918 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
2920 /* Stdcall and fastcall functions will pop the stack if not
2922 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2923 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2926 if (rtd && ! type_has_variadic_args_p (funtype))
2930 /* Lose any fake structure return argument if it is passed on the stack. */
2931 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2932 && !KEEP_AGGREGATE_RETURN_POINTER)
2934 int nregs = ix86_function_regparm (funtype, fundecl);
2936 return GET_MODE_SIZE (Pmode);
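/* That is, when no register is available for the hidden aggregate-return
   pointer and it is passed on the stack, the i386 callee pops those
   GET_MODE_SIZE (Pmode) == 4 bytes itself ("ret $4"); illustrative,
   for the 32-bit case.  */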
2942 /* Argument support functions. */
2944 /* Return true when register may be used to pass function parameters. */
2946 ix86_function_arg_regno_p (int regno)
2949 const int *parm_regs;
2954 return (regno < REGPARM_MAX
2955 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
2957 return (regno < REGPARM_MAX
2958 || (TARGET_MMX && MMX_REGNO_P (regno)
2959 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2960 || (TARGET_SSE && SSE_REGNO_P (regno)
2961 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2966 if (SSE_REGNO_P (regno) && TARGET_SSE)
2971 if (TARGET_SSE && SSE_REGNO_P (regno)
2972 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2976 /* RAX is used as a hidden argument to varargs functions (%al carries the number of SSE registers used). */
2977 if (!TARGET_64BIT_MS_ABI && regno == 0)
2980 if (TARGET_64BIT_MS_ABI)
2981 parm_regs = x86_64_ms_abi_int_parameter_registers;
2983 parm_regs = x86_64_int_parameter_registers;
2984 for (i = 0; i < REGPARM_MAX; i++)
2985 if (regno == parm_regs[i])
2990 /* Return if we do not know how to pass TYPE solely in registers. */
2993 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2995 if (must_pass_in_stack_var_size_or_pad (mode, type))
2998 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2999 The layout_type routine is crafty and tries to trick us into passing
3000 currently unsupported vector types on the stack by using TImode. */
3001 return (!TARGET_64BIT && mode == TImode
3002 && type && TREE_CODE (type) != VECTOR_TYPE);
3005 /* Initialize a variable CUM of type CUMULATIVE_ARGS
3006 for a call to a function whose data type is FNTYPE.
3007 For a library call, FNTYPE is 0. */
3010 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
3011 tree fntype, /* tree ptr for function decl */
3012 rtx libname, /* SYMBOL_REF of library name or 0 */
3015 memset (cum, 0, sizeof (*cum));
3017 /* Set up the number of registers to use for passing arguments. */
3018 cum->nregs = ix86_regparm;
3020 cum->sse_nregs = SSE_REGPARM_MAX;
3022 cum->mmx_nregs = MMX_REGPARM_MAX;
3023 cum->warn_sse = true;
3024 cum->warn_mmx = true;
3025 cum->maybe_vaarg = (fntype ? type_has_variadic_args_p (fntype) : !libname);
3029 /* If there are variable arguments, then we won't pass anything
3030 in registers in 32-bit mode. */
3031 if (cum->maybe_vaarg)
3041 /* Use the ecx and edx registers if the function has the fastcall
3042 attribute, else look for regparm information. */
3045 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
3051 cum->nregs = ix86_function_regparm (fntype, fndecl);
3054 /* Set up the number of SSE registers used for passing SFmode
3055 and DFmode arguments. Warn for mismatching ABI. */
3056 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
3060 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3061 But in the case of vector types, it is some vector mode.
3063 When we have only some of our vector isa extensions enabled, then there
3064 are some modes for which vector_mode_supported_p is false. For these
3065 modes, the generic vector support in gcc will choose some non-vector mode
3066 in order to implement the type. By computing the natural mode, we'll
3067 select the proper ABI location for the operand and not depend on whatever
3068 the middle-end decides to do with these vector types. */
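/* A sketch (using GCC's vector_size extension):

     typedef int v4si __attribute__ ((vector_size (16)));

   type_natural_mode maps v4si to V4SImode, so a v4si argument gets its
   proper (SSE) ABI slot even when the middle end would otherwise fall
   back to a non-vector mode.  */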
3070 static enum machine_mode
3071 type_natural_mode (tree type)
3073 enum machine_mode mode = TYPE_MODE (type);
3075 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3077 HOST_WIDE_INT size = int_size_in_bytes (type);
3078 if ((size == 8 || size == 16)
3079 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3080 && TYPE_VECTOR_SUBPARTS (type) > 1)
3082 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3084 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3085 mode = MIN_MODE_VECTOR_FLOAT;
3087 mode = MIN_MODE_VECTOR_INT;
3089 /* Get the mode which has this inner mode and number of units. */
3090 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3091 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3092 && GET_MODE_INNER (mode) == innermode)
3102 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3103 this may not agree with the mode that the type system has chosen for the
3104 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3105 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
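/* For instance, with MODE == V2SFmode and ORIG_MODE == BLKmode this
   builds (parallel [(expr_list (reg:V2SF n) (const_int 0))]), placing
   the register at byte offset 0 of the BLKmode value (illustrative).  */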
3108 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3113 if (orig_mode != BLKmode)
3114 tmp = gen_rtx_REG (orig_mode, regno);
3117 tmp = gen_rtx_REG (mode, regno);
3118 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3119 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3125 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
3126 of this code is to classify each 8-byte chunk of the incoming argument by register
3127 class and assign registers accordingly. */
3129 /* Return the union class of CLASS1 and CLASS2.
3130 See the x86-64 PS ABI for details. */
3132 static enum x86_64_reg_class
3133 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3135 /* Rule #1: If both classes are equal, this is the resulting class. */
3136 if (class1 == class2)
3139 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3141 if (class1 == X86_64_NO_CLASS)
3143 if (class2 == X86_64_NO_CLASS)
3146 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3147 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3148 return X86_64_MEMORY_CLASS;
3150 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3151 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3152 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3153 return X86_64_INTEGERSI_CLASS;
3154 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3155 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3156 return X86_64_INTEGER_CLASS;
3158 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3160 if (class1 == X86_64_X87_CLASS
3161 || class1 == X86_64_X87UP_CLASS
3162 || class1 == X86_64_COMPLEX_X87_CLASS
3163 || class2 == X86_64_X87_CLASS
3164 || class2 == X86_64_X87UP_CLASS
3165 || class2 == X86_64_COMPLEX_X87_CLASS)
3166 return X86_64_MEMORY_CLASS;
3168 /* Rule #6: Otherwise class SSE is used. */
3169 return X86_64_SSE_CLASS;
3172 /* Classify the argument of type TYPE and mode MODE.
3173 CLASSES will be filled by the register class used to pass each word
3174 of the operand. The number of words is returned. In case the parameter
3175 should be passed in memory, 0 is returned. As a special case for zero
3176 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3178 BIT_OFFSET is used internally for handling records; it specifies the
3179 offset in bits, modulo 256, to avoid overflow cases.
3181 See the x86-64 PS ABI for details. */
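/* A worked example (assuming the usual LP64 layout):

     struct s { double d; int a, b; };   // 16 bytes -> two 8-byte words

   word 0 (d)    classifies as X86_64_SSEDF_CLASS   -> an SSE register
   word 1 (a, b) classifies as X86_64_INTEGER_CLASS -> an integer register  */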
3185 classify_argument (enum machine_mode mode, tree type,
3186 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3188 HOST_WIDE_INT bytes =
3189 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3190 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3192 /* Variable sized entities are always passed/returned in memory. */
3196 if (mode != VOIDmode
3197 && targetm.calls.must_pass_in_stack (mode, type))
3200 if (type && AGGREGATE_TYPE_P (type))
3204 enum x86_64_reg_class subclasses[MAX_CLASSES];
3206 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3210 for (i = 0; i < words; i++)
3211 classes[i] = X86_64_NO_CLASS;
3213 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
3214 signal the memory class, so handle it as a special case. */
3217 classes[0] = X86_64_NO_CLASS;
3221 /* Classify each field of record and merge classes. */
3222 switch (TREE_CODE (type))
3225 /* And now merge the fields of structure. */
3226 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3228 if (TREE_CODE (field) == FIELD_DECL)
3232 if (TREE_TYPE (field) == error_mark_node)
3235 /* Bitfields are always classified as integer. Handle them
3236 early, since later code would consider them to be
3237 misaligned integers. */
3238 if (DECL_BIT_FIELD (field))
3240 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3241 i < ((int_bit_position (field) + (bit_offset % 64))
3242 + tree_low_cst (DECL_SIZE (field), 0)
3245 merge_classes (X86_64_INTEGER_CLASS,
3250 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3251 TREE_TYPE (field), subclasses,
3252 (int_bit_position (field)
3253 + bit_offset) % 256);
3256 for (i = 0; i < num; i++)
3259 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3261 merge_classes (subclasses[i], classes[i + pos]);
3269 /* Arrays are handled as small records. */
3272 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3273 TREE_TYPE (type), subclasses, bit_offset);
3277 /* The partial classes are now full classes. */
3278 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3279 subclasses[0] = X86_64_SSE_CLASS;
3280 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3281 subclasses[0] = X86_64_INTEGER_CLASS;
3283 for (i = 0; i < words; i++)
3284 classes[i] = subclasses[i % num];
3289 case QUAL_UNION_TYPE:
3290 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
3292 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3294 if (TREE_CODE (field) == FIELD_DECL)
3298 if (TREE_TYPE (field) == error_mark_node)
3301 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3302 TREE_TYPE (field), subclasses,
3306 for (i = 0; i < num; i++)
3307 classes[i] = merge_classes (subclasses[i], classes[i]);
3316 /* Final merger cleanup. */
3317 for (i = 0; i < words; i++)
3319 /* If one class is MEMORY, everything should be passed in
3321 if (classes[i] == X86_64_MEMORY_CLASS)
3324 /* The X86_64_SSEUP_CLASS should be always preceded by
3325 X86_64_SSE_CLASS. */
3326 if (classes[i] == X86_64_SSEUP_CLASS
3327 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3328 classes[i] = X86_64_SSE_CLASS;
3330 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3331 if (classes[i] == X86_64_X87UP_CLASS
3332 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3333 classes[i] = X86_64_SSE_CLASS;
3338 /* Compute the alignment needed. We align all types to their natural boundaries,
3339 with the exception of XFmode, which is aligned to 64 bits. */
3340 if (mode != VOIDmode && mode != BLKmode)
3342 int mode_alignment = GET_MODE_BITSIZE (mode);
3345 mode_alignment = 128;
3346 else if (mode == XCmode)
3347 mode_alignment = 256;
3348 if (COMPLEX_MODE_P (mode))
3349 mode_alignment /= 2;
3350 /* Misaligned fields are always returned in memory. */
3351 if (bit_offset % mode_alignment)
3355 /* For V1xx modes, just use the base mode. */
3356 if (VECTOR_MODE_P (mode)
3357 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3358 mode = GET_MODE_INNER (mode);
3360 /* Classification of atomic types. */
3365 classes[0] = X86_64_SSE_CLASS;
3368 classes[0] = X86_64_SSE_CLASS;
3369 classes[1] = X86_64_SSEUP_CLASS;
3378 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3379 classes[0] = X86_64_INTEGERSI_CLASS;
3381 classes[0] = X86_64_INTEGER_CLASS;
3385 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3390 if (!(bit_offset % 64))
3391 classes[0] = X86_64_SSESF_CLASS;
3393 classes[0] = X86_64_SSE_CLASS;
3396 classes[0] = X86_64_SSEDF_CLASS;
3399 classes[0] = X86_64_X87_CLASS;
3400 classes[1] = X86_64_X87UP_CLASS;
3403 classes[0] = X86_64_SSE_CLASS;
3404 classes[1] = X86_64_SSEUP_CLASS;
3407 classes[0] = X86_64_SSE_CLASS;
3410 classes[0] = X86_64_SSEDF_CLASS;
3411 classes[1] = X86_64_SSEDF_CLASS;
3414 classes[0] = X86_64_COMPLEX_X87_CLASS;
3417 /* These modes are larger than 16 bytes. */
3425 classes[0] = X86_64_SSE_CLASS;
3426 classes[1] = X86_64_SSEUP_CLASS;
3432 classes[0] = X86_64_SSE_CLASS;
3438 gcc_assert (VECTOR_MODE_P (mode));
3443 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3445 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3446 classes[0] = X86_64_INTEGERSI_CLASS;
3448 classes[0] = X86_64_INTEGER_CLASS;
3449 classes[1] = X86_64_INTEGER_CLASS;
3450 return 1 + (bytes > 8);
3454 /* Examine the argument and set the number of registers required in each
3455 class. Return 0 iff the parameter should be passed in memory. */
3457 examine_argument (enum machine_mode mode, tree type, int in_return,
3458 int *int_nregs, int *sse_nregs)
3460 enum x86_64_reg_class class[MAX_CLASSES];
3461 int n = classify_argument (mode, type, class, 0);
3467 for (n--; n >= 0; n--)
3470 case X86_64_INTEGER_CLASS:
3471 case X86_64_INTEGERSI_CLASS:
3474 case X86_64_SSE_CLASS:
3475 case X86_64_SSESF_CLASS:
3476 case X86_64_SSEDF_CLASS:
3479 case X86_64_NO_CLASS:
3480 case X86_64_SSEUP_CLASS:
3482 case X86_64_X87_CLASS:
3483 case X86_64_X87UP_CLASS:
3487 case X86_64_COMPLEX_X87_CLASS:
3488 return in_return ? 2 : 0;
3489 case X86_64_MEMORY_CLASS:
3495 /* Construct container for the argument used by GCC interface. See
3496 FUNCTION_ARG for the detailed description. */
3499 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3500 tree type, int in_return, int nintregs, int nsseregs,
3501 const int *intreg, int sse_regno)
3503 /* The following variables hold the static issued_error state. */
3504 static bool issued_sse_arg_error;
3505 static bool issued_sse_ret_error;
3506 static bool issued_x87_ret_error;
3508 enum machine_mode tmpmode;
3510 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3511 enum x86_64_reg_class class[MAX_CLASSES];
3515 int needed_sseregs, needed_intregs;
3516 rtx exp[MAX_CLASSES];
3519 n = classify_argument (mode, type, class, 0);
3522 if (!examine_argument (mode, type, in_return, &needed_intregs,
3525 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3528 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3529 some less clueful developer tries to use floating-point anyway. */
3530 if (needed_sseregs && !TARGET_SSE)
3534 if (!issued_sse_ret_error)
3536 error ("SSE register return with SSE disabled");
3537 issued_sse_ret_error = true;
3540 else if (!issued_sse_arg_error)
3542 error ("SSE register argument with SSE disabled");
3543 issued_sse_arg_error = true;
3548 /* Likewise, error if the ABI requires us to return values in the
3549 x87 registers and the user specified -mno-80387. */
3550 if (!TARGET_80387 && in_return)
3551 for (i = 0; i < n; i++)
3552 if (class[i] == X86_64_X87_CLASS
3553 || class[i] == X86_64_X87UP_CLASS
3554 || class[i] == X86_64_COMPLEX_X87_CLASS)
3556 if (!issued_x87_ret_error)
3558 error ("x87 register return with x87 disabled");
3559 issued_x87_ret_error = true;
3564 /* First construct simple cases. Avoid SCmode, since we want to use a
3565 single register to pass this type. */
3566 if (n == 1 && mode != SCmode)
3569 case X86_64_INTEGER_CLASS:
3570 case X86_64_INTEGERSI_CLASS:
3571 return gen_rtx_REG (mode, intreg[0]);
3572 case X86_64_SSE_CLASS:
3573 case X86_64_SSESF_CLASS:
3574 case X86_64_SSEDF_CLASS:
3575 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3576 case X86_64_X87_CLASS:
3577 case X86_64_COMPLEX_X87_CLASS:
3578 return gen_rtx_REG (mode, FIRST_STACK_REG);
3579 case X86_64_NO_CLASS:
3580 /* Zero sized array, struct or class. */
3585 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3587 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3590 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3591 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3592 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3593 && class[1] == X86_64_INTEGER_CLASS
3594 && (mode == CDImode || mode == TImode || mode == TFmode)
3595 && intreg[0] + 1 == intreg[1])
3596 return gen_rtx_REG (mode, intreg[0]);
3598 /* Otherwise figure out the entries of the PARALLEL. */
3599 for (i = 0; i < n; i++)
3603 case X86_64_NO_CLASS:
3605 case X86_64_INTEGER_CLASS:
3606 case X86_64_INTEGERSI_CLASS:
3607 /* Merge TImodes on aligned occasions here too. */
3608 if (i * 8 + 8 > bytes)
3609 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3610 else if (class[i] == X86_64_INTEGERSI_CLASS)
3614 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
3615 if (tmpmode == BLKmode)
3617 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3618 gen_rtx_REG (tmpmode, *intreg),
3622 case X86_64_SSESF_CLASS:
3623 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3624 gen_rtx_REG (SFmode,
3625 SSE_REGNO (sse_regno)),
3629 case X86_64_SSEDF_CLASS:
3630 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3631 gen_rtx_REG (DFmode,
3632 SSE_REGNO (sse_regno)),
3636 case X86_64_SSE_CLASS:
3637 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3641 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3642 gen_rtx_REG (tmpmode,
3643 SSE_REGNO (sse_regno)),
3645 if (tmpmode == TImode)
3654 /* Empty aligned struct, union or class. */
3658 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3659 for (i = 0; i < nexps; i++)
3660 XVECEXP (ret, 0, i) = exp [i];
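/* A sketch of the PARALLEL built above for struct { double d; long l; }
   passed as the first argument (illustrative, not literal dump output):

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:DI rdi) (const_int 8))])

   Each element pairs the register carrying one eightbyte with that
   eightbyte's byte offset into the aggregate.  */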
3664 /* Update the data in CUM to advance over an argument of mode MODE
3665 and data type TYPE. (TYPE is null for libcalls where that information
3666 may not be available.) */
3669 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3670 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
3686 cum->words += words;
3687 cum->nregs -= words;
3688 cum->regno += words;
3690 if (cum->nregs <= 0)
3698 if (cum->float_in_sse < 2)
3701 if (cum->float_in_sse < 1)
3712 if (!type || !AGGREGATE_TYPE_P (type))
3714 cum->sse_words += words;
3715 cum->sse_nregs -= 1;
3716 cum->sse_regno += 1;
3717 if (cum->sse_nregs <= 0)
3729 if (!type || !AGGREGATE_TYPE_P (type))
3731 cum->mmx_words += words;
3732 cum->mmx_nregs -= 1;
3733 cum->mmx_regno += 1;
3734 if (cum->mmx_nregs <= 0)
3745 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3746 tree type, HOST_WIDE_INT words)
3748 int int_nregs, sse_nregs;
3750 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3751 cum->words += words;
3752 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3754 cum->nregs -= int_nregs;
3755 cum->sse_nregs -= sse_nregs;
3756 cum->regno += int_nregs;
3757 cum->sse_regno += sse_nregs;
3760 cum->words += words;
3764 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
3765 HOST_WIDE_INT words)
3767 /* Otherwise, this should be passed indirectly. */
3768 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
3770 cum->words += words;
3779 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3780 tree type, int named ATTRIBUTE_UNUSED)
3782 HOST_WIDE_INT bytes, words;
3784 if (mode == BLKmode)
3785 bytes = int_size_in_bytes (type);
3787 bytes = GET_MODE_SIZE (mode);
3788 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3791 mode = type_natural_mode (type);
3793 if (TARGET_64BIT_MS_ABI)
3794 function_arg_advance_ms_64 (cum, bytes, words);
3795 else if (TARGET_64BIT)
3796 function_arg_advance_64 (cum, mode, type, words);
3798 function_arg_advance_32 (cum, mode, type, bytes, words);
3801 /* Define where to put the arguments to a function.
3802 Value is zero to push the argument on the stack,
3803 or a hard register in which to store the argument.
3805 MODE is the argument's machine mode.
3806 TYPE is the data type of the argument (as a tree).
3807 This is null for libcalls where that information may
3809 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3810 the preceding args and about the function being called.
3811 NAMED is nonzero if this argument is a named parameter
3812 (otherwise it is an extra parameter matching an ellipsis). */
3815 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3816 enum machine_mode orig_mode, tree type,
3817 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
3819 static bool warnedsse, warnedmmx;
3821 /* Avoid the AL settings for the Unix64 ABI. */
3822 if (mode == VOIDmode)
3838 if (words <= cum->nregs)
3840 int regno = cum->regno;
3842 /* Fastcall allocates the first two DWORD (SImode) or
3843 smaller arguments to ECX and EDX. */
3846 if (mode == BLKmode || mode == DImode)
3849 /* ECX, not EAX, is the first allocated register. */
3853 return gen_rtx_REG (mode, regno);
3858 if (cum->float_in_sse < 2)
3861 if (cum->float_in_sse < 1)
3871 if (!type || !AGGREGATE_TYPE_P (type))
3873 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3876 warning (0, "SSE vector argument without SSE enabled "
3880 return gen_reg_or_parallel (mode, orig_mode,
3881 cum->sse_regno + FIRST_SSE_REG);
3889 if (!type || !AGGREGATE_TYPE_P (type))
3891 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3894 warning (0, "MMX vector argument without MMX enabled "
3898 return gen_reg_or_parallel (mode, orig_mode,
3899 cum->mmx_regno + FIRST_MMX_REG);
3908 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3909 enum machine_mode orig_mode, tree type)
3911 /* Handle a hidden AL argument containing the number of registers
3912 for varargs x86-64 functions. */
3913 if (mode == VOIDmode)
3914 return GEN_INT (cum->maybe_vaarg
3915 ? (cum->sse_nregs < 0
3920 return construct_container (mode, orig_mode, type, 0, cum->nregs,
3922 &x86_64_int_parameter_registers [cum->regno],
3927 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3928 enum machine_mode orig_mode, int named)
3932 /* Avoid the AL settings for the Unix64 ABI. */
3933 if (mode == VOIDmode)
3936 /* If we've run out of registers, it goes on the stack. */
3937 if (cum->nregs == 0)
3940 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
3942 /* Only floating point modes are passed in anything but integer regs. */
3943 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
3946 regno = cum->regno + FIRST_SSE_REG;
3951 /* Unnamed floating parameters are passed in both the
3952 SSE and integer registers. */
3953 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
3954 t2 = gen_rtx_REG (mode, regno);
3955 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
3956 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
3957 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
3961 return gen_reg_or_parallel (mode, orig_mode, regno);
3965 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
3966 tree type, int named)
3968 enum machine_mode mode = omode;
3969 HOST_WIDE_INT bytes, words;
3971 if (mode == BLKmode)
3972 bytes = int_size_in_bytes (type);
3974 bytes = GET_MODE_SIZE (mode);
3975 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3977 /* To simplify the code below, represent vector types with a vector mode
3978 even if MMX/SSE are not active. */
3979 if (type && TREE_CODE (type) == VECTOR_TYPE)
3980 mode = type_natural_mode (type);
3982 if (TARGET_64BIT_MS_ABI)
3983 return function_arg_ms_64 (cum, mode, omode, named);
3984 else if (TARGET_64BIT)
3985 return function_arg_64 (cum, mode, omode, type);
3987 return function_arg_32 (cum, mode, omode, type, bytes, words);
3990 /* A C expression that indicates when an argument must be passed by
3991 reference. If nonzero for an argument, a copy of that argument is
3992 made in memory and a pointer to the argument is passed instead of
3993 the argument itself. The pointer is passed in whatever way is
3994 appropriate for passing a pointer to that type. */
3997 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3998 enum machine_mode mode ATTRIBUTE_UNUSED,
3999 tree type, bool named ATTRIBUTE_UNUSED)
4001 if (TARGET_64BIT_MS_ABI)
4005 /* Arrays are passed by reference. */
4006 if (TREE_CODE (type) == ARRAY_TYPE)
4009 if (AGGREGATE_TYPE_P (type))
4011 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
4012 are passed by reference. */
4013 int el2 = exact_log2 (int_size_in_bytes (type));
4014 return !(el2 >= 0 && el2 <= 3);
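/* For example (illustrative): a struct of sizeof == 8 gives
   exact_log2 == 3 and is passed in a register, while sizeof == 12
   gives exact_log2 == -1 and sizeof == 16 gives 4, so both of those
   are passed by reference.  */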
4018 /* __m128 is passed by reference. */
4019 /* ??? How to handle complex? For now treat them as structs,
4020 and pass them by reference if they're too large. */
4021 if (GET_MODE_SIZE (mode) > 8)
4024 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
4030 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
4031 ABI. Only called if TARGET_SSE. */
4033 contains_128bit_aligned_vector_p (tree type)
4035 enum machine_mode mode = TYPE_MODE (type);
4036 if (SSE_REG_MODE_P (mode)
4037 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
4039 if (TYPE_ALIGN (type) < 128)
4042 if (AGGREGATE_TYPE_P (type))
4044 /* Walk the aggregates recursively. */
4045 switch (TREE_CODE (type))
4049 case QUAL_UNION_TYPE:
4053 /* Walk all the structure fields. */
4054 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4056 if (TREE_CODE (field) == FIELD_DECL
4057 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
4064 /* Just for use if some languages pass arrays by value. */
4065 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
4076 /* Gives the alignment boundary, in bits, of an argument with the
4077 specified mode and type. */
4080 ix86_function_arg_boundary (enum machine_mode mode, tree type)
4084 align = TYPE_ALIGN (type);
4086 align = GET_MODE_ALIGNMENT (mode);
4087 if (align < PARM_BOUNDARY)
4088 align = PARM_BOUNDARY;
4091 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
4092 make an exception for SSE modes since these require 128bit alignment.
4095 The handling here differs from field_alignment. ICC aligns MMX
4096 arguments to 4 byte boundaries, while structure fields are aligned
4097 to 8 byte boundaries. */
4099 align = PARM_BOUNDARY;
4102 if (!SSE_REG_MODE_P (mode))
4103 align = PARM_BOUNDARY;
4107 if (!contains_128bit_aligned_vector_p (type))
4108 align = PARM_BOUNDARY;
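/* Illustrative outcomes of the rules above (not from the original
   sources): on ia32 a double argument gets PARM_BOUNDARY (32 bits),
   while an __m128 argument, or a struct containing one, keeps its
   128-bit alignment when SSE is enabled.  */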
4116 /* Return true if N is a possible register number of function value. */
4119 ix86_function_value_regno_p (int regno)
4126 case FIRST_FLOAT_REG:
4127 if (TARGET_64BIT_MS_ABI)
4129 return TARGET_FLOAT_RETURNS_IN_80387;
4135 if (TARGET_MACHO || TARGET_64BIT)
4143 /* Define how to find the value returned by a function.
4144 VALTYPE is the data type of the value (as a tree).
4145 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4146 otherwise, FUNC is 0. */
4149 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
4150 tree fntype, tree fn)
4154 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4155 we normally prevent this case when mmx is not available. However
4156 some ABIs may require the result to be returned like DImode. */
4157 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4158 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
4160 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4161 we prevent this case when sse is not available. However some ABIs
4162 may require the result to be returned like integer TImode. */
4163 else if (mode == TImode
4164 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4165 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
4167 /* Decimal floating point values can go in %eax, unlike other float modes. */
4168 else if (DECIMAL_FLOAT_MODE_P (mode))
4171 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
4172 else if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
4175 /* Floating point return values in %st(0), except for local functions when
4176 SSE math is enabled or for functions with sseregparm attribute. */
4179 regno = FIRST_FLOAT_REG;
4181 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
4183 int sse_level = ix86_function_sseregparm (fntype, fn);
4184 if ((sse_level >= 1 && mode == SFmode)
4185 || (sse_level == 2 && mode == DFmode))
4186 regno = FIRST_SSE_REG;
4190 return gen_rtx_REG (orig_mode, regno);
4194 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
4199 /* Handle libcalls, which don't provide a type node. */
4200 if (valtype == NULL)
4212 return gen_rtx_REG (mode, FIRST_SSE_REG);
4215 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4219 return gen_rtx_REG (mode, 0);
4223 ret = construct_container (mode, orig_mode, valtype, 1,
4224 REGPARM_MAX, SSE_REGPARM_MAX,
4225 x86_64_int_return_registers, 0);
4227 /* For zero sized structures, construct_container returns NULL, but we
4228 need to keep the rest of the compiler happy by returning a meaningful value. */
4230 ret = gen_rtx_REG (orig_mode, 0);
4236 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
4238 unsigned int regno = 0;
4242 if (mode == SFmode || mode == DFmode)
4243 regno = FIRST_SSE_REG;
4244 else if (VECTOR_MODE_P (mode) || GET_MODE_SIZE (mode) == 16)
4245 regno = FIRST_SSE_REG;
4248 return gen_rtx_REG (orig_mode, regno);
4252 ix86_function_value_1 (tree valtype, tree fntype_or_decl,
4253 enum machine_mode orig_mode, enum machine_mode mode)
4258 if (fntype_or_decl && DECL_P (fntype_or_decl))
4259 fn = fntype_or_decl;
4260 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4262 if (TARGET_64BIT_MS_ABI)
4263 return function_value_ms_64 (orig_mode, mode);
4264 else if (TARGET_64BIT)
4265 return function_value_64 (orig_mode, mode, valtype);
4267 return function_value_32 (orig_mode, mode, fntype, fn);
4271 ix86_function_value (tree valtype, tree fntype_or_decl,
4272 bool outgoing ATTRIBUTE_UNUSED)
4274 enum machine_mode mode, orig_mode;
4276 orig_mode = TYPE_MODE (valtype);
4277 mode = type_natural_mode (valtype);
4278 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
4282 ix86_libcall_value (enum machine_mode mode)
4284 return ix86_function_value_1 (NULL, NULL, mode, mode);
4287 /* Return true iff type is returned in memory. */
4290 return_in_memory_32 (tree type, enum machine_mode mode)
4294 if (mode == BLKmode)
4297 size = int_size_in_bytes (type);
4299 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4302 if (VECTOR_MODE_P (mode) || mode == TImode)
4304 /* User-created vectors small enough to fit in EAX. */
4308 /* MMX/3dNow values are returned in MM0,
4309 except when it doesn't exist. */
4311 return (TARGET_MMX ? 0 : 1);
4313 /* SSE values are returned in XMM0, except when it doesn't exist. */
4315 return (TARGET_SSE ? 0 : 1);
4330 return_in_memory_64 (tree type, enum machine_mode mode)
4332 int needed_intregs, needed_sseregs;
4333 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4337 return_in_memory_ms_64 (tree type, enum machine_mode mode)
4339 HOST_WIDE_INT size = int_size_in_bytes (type);
4341 /* __m128 and friends are returned in xmm0. */
4342 if (size == 16 && VECTOR_MODE_P (mode))
4345 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
4346 return (size != 1 && size != 2 && size != 4 && size != 8);
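/* Examples under this ABI (illustrative): a 1/2/4/8-byte aggregate is
   returned in a register, a 16-byte vector such as __m128 in XMM0, and
   anything else - e.g. a 12-byte struct - in memory.  */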
4350 ix86_return_in_memory (tree type)
4352 enum machine_mode mode = type_natural_mode (type);
4354 if (TARGET_64BIT_MS_ABI)
4355 return return_in_memory_ms_64 (type, mode);
4356 else if (TARGET_64BIT)
4357 return return_in_memory_64 (type, mode);
4359 return return_in_memory_32 (type, mode);
4362 /* When returning SSE vector types, we have a choice of either
4363 (1) being ABI incompatible with a -march switch, or
4364 (2) generating an error.
4365 Given no good solution, I think the safest thing is one warning.
4366 The user won't be able to use -Werror, but....
4368 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4369 called in response to actually generating a caller or callee that
4370 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4371 via aggregate_value_p for general type probing from tree-ssa. */
4374 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4376 static bool warnedsse, warnedmmx;
4378 if (!TARGET_64BIT && type)
4380 /* Look at the return type of the function, not the function type. */
4381 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4383 if (!TARGET_SSE && !warnedsse)
4386 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4389 warning (0, "SSE vector return without SSE enabled "
4394 if (!TARGET_MMX && !warnedmmx)
4396 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4399 warning (0, "MMX vector return without MMX enabled "
4409 /* Create the va_list data type. */
4412 ix86_build_builtin_va_list (void)
4414 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4416 /* For i386 we use a plain pointer to the argument area. */
4417 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
4418 return build_pointer_type (char_type_node);
4420 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4421 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4423 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4424 unsigned_type_node);
4425 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4426 unsigned_type_node);
4427 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4429 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4432 va_list_gpr_counter_field = f_gpr;
4433 va_list_fpr_counter_field = f_fpr;
4435 DECL_FIELD_CONTEXT (f_gpr) = record;
4436 DECL_FIELD_CONTEXT (f_fpr) = record;
4437 DECL_FIELD_CONTEXT (f_ovf) = record;
4438 DECL_FIELD_CONTEXT (f_sav) = record;
4440 TREE_CHAIN (record) = type_decl;
4441 TYPE_NAME (record) = type_decl;
4442 TYPE_FIELDS (record) = f_gpr;
4443 TREE_CHAIN (f_gpr) = f_fpr;
4444 TREE_CHAIN (f_fpr) = f_ovf;
4445 TREE_CHAIN (f_ovf) = f_sav;
4447 layout_type (record);
4449 /* The correct type is an array type of one element. */
4450 return build_array_type (record, build_index_type (size_zero_node));
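/* The record built above corresponds to the familiar x86-64 va_list
   layout, shown here in C for illustration only:

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag, va_list[1];  */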
4453 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
4456 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
4466 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
4469 /* Indicate to allocate space on the stack for varargs save area. */
4470 ix86_save_varrargs_registers = 1;
4471 cfun->stack_alignment_needed = 128;
4473 save_area = frame_pointer_rtx;
4474 set = get_varargs_alias_set ();
4476 for (i = cum->regno;
4478 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
4481 mem = gen_rtx_MEM (Pmode,
4482 plus_constant (save_area, i * UNITS_PER_WORD));
4483 MEM_NOTRAP_P (mem) = 1;
4484 set_mem_alias_set (mem, set);
4485 emit_move_insn (mem, gen_rtx_REG (Pmode,
4486 x86_64_int_parameter_registers[i]));
4489 if (cum->sse_nregs && cfun->va_list_fpr_size)
4491 /* Now emit code to save SSE registers. The AX parameter contains the number
4492 of SSE parameter registers used to call this function. We use the
4493 sse_prologue_save insn template that produces a computed jump across
4494 the SSE saves. We need some preparation work to get this working. */
4496 label = gen_label_rtx ();
4497 label_ref = gen_rtx_LABEL_REF (Pmode, label);
4499 /* Compute the address to jump to:
4500 label - 5*eax + nnamed_sse_arguments*5 */
4501 tmp_reg = gen_reg_rtx (Pmode);
4502 nsse_reg = gen_reg_rtx (Pmode);
4503 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
4504 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4505 gen_rtx_MULT (Pmode, nsse_reg,
4510 gen_rtx_CONST (DImode,
4511 gen_rtx_PLUS (DImode,
4513 GEN_INT (cum->sse_regno * 4))));
4515 emit_move_insn (nsse_reg, label_ref);
4516 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
4518 /* Compute the address of the memory block we save into. We always use a
4519 pointer pointing 127 bytes after the first byte to store - this is needed
4520 to keep each instruction's size limited to 4 bytes. */
4521 tmp_reg = gen_reg_rtx (Pmode);
4522 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4523 plus_constant (save_area,
4524 8 * REGPARM_MAX + 127)));
4525 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
4526 MEM_NOTRAP_P (mem) = 1;
4527 set_mem_alias_set (mem, set);
4528 set_mem_align (mem, BITS_PER_WORD);
4530 /* And finally do the dirty job! */
4531 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
4532 GEN_INT (cum->sse_regno), label));
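/* A worked example of the +127 bias used above (illustrative): the SSE
   save slots live at save_area + 8*REGPARM_MAX + 16*I for I = 0..7.
   Relative to TMP_REG, slot I is therefore addressed at 16*I - 127,
   which stays within the one-byte displacement range [-128, 127] for
   all eight slots, so every save insn keeps the same fixed length and
   the computed jump always lands on an insn boundary.  */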
4537 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
4539 int set = get_varargs_alias_set ();
4542 for (i = cum->regno; i < REGPARM_MAX; i++)
4546 mem = gen_rtx_MEM (Pmode,
4547 plus_constant (virtual_incoming_args_rtx,
4548 i * UNITS_PER_WORD));
4549 MEM_NOTRAP_P (mem) = 1;
4550 set_mem_alias_set (mem, set);
4552 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
4553 emit_move_insn (mem, reg);
4558 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4559 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4562 CUMULATIVE_ARGS next_cum;
4566 /* This argument doesn't appear to be used anymore. Which is good,
4567 because the old code here didn't suppress rtl generation. */
4568 gcc_assert (!no_rtl);
4573 fntype = TREE_TYPE (current_function_decl);
4574 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
4575 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
4576 != void_type_node));
4578 /* For varargs, we do not want to skip the dummy va_dcl argument.
4579 For stdargs, we do want to skip the last named argument. */
4582 function_arg_advance (&next_cum, mode, type, 1);
4584 if (TARGET_64BIT_MS_ABI)
4585 setup_incoming_varargs_ms_64 (&next_cum);
4587 setup_incoming_varargs_64 (&next_cum);
4590 /* Implement va_start. */
4593 ix86_va_start (tree valist, rtx nextarg)
4595 HOST_WIDE_INT words, n_gpr, n_fpr;
4596 tree f_gpr, f_fpr, f_ovf, f_sav;
4597 tree gpr, fpr, ovf, sav, t;
4600 /* Only the 64bit target needs something special. */
4601 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
4603 std_expand_builtin_va_start (valist, nextarg);
4607 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4608 f_fpr = TREE_CHAIN (f_gpr);
4609 f_ovf = TREE_CHAIN (f_fpr);
4610 f_sav = TREE_CHAIN (f_ovf);
4612 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
4613 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4614 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4615 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4616 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4618 /* Count number of gp and fp argument registers used. */
4619 words = current_function_args_info.words;
4620 n_gpr = current_function_args_info.regno;
4621 n_fpr = current_function_args_info.sse_regno;
4623 if (cfun->va_list_gpr_size)
4625 type = TREE_TYPE (gpr);
4626 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
4627 build_int_cst (type, n_gpr * 8));
4628 TREE_SIDE_EFFECTS (t) = 1;
4629 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4632 if (cfun->va_list_fpr_size)
4634 type = TREE_TYPE (fpr);
4635 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
4636 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
4637 TREE_SIDE_EFFECTS (t) = 1;
4638 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4641 /* Find the overflow area. */
4642 type = TREE_TYPE (ovf);
4643 t = make_tree (type, virtual_incoming_args_rtx);
4645 t = build2 (PLUS_EXPR, type, t,
4646 build_int_cst (type, words * UNITS_PER_WORD));
4647 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
4648 TREE_SIDE_EFFECTS (t) = 1;
4649 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4651 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
4653 /* Find the register save area.
4654 The function prologue saves it right above the stack frame. */
4655 type = TREE_TYPE (sav);
4656 t = make_tree (type, frame_pointer_rtx);
4657 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
4658 TREE_SIDE_EFFECTS (t) = 1;
4659 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
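/* Illustration of the counters set above: in
     void f (int a, ...)
   the named argument consumes one integer register, so at va_start
   gp_offset is 1*8 = 8 and fp_offset is 0*16 + 8*REGPARM_MAX = 48;
   overflow_arg_area points just past any named stack words, and
   reg_save_area points at the block stored by the prologue.  */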
4663 /* Implement va_arg. */
4666 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4668 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
4669 tree f_gpr, f_fpr, f_ovf, f_sav;
4670 tree gpr, fpr, ovf, sav, t;
4672 tree lab_false, lab_over = NULL_TREE;
4677 enum machine_mode nat_mode;
4679 /* Only the 64bit target needs something special. */
4680 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
4681 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4683 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4684 f_fpr = TREE_CHAIN (f_gpr);
4685 f_ovf = TREE_CHAIN (f_fpr);
4686 f_sav = TREE_CHAIN (f_ovf);
4688 valist = build_va_arg_indirect_ref (valist);
4689 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4690 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4691 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4692 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4694 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
4696 type = build_pointer_type (type);
4697 size = int_size_in_bytes (type);
4698 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4700 nat_mode = type_natural_mode (type);
4701 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
4702 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
4704 /* Pull the value out of the saved registers. */
4706 addr = create_tmp_var (ptr_type_node, "addr");
4707 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
4711 int needed_intregs, needed_sseregs;
4713 tree int_addr, sse_addr;
4715 lab_false = create_artificial_label ();
4716 lab_over = create_artificial_label ();
4718 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4720 need_temp = (!REG_P (container)
4721 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4722 || TYPE_ALIGN (type) > 128));
4724 /* In case we are passing a structure, verify that it is a consecutive block
4725 in the register save area. If not, we need to do moves. */
4726 if (!need_temp && !REG_P (container))
4728 /* Verify that all registers are strictly consecutive */
4729 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4733 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4735 rtx slot = XVECEXP (container, 0, i);
4736 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4737 || INTVAL (XEXP (slot, 1)) != i * 16)
4745 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4747 rtx slot = XVECEXP (container, 0, i);
4748 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4749 || INTVAL (XEXP (slot, 1)) != i * 8)
4761 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4762 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4763 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4764 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4767 /* First ensure that we fit completely in registers. */
4770 t = build_int_cst (TREE_TYPE (gpr),
4771 (REGPARM_MAX - needed_intregs + 1) * 8);
4772 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4773 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4774 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4775 gimplify_and_add (t, pre_p);
4779 t = build_int_cst (TREE_TYPE (fpr),
4780 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4782 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4783 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4784 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4785 gimplify_and_add (t, pre_p);
4788 /* Compute index to start of area used for integer regs. */
4791 /* int_addr = gpr + sav; */
4792 t = fold_convert (ptr_type_node, gpr);
4793 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4794 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
4795 gimplify_and_add (t, pre_p);
4799 /* sse_addr = fpr + sav; */
4800 t = fold_convert (ptr_type_node, fpr);
4801 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4802 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
4803 gimplify_and_add (t, pre_p);
4808 tree temp = create_tmp_var (type, "va_arg_tmp");
4811 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4812 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4813 gimplify_and_add (t, pre_p);
4815 for (i = 0; i < XVECLEN (container, 0); i++)
4817 rtx slot = XVECEXP (container, 0, i);
4818 rtx reg = XEXP (slot, 0);
4819 enum machine_mode mode = GET_MODE (reg);
4820 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4821 tree addr_type = build_pointer_type (piece_type);
4824 tree dest_addr, dest;
4826 if (SSE_REGNO_P (REGNO (reg)))
4828 src_addr = sse_addr;
4829 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4833 src_addr = int_addr;
4834 src_offset = REGNO (reg) * 8;
4836 src_addr = fold_convert (addr_type, src_addr);
4837 src_addr = fold_build2 (PLUS_EXPR, addr_type, src_addr,
4838 size_int (src_offset));
4839 src = build_va_arg_indirect_ref (src_addr);
4841 dest_addr = fold_convert (addr_type, addr);
4842 dest_addr = fold_build2 (PLUS_EXPR, addr_type, dest_addr,
4843 size_int (INTVAL (XEXP (slot, 1))));
4844 dest = build_va_arg_indirect_ref (dest_addr);
4846 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
4847 gimplify_and_add (t, pre_p);
4853 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4854 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4855 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
4856 gimplify_and_add (t, pre_p);
4860 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4861 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4862 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
4863 gimplify_and_add (t, pre_p);
4866 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4867 gimplify_and_add (t, pre_p);
4869 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4870 append_to_statement_list (t, pre_p);
4873 /* ... otherwise out of the overflow area. */
4875 /* Care for on-stack alignment if needed. */
4876 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4877 || integer_zerop (TYPE_SIZE (type)))
4881 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4882 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4883 build_int_cst (TREE_TYPE (ovf), align - 1));
4884 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4885 build_int_cst (TREE_TYPE (t), -align));
4887 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4889 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4890 gimplify_and_add (t2, pre_p);
4892 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4893 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4894 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
4895 gimplify_and_add (t, pre_p);
4899 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4900 append_to_statement_list (t, pre_p);
4903 ptrtype = build_pointer_type (type);
4904 addr = fold_convert (ptrtype, addr);
4907 addr = build_va_arg_indirect_ref (addr);
4908 return build_va_arg_indirect_ref (addr);
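/* Rough shape of what the code above gimplifies va_arg (ap, int) into,
   assuming REGPARM_MAX == 6 (a sketch, not literal GIMPLE output):

     if (ap->gp_offset >= 48)          // 48 == (6 - 1 + 1) * 8
       goto overflow;
     addr = ap->reg_save_area + ap->gp_offset;
     ap->gp_offset += 8;
     goto done;
   overflow:
     addr = ap->overflow_arg_area;
     ap->overflow_arg_area += 8;
   done:
     result = *(int *) addr;  */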
4911 /* Return nonzero if OPNUM's MEM should be matched
4912 in movabs* patterns. */
4915 ix86_check_movabs (rtx insn, int opnum)
4919 set = PATTERN (insn);
4920 if (GET_CODE (set) == PARALLEL)
4921 set = XVECEXP (set, 0, 0);
4922 gcc_assert (GET_CODE (set) == SET);
4923 mem = XEXP (set, opnum);
4924 while (GET_CODE (mem) == SUBREG)
4925 mem = SUBREG_REG (mem);
4926 gcc_assert (MEM_P (mem));
4927 return (volatile_ok || !MEM_VOLATILE_P (mem));
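/* Background note (not from the original sources): the x86-64 movabs
   memory forms only move between the accumulator and a 64-bit absolute
   address, and they may touch volatile memory only when volatile_ok
   says so, which is exactly the condition checked above.  */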
4930 /* Initialize the table of extra 80387 mathematical constants. */
4933 init_ext_80387_constants (void)
4935 static const char * cst[5] =
4937 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4938 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4939 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4940 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4941 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4945 for (i = 0; i < 5; i++)
4947 real_from_string (&ext_80387_constants_table[i], cst[i]);
4948 /* Ensure each constant is rounded to XFmode precision. */
4949 real_convert (&ext_80387_constants_table[i],
4950 XFmode, &ext_80387_constants_table[i]);
4953 ext_80387_constants_init = 1;
4956 /* Return true if the constant is something that can be loaded with
4957 a special instruction. */
4960 standard_80387_constant_p (rtx x)
4964 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4967 if (x == CONST0_RTX (GET_MODE (x)))
4969 if (x == CONST1_RTX (GET_MODE (x)))
4972 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4974 /* For XFmode constants, try to find a special 80387 instruction when
4975 optimizing for size or on those CPUs that benefit from them. */
4976 if (GET_MODE (x) == XFmode
4977 && (optimize_size || TARGET_EXT_80387_CONSTANTS))
4981 if (! ext_80387_constants_init)
4982 init_ext_80387_constants ();
4984 for (i = 0; i < 5; i++)
4985 if (real_identical (&r, &ext_80387_constants_table[i]))
4989 /* A load of the constant -0.0 or -1.0 will be split into an
4990 fldz;fchs or fld1;fchs sequence. */
4991 if (real_isnegzero (&r))
4993 if (real_identical (&r, &dconstm1))
4999 /* Return the opcode of the special instruction to be used to load
5003 standard_80387_constant_opcode (rtx x)
5005 switch (standard_80387_constant_p (x))
5029 /* Return the CONST_DOUBLE representing the 80387 constant that is
5030 loaded by the specified special instruction. The argument IDX
5031 matches the return value from standard_80387_constant_p. */
5034 standard_80387_constant_rtx (int idx)
5038 if (! ext_80387_constants_init)
5039 init_ext_80387_constants ();
5055 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
5059 /* Return 1 if mode is a valid mode for SSE. */
5061 standard_sse_mode_p (enum machine_mode mode)
5078 /* Return 1 if X is an FP constant we can load into an SSE register without using memory.
5081 standard_sse_constant_p (rtx x)
5083 enum machine_mode mode = GET_MODE (x);
5085 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
5087 if (vector_all_ones_operand (x, mode)
5088 && standard_sse_mode_p (mode))
5089 return TARGET_SSE2 ? 2 : -1;
5094 /* Return the opcode of the special instruction to be used to load
5098 standard_sse_constant_opcode (rtx insn, rtx x)
5100 switch (standard_sse_constant_p (x))
5103 if (get_attr_mode (insn) == MODE_V4SF)
5104 return "xorps\t%0, %0";
5105 else if (get_attr_mode (insn) == MODE_V2DF)
5106 return "xorpd\t%0, %0";
5108 return "pxor\t%0, %0";
5110 return "pcmpeqd\t%0, %0";
5115 /* Returns 1 if OP contains a symbol reference */
5118 symbolic_reference_mentioned_p (rtx op)
5123 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
5126 fmt = GET_RTX_FORMAT (GET_CODE (op));
5127 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
5133 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
5134 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
5138 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
5145 /* Return 1 if it is appropriate to emit `ret' instructions in the
5146 body of a function. Do this only if the epilogue is simple, needing a
5147 couple of insns. Prior to reloading, we can't tell how many registers
5148 must be saved, so return 0 then. Return 0 if there is no frame
5149 marker to de-allocate. */
5152 ix86_can_use_return_insn_p (void)
5154 struct ix86_frame frame;
5156 if (! reload_completed || frame_pointer_needed)
5159 /* Don't allow popping more than 32k bytes of arguments, since that's
5160 all we can do with one instruction. */
5161 if (current_function_pops_args
5162 && current_function_args_size >= 32768)
5165 ix86_compute_frame_layout (&frame);
5166 return frame.to_allocate == 0 && frame.nregs == 0;
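/* For instance (illustrative): after reload, a leaf function with no
   locals, no saved registers and no frame pointer has
   frame.to_allocate == 0 and frame.nregs == 0, so a bare ret suffices;
   anything left to deallocate forces the full epilogue instead.  */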
5169 /* Value should be nonzero if functions must have frame pointers.
5170 Zero means the frame pointer need not be set up (and parms may
5171 be accessed via the stack pointer) in functions that seem suitable. */
5174 ix86_frame_pointer_required (void)
5176 /* If we accessed previous frames, then the generated code expects
5177 to be able to access the saved ebp value in our frame. */
5178 if (cfun->machine->accesses_prev_frame)
5181 /* Several x86 OSes need a frame pointer for other reasons,
5182 usually pertaining to setjmp. */
5183 if (SUBTARGET_FRAME_POINTER_REQUIRED)
5186 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
5187 the frame pointer by default. Turn it back on now if we've not
5188 got a leaf function. */
5189 if (TARGET_OMIT_LEAF_FRAME_POINTER
5190 && (!current_function_is_leaf
5191 || ix86_current_function_calls_tls_descriptor))
5194 if (current_function_profile)
5200 /* Record that the current function accesses previous call frames. */
5203 ix86_setup_frame_addresses (void)
5205 cfun->machine->accesses_prev_frame = 1;
5208 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
5209 # define USE_HIDDEN_LINKONCE 1
5211 # define USE_HIDDEN_LINKONCE 0
5214 static int pic_labels_used;
5216 /* Fills in the label name that should be used for a pc thunk for
5217 the given register. */
5220 get_pc_thunk_name (char name[32], unsigned int regno)
5222 gcc_assert (!TARGET_64BIT);
5224 if (USE_HIDDEN_LINKONCE)
5225 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5227 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5231 /* This function generates code for -fpic that loads %ebx with
5232 the return address of the caller and then returns. */
5235 ix86_file_end (void)
5240 for (regno = 0; regno < 8; ++regno)
5244 if (! ((pic_labels_used >> regno) & 1))
5247 get_pc_thunk_name (name, regno);
5252 switch_to_section (darwin_sections[text_coal_section]);
5253 fputs ("\t.weak_definition\t", asm_out_file);
5254 assemble_name (asm_out_file, name);
5255 fputs ("\n\t.private_extern\t", asm_out_file);
5256 assemble_name (asm_out_file, name);
5257 fputs ("\n", asm_out_file);
5258 ASM_OUTPUT_LABEL (asm_out_file, name);
5262 if (USE_HIDDEN_LINKONCE)
5266 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5268 TREE_PUBLIC (decl) = 1;
5269 TREE_STATIC (decl) = 1;
5270 DECL_ONE_ONLY (decl) = 1;
5272 (*targetm.asm_out.unique_section) (decl, 0);
5273 switch_to_section (get_named_section (decl, NULL, 0));
5275 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5276 fputs ("\t.hidden\t", asm_out_file);
5277 assemble_name (asm_out_file, name);
5278 fputc ('\n', asm_out_file);
5279 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5283 switch_to_section (text_section);
5284 ASM_OUTPUT_LABEL (asm_out_file, name);
5287 xops[0] = gen_rtx_REG (SImode, regno);
5288 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5289 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5290 output_asm_insn ("ret", xops);
5293 if (NEED_INDICATE_EXEC_STACK)
5294 file_end_indicate_exec_stack ();
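/* The thunk emitted above amounts to the following, shown for %ebx and
   the hidden-linkonce naming scheme (illustrative):

     __i686.get_pc_thunk.bx:
             movl (%esp), %ebx
             ret

   i.e. it copies its own return address - the address of the insn
   following the call - into the chosen register.  */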
5297 /* Emit code for the SET_GOT patterns. */
5300 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5306 if (TARGET_VXWORKS_RTP && flag_pic)
5308 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
5309 xops[2] = gen_rtx_MEM (Pmode,
5310 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
5311 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5313 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
5314 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
5315 an unadorned address. */
5316 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
5317 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
5318 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
5322 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5324 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5326 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5329 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5331 output_asm_insn ("call\t%a2", xops);
5334 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5335 is what will be referenced by the Mach-O PIC subsystem. */
5337 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5340 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5341 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5344 output_asm_insn ("pop{l}\t%0", xops);
5349 get_pc_thunk_name (name, REGNO (dest));
5350 pic_labels_used |= 1 << REGNO (dest);
5352 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5353 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5354 output_asm_insn ("call\t%X2", xops);
5355 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5356 is what will be referenced by the Mach-O PIC subsystem. */
5359 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5361 targetm.asm_out.internal_label (asm_out_file, "L",
5362 CODE_LABEL_NUMBER (label));
5369 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5370 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5372 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
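/* Schematic AT&T output of the non-thunk path above (a sketch, not
   literal compiler output):

     call .L2
   .L2: popl %ebx
        addl $_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx

   With deep branch prediction the call targets the get_pc_thunk stub
   instead and the add uses plain $_GLOBAL_OFFSET_TABLE_; either way the
   destination register ends up pointing at the GOT.  */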
5377 /* Generate a "push" pattern for input ARG. */
5382 return gen_rtx_SET (VOIDmode,
5384 gen_rtx_PRE_DEC (Pmode,
5385 stack_pointer_rtx)),
5389 /* Return >= 0 if there is an unused call-clobbered register available
5390 for the entire function. */
5393 ix86_select_alt_pic_regnum (void)
5395 if (current_function_is_leaf && !current_function_profile
5396 && !ix86_current_function_calls_tls_descriptor)
5399 for (i = 2; i >= 0; --i)
5400 if (!regs_ever_live[i])
5404 return INVALID_REGNUM;
5407 /* Return 1 if we need to save REGNO. */
5409 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5411 if (pic_offset_table_rtx
5412 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5413 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5414 || current_function_profile
5415 || current_function_calls_eh_return
5416 || current_function_uses_const_pool))
5418 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5423 if (current_function_calls_eh_return && maybe_eh_return)
5428 unsigned test = EH_RETURN_DATA_REGNO (i);
5429 if (test == INVALID_REGNUM)
5436 if (cfun->machine->force_align_arg_pointer
5437 && regno == REGNO (cfun->machine->force_align_arg_pointer))
5440 return (regs_ever_live[regno]
5441 && !call_used_regs[regno]
5442 && !fixed_regs[regno]
5443 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
5446 /* Return number of registers to be saved on the stack. */
5449 ix86_nsaved_regs (void)
5454 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5455 if (ix86_save_reg (regno, true))
5460 /* Return the offset between two registers, one to be eliminated, and the other
5461 its replacement, at the start of a routine. */
5464 ix86_initial_elimination_offset (int from, int to)
5466 struct ix86_frame frame;
5467 ix86_compute_frame_layout (&frame);
5469 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5470 return frame.hard_frame_pointer_offset;
5471 else if (from == FRAME_POINTER_REGNUM
5472 && to == HARD_FRAME_POINTER_REGNUM)
5473 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
5476 gcc_assert (to == STACK_POINTER_REGNUM);
5478 if (from == ARG_POINTER_REGNUM)
5479 return frame.stack_pointer_offset;
5481 gcc_assert (from == FRAME_POINTER_REGNUM);
5482 return frame.stack_pointer_offset - frame.frame_pointer_offset;
5486 /* Fill the structure ix86_frame describing the frame of the currently computed function. */
5489 ix86_compute_frame_layout (struct ix86_frame *frame)
5491 HOST_WIDE_INT total_size;
5492 unsigned int stack_alignment_needed;
5493 HOST_WIDE_INT offset;
5494 unsigned int preferred_alignment;
5495 HOST_WIDE_INT size = get_frame_size ();
5497 frame->nregs = ix86_nsaved_regs ();
5500 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
5501 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
5503 /* During the reload iteration the number of registers saved can change.
5504 Recompute the value as needed. Do not recompute when the number of registers
5505 didn't change, as reload does multiple calls to the function and does not
5506 expect the decision to change within a single iteration. */
5508 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
5510 int count = frame->nregs;
5512 cfun->machine->use_fast_prologue_epilogue_nregs = count;
5513 /* The fast prologue uses moves instead of pushes to save registers. This
5514 is significantly longer, but also executes faster, as modern hardware
5515 can execute the moves in parallel, but can't do that for push/pop.
5517 Be careful about choosing which prologue to emit: when the function takes
5518 many instructions to execute we may use the slow version, as well as when
5519 the function is known to be outside a hot spot (this is known with
5520 feedback only). Weight the size of the function by the number of registers
5521 to save, as it is cheap to use one or two push instructions but very
5522 slow to use many of them. */
5524 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
5525 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
5526 || (flag_branch_probabilities
5527 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
5528 cfun->machine->use_fast_prologue_epilogue = false;
5530 cfun->machine->use_fast_prologue_epilogue
5531 = !expensive_function_p (count);
5533 if (TARGET_PROLOGUE_USING_MOVE
5534 && cfun->machine->use_fast_prologue_epilogue)
5535 frame->save_regs_using_mov = true;
5537 frame->save_regs_using_mov = false;
5540 /* Skip return address and saved base pointer. */
5541 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
5543 frame->hard_frame_pointer_offset = offset;
5545 /* Do some sanity checking of stack_alignment_needed and
5546 preferred_alignment, since the i386 port is the only one using these
5547 features, and they may break easily. */
5549 gcc_assert (!size || stack_alignment_needed);
5550 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
5551 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5552 gcc_assert (stack_alignment_needed
5553 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5555 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
5556 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5558 /* Register save area */
5559 offset += frame->nregs * UNITS_PER_WORD;
5562 if (ix86_save_varrargs_registers)
5564 offset += X86_64_VARARGS_SIZE;
5565 frame->va_arg_size = X86_64_VARARGS_SIZE;
5568 frame->va_arg_size = 0;
5570 /* Align the start of the frame for the local variables. */
5571 frame->padding1 = ((offset + stack_alignment_needed - 1)
5572 & -stack_alignment_needed) - offset;
5574 offset += frame->padding1;
5576 /* Frame pointer points here. */
5577 frame->frame_pointer_offset = offset;
5581 /* Add the outgoing arguments area. This can be skipped if we eliminated
5582 all the function calls as dead code.
5583 Skipping is however impossible when the function calls alloca. The alloca
5584 expander assumes that the last current_function_outgoing_args_size bytes
5585 of the stack frame are unused. */
5586 if (ACCUMULATE_OUTGOING_ARGS
5587 && (!current_function_is_leaf || current_function_calls_alloca
5588 || ix86_current_function_calls_tls_descriptor))
5590 offset += current_function_outgoing_args_size;
5591 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5594 frame->outgoing_arguments_size = 0;
5596 /* Align stack boundary. Only needed if we're calling another function or using alloca. */
5598 if (!current_function_is_leaf || current_function_calls_alloca
5599 || ix86_current_function_calls_tls_descriptor)
5600 frame->padding2 = ((offset + preferred_alignment - 1)
5601 & -preferred_alignment) - offset;
5603 frame->padding2 = 0;
5605 offset += frame->padding2;
5607 /* We've reached the end of the stack frame. */
5608 frame->stack_pointer_offset = offset;
5610 /* The size the prologue needs to allocate. */
5611 frame->to_allocate =
5612 (size + frame->padding1 + frame->padding2
5613 + frame->outgoing_arguments_size + frame->va_arg_size);
5615 if ((!frame->to_allocate && frame->nregs <= 1)
5616 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5617 frame->save_regs_using_mov = false;
5619 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5620 && current_function_is_leaf
5621 && !ix86_current_function_calls_tls_descriptor)
5623 frame->red_zone_size = frame->to_allocate;
5624 if (frame->save_regs_using_mov)
5625 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5626 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5627 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5630 frame->red_zone_size = 0;
5631 frame->to_allocate -= frame->red_zone_size;
5632 frame->stack_pointer_offset -= frame->red_zone_size;
5634 fprintf (stderr, "\n");
5635 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
5636 fprintf (stderr, "size: %ld\n", (long)size);
5637 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
5638 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
5639 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
5640 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
5641 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
5642 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
5643 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
5644 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
5645 (long)frame->hard_frame_pointer_offset);
5646 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
5647 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
5648 fprintf (stderr, "current_function_calls_alloca: %ld\n", (long)current_function_calls_alloca);
5649 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
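/* Stack layout implied by the offset accumulation above (stack grows
   downward; illustrative):

     [return address]
     [saved %ebp, if frame_pointer_needed]  <- hard_frame_pointer_offset
     [saved registers]       nregs * UNITS_PER_WORD
     [varargs save area]     va_arg_size
     [padding1]                             <- frame_pointer_offset
     [local variables]       size
     [outgoing arguments]    outgoing_arguments_size
     [padding2]                             <- stack_pointer_offset

   On 64-bit targets the red zone portion is then carved back out of
   to_allocate, as done just above.  */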
5653 /* Emit code to save registers in the prologue. */
5656 ix86_emit_save_regs (void)
5661 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
5662 if (ix86_save_reg (regno, true))
5664 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5665 RTX_FRAME_RELATED_P (insn) = 1;
5669 /* Emit code to save registers using MOV insns. The first register
5670 is stored at POINTER + OFFSET. */
5672 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5677 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5678 if (ix86_save_reg (regno, true))
5680 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5682 gen_rtx_REG (Pmode, regno));
5683 RTX_FRAME_RELATED_P (insn) = 1;
5684 offset += UNITS_PER_WORD;
5688 /* Expand prologue or epilogue stack adjustment.
5689 The pattern exists to put a dependency on all ebp-based memory accesses.
5690 STYLE should be negative if instructions should be marked as frame related,
5691 zero if the %r11 register is live and cannot be freely used, and positive otherwise. */
5695 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5700 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5701 else if (x86_64_immediate_operand (offset, DImode))
5702 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5706 /* r11 is used by indirect sibcall return as well, set before the
5707 epilogue and used after the epilogue. ATM indirect sibcall
5708 shouldn't be used together with huge frame sizes in one
5709 function because of the frame_size check in sibcall.c. */
5711 r11 = gen_rtx_REG (DImode, R11_REG);
5712 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5714 RTX_FRAME_RELATED_P (insn) = 1;
5715 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5719 RTX_FRAME_RELATED_P (insn) = 1;
5722 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
5725 ix86_internal_arg_pointer (void)
5727 bool has_force_align_arg_pointer =
5728 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
5729 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
5730 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
5731 && DECL_NAME (current_function_decl)
5732 && MAIN_NAME_P (DECL_NAME (current_function_decl))
5733 && DECL_FILE_SCOPE_P (current_function_decl))
5734 || ix86_force_align_arg_pointer
5735 || has_force_align_arg_pointer)
5737 /* Nested functions can't realign the stack due to a register conflict. */
5739 if (DECL_CONTEXT (current_function_decl)
5740 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
5742 if (ix86_force_align_arg_pointer)
5743 warning (0, "-mstackrealign ignored for nested functions");
5744 if (has_force_align_arg_pointer)
5745 error ("%s not supported for nested functions",
5746 ix86_force_align_arg_pointer_string);
5747 return virtual_incoming_args_rtx;
5749 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
5750 return copy_to_reg (cfun->machine->force_align_arg_pointer);
5753 return virtual_incoming_args_rtx;
5756 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
5757 This is called from dwarf2out.c to emit call frame instructions
5758 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
5760 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
5762 rtx unspec = SET_SRC (pattern);
5763 gcc_assert (GET_CODE (unspec) == UNSPEC);
5767 case UNSPEC_REG_SAVE:
5768 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
5769 SET_DEST (pattern));
5771 case UNSPEC_DEF_CFA:
5772 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
5773 INTVAL (XVECEXP (unspec, 0, 0)));
5780 /* Expand the prologue into a bunch of separate insns. */
5783 ix86_expand_prologue (void)
5787 struct ix86_frame frame;
5788 HOST_WIDE_INT allocate;
5790 ix86_compute_frame_layout (&frame);
5792 if (cfun->machine->force_align_arg_pointer)
5796 /* Grab the argument pointer. */
5797 x = plus_constant (stack_pointer_rtx, 4);
5798 y = cfun->machine->force_align_arg_pointer;
5799 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
5800 RTX_FRAME_RELATED_P (insn) = 1;
5802 /* The unwind info consists of two parts: install the fafp as the cfa,
5803 and record the fafp as the "save register" of the stack pointer.
5804 The latter is there in order that the unwinder can see where it
5805 should restore the stack pointer across the `and' insn. */
5806 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
5807 x = gen_rtx_SET (VOIDmode, y, x);
5808 RTX_FRAME_RELATED_P (x) = 1;
5809 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
5811 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
5812 RTX_FRAME_RELATED_P (y) = 1;
5813 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5814 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5815 REG_NOTES (insn) = x;
5817 /* Align the stack. */
5818 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5821 /* And here we cheat like madmen with the unwind info. We force the
5822 cfa register back to sp+4, which is exactly what it was at the
5823 start of the function. Re-pushing the return address results in
5824 the return at the same spot relative to the cfa, and thus is
5825 correct wrt the unwind info. */
5826 x = cfun->machine->force_align_arg_pointer;
5827 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5828 insn = emit_insn (gen_push (x));
5829 RTX_FRAME_RELATED_P (insn) = 1;
5832 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5833 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5834 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5835 REG_NOTES (insn) = x;
5838 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5839 slower on all targets. Also sdb doesn't like it. */
5841 if (frame_pointer_needed)
5843 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5844 RTX_FRAME_RELATED_P (insn) = 1;
5846 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5847 RTX_FRAME_RELATED_P (insn) = 1;
5850 allocate = frame.to_allocate;
5852 if (!frame.save_regs_using_mov)
5853 ix86_emit_save_regs ();
5855 allocate += frame.nregs * UNITS_PER_WORD;
5857 /* When using the red zone we may start saving registers before allocating
5858 the stack frame, saving one cycle of the prologue. */
5859 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5860 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5861 : stack_pointer_rtx,
5862 -frame.nregs * UNITS_PER_WORD);
5866 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5867 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5868 GEN_INT (-allocate), -1);
5871 /* Only valid for Win32. */
5872 rtx eax = gen_rtx_REG (Pmode, 0);
5876 gcc_assert (!TARGET_64BIT || TARGET_64BIT_MS_ABI);
5878 if (TARGET_64BIT_MS_ABI)
5881 eax_live = ix86_eax_live_at_start_p ();
5885 emit_insn (gen_push (eax));
5886 allocate -= UNITS_PER_WORD;
5889 emit_move_insn (eax, GEN_INT (allocate));
5892 insn = gen_allocate_stack_worker_64 (eax);
5894 insn = gen_allocate_stack_worker_32 (eax);
5895 insn = emit_insn (insn);
5896 RTX_FRAME_RELATED_P (insn) = 1;
5897 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5898 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5899 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5900 t, REG_NOTES (insn));
5904 if (frame_pointer_needed)
5905 t = plus_constant (hard_frame_pointer_rtx,
5908 - frame.nregs * UNITS_PER_WORD);
5910 t = plus_constant (stack_pointer_rtx, allocate);
5911 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
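/* A sketch of the probed path above: the allocation size is moved into
   eax and the target's stack allocation worker is called, since some
   ABIs (notably Windows) require each new stack page to be touched in
   order.  Illustratively:

	movl	$65536, %eax		; bytes to allocate (example value)
	call	<stack probe worker>	; e.g. __chkstk or __alloca, per target

   The REG_FRAME_RELATED_EXPR note attached above records the net stack
   pointer adjustment, so the unwinder does not have to understand the
   worker call itself.  */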
5915 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5917 if (!frame_pointer_needed || !frame.to_allocate)
5918 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5920 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5921 -frame.nregs * UNITS_PER_WORD);
5924 pic_reg_used = false;
5925 if (pic_offset_table_rtx
5926 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5927 || current_function_profile))
5929 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5931 if (alt_pic_reg_used != INVALID_REGNUM)
5932 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5934 pic_reg_used = true;
5941 if (ix86_cmodel == CM_LARGE_PIC)
5943 rtx tmp_reg = gen_rtx_REG (DImode,
5944 FIRST_REX_INT_REG + 3 /* R11 */);
5945 rtx label = gen_label_rtx ();
5947 LABEL_PRESERVE_P (label) = 1;
5948 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
5949 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
5950 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5951 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
5952 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5953 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
5954 pic_offset_table_rtx, tmp_reg));
5957 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5960 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5962 /* Even with accurate pre-reload life analysis, we can wind up
5963 deleting all references to the pic register after reload.
5964 Consider the case where cross-jumping unifies two sides of a branch
5965 controlled by a comparison vs the only read from a global.
5966 In that case, allow the set_got to be deleted, though we're
5967 too late to do anything about the ebx save in the prologue. */
5968 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5971 /* Prevent function calls from being scheduled before the call to mcount.
5972 In the pic_reg_used case, make sure that the GOT load isn't deleted. */
5973 if (current_function_profile)
5974 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
5977 /* Emit code to restore saved registers using MOV insns. The first register
5978 is restored from POINTER + OFFSET. */
5980 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5981 int maybe_eh_return)
5984 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5986 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5987 if (ix86_save_reg (regno, maybe_eh_return))
5989 /* Ensure that adjust_address won't be forced to produce a pointer
5990 out of the range allowed by the x86-64 instruction set. */
5991 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5995 r11 = gen_rtx_REG (DImode, R11_REG);
5996 emit_move_insn (r11, GEN_INT (offset));
5997 emit_insn (gen_adddi3 (r11, r11, pointer));
5998 base_address = gen_rtx_MEM (Pmode, r11);
6001 emit_move_insn (gen_rtx_REG (Pmode, regno),
6002 adjust_address (base_address, Pmode, offset));
6003 offset += UNITS_PER_WORD;
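/* For example, with a frame so large that a saved register sits more
   than 2GB away from the base (the offset no longer fits a signed
   32-bit displacement), the loop above emits, illustratively:

	movabsq	$0x100000000, %r11
	addq	%rsp, %r11
	movq	(%r11), %rbx

   rather than the unencodable movq 0x100000000(%rsp), %rbx.  */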
6007 /* Restore function stack, frame, and registers. */
6010 ix86_expand_epilogue (int style)
6013 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
6014 struct ix86_frame frame;
6015 HOST_WIDE_INT offset;
6017 ix86_compute_frame_layout (&frame);
6019 /* Calculate the start of the saved registers relative to ebp. Special care
6020 must be taken for the normal return case of a function using
6021 eh_return: the eax and edx registers are marked as saved, but not
6022 restored along this path. */
6023 offset = frame.nregs;
6024 if (current_function_calls_eh_return && style != 2)
6026 offset *= -UNITS_PER_WORD;
6028 /* If we're only restoring one register and sp is not valid, then
6029 use a move instruction to restore the register, since it's
6030 less work than reloading sp and popping the register.
6032 The default code results in a stack adjustment using an add/lea instruction,
6033 while this code results in a LEAVE instruction (or discrete equivalent),
6034 so it is profitable in some other cases as well, especially when there
6035 are no registers to restore. We also use this code when TARGET_USE_LEAVE
6036 and there is exactly one register to pop. This heuristic may need some
6037 tuning in the future. */
6038 if ((!sp_valid && frame.nregs <= 1)
6039 || (TARGET_EPILOGUE_USING_MOVE
6040 && cfun->machine->use_fast_prologue_epilogue
6041 && (frame.nregs > 1 || frame.to_allocate))
6042 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
6043 || (frame_pointer_needed && TARGET_USE_LEAVE
6044 && cfun->machine->use_fast_prologue_epilogue
6045 && frame.nregs == 1)
6046 || current_function_calls_eh_return)
6048 /* Restore registers. We can use ebp or esp to address the memory
6049 locations. If both are available, default to ebp, since offsets
6050 are known to be small. The only exception is esp pointing directly to the
6051 end of the block of saved registers, where we may simplify the addressing mode. */
6054 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
6055 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
6056 frame.to_allocate, style == 2);
6058 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
6059 offset, style == 2);
6061 /* eh_return epilogues need %ecx added to the stack pointer. */
6064 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
6066 if (frame_pointer_needed)
6068 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
6069 tmp = plus_constant (tmp, UNITS_PER_WORD);
6070 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
6072 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
6073 emit_move_insn (hard_frame_pointer_rtx, tmp);
6075 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
6080 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
6081 tmp = plus_constant (tmp, (frame.to_allocate
6082 + frame.nregs * UNITS_PER_WORD));
6083 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
6086 else if (!frame_pointer_needed)
6087 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
6088 GEN_INT (frame.to_allocate
6089 + frame.nregs * UNITS_PER_WORD),
6091 /* If not an i386, mov & pop is faster than "leave". */
6092 else if (TARGET_USE_LEAVE || optimize_size
6093 || !cfun->machine->use_fast_prologue_epilogue)
6094 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
6097 pro_epilogue_adjust_stack (stack_pointer_rtx,
6098 hard_frame_pointer_rtx,
6101 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
6103 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
6108 /* The first step is to deallocate the stack frame so that we can
6109 pop the registers. */
6112 gcc_assert (frame_pointer_needed);
6113 pro_epilogue_adjust_stack (stack_pointer_rtx,
6114 hard_frame_pointer_rtx,
6115 GEN_INT (offset), style);
6117 else if (frame.to_allocate)
6118 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
6119 GEN_INT (frame.to_allocate), style);
6121 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6122 if (ix86_save_reg (regno, false))
6125 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
6127 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
6129 if (frame_pointer_needed)
6131 /* Leave results in shorter dependency chains on CPUs that are
6132 able to grok it fast. */
6133 if (TARGET_USE_LEAVE)
6134 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
6135 else if (TARGET_64BIT)
6136 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
6138 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
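/* Architecturally, "leave" is equivalent to the two-insn sequence
   "mov %ebp, %esp; pop %ebp" (likewise in 64bit mode), so both
   branches above restore the frame pointer; the choice between them is
   purely a tuning decision.  */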
6142 if (cfun->machine->force_align_arg_pointer)
6144 emit_insn (gen_addsi3 (stack_pointer_rtx,
6145 cfun->machine->force_align_arg_pointer,
6149 /* Sibcall epilogues don't want a return instruction. */
6153 if (current_function_pops_args && current_function_args_size)
6155 rtx popc = GEN_INT (current_function_pops_args);
6157 /* i386 can only pop 64K bytes. If asked to pop more, pop the
6158 return address, do an explicit add, and jump indirectly to the caller. */
6161 if (current_function_pops_args >= 65536)
6163 rtx ecx = gen_rtx_REG (SImode, 2);
6165 /* There is no "pascal" calling convention in any 64bit ABI. */
6166 gcc_assert (!TARGET_64BIT);
6168 emit_insn (gen_popsi1 (ecx));
6169 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
6170 emit_jump_insn (gen_return_indirect_internal (ecx));
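/* The three insns above stand in for the impossible "ret $N": the
   immediate of ret is only 16 bits wide, so for N >= 64K we emit,
   illustratively:

	popl	%ecx			; fetch the return address
	addl	$N, %esp		; pop the arguments explicitly
	jmp	*%ecx			; return to the caller  */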
6173 emit_jump_insn (gen_return_pop_internal (popc));
6176 emit_jump_insn (gen_return_internal ());
6179 /* Reset from the function's potential modifications. */
6182 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6183 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6185 if (pic_offset_table_rtx)
6186 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
6188 /* Mach-O doesn't support labels at the end of objects, so if
6189 it looks like we might want one, insert a NOP. */
6191 rtx insn = get_last_insn ();
6194 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
6195 insn = PREV_INSN (insn);
6199 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
6200 fputs ("\tnop\n", file);
6206 /* Extract the parts of an RTL expression that is a valid memory address
6207 for an instruction. Return 0 if the structure of the address is
6208 grossly off. Return -1 if the address contains ASHIFT, so it is not
6209 strictly valid, but still used for computing the length of the lea instruction. */
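/* As a worked example (not from the original source): the address

	(plus:SI (mult:SI (reg:SI %eax) (const_int 4))
		 (plus:SI (reg:SI %ebx) (const_int 12)))

   decomposes into base = %ebx, index = %eax, scale = 4, disp = 12,
   i.e. the operand written as 12(%ebx,%eax,4) in AT&T syntax.  */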
6212 ix86_decompose_address (rtx addr, struct ix86_address *out)
6214 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
6215 rtx base_reg, index_reg;
6216 HOST_WIDE_INT scale = 1;
6217 rtx scale_rtx = NULL_RTX;
6219 enum ix86_address_seg seg = SEG_DEFAULT;
6221 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
6223 else if (GET_CODE (addr) == PLUS)
6233 addends[n++] = XEXP (op, 1);
6236 while (GET_CODE (op) == PLUS);
6241 for (i = n; i >= 0; --i)
6244 switch (GET_CODE (op))
6249 index = XEXP (op, 0);
6250 scale_rtx = XEXP (op, 1);
6254 if (XINT (op, 1) == UNSPEC_TP
6255 && TARGET_TLS_DIRECT_SEG_REFS
6256 && seg == SEG_DEFAULT)
6257 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
6286 else if (GET_CODE (addr) == MULT)
6288 index = XEXP (addr, 0); /* index*scale */
6289 scale_rtx = XEXP (addr, 1);
6291 else if (GET_CODE (addr) == ASHIFT)
6295 /* We're called for lea too, which implements ashift on occasion. */
6296 index = XEXP (addr, 0);
6297 tmp = XEXP (addr, 1);
6298 if (!CONST_INT_P (tmp))
6300 scale = INTVAL (tmp);
6301 if ((unsigned HOST_WIDE_INT) scale > 3)
6307 disp = addr; /* displacement */
6309 /* Extract the integral value of scale. */
6312 if (!CONST_INT_P (scale_rtx))
6314 scale = INTVAL (scale_rtx);
6317 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6318 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6320 /* Allow arg pointer and stack pointer as index if there is no scaling. */
6321 if (base_reg && index_reg && scale == 1
6322 && (index_reg == arg_pointer_rtx
6323 || index_reg == frame_pointer_rtx
6324 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6327 tmp = base, base = index, index = tmp;
6328 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6331 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6332 if ((base_reg == hard_frame_pointer_rtx
6333 || base_reg == frame_pointer_rtx
6334 || base_reg == arg_pointer_rtx) && !disp)
6337 /* Special case: on K6, [%esi] makes the instruction vector decoded.
6338 Avoid this by transforming to [%esi+0]. */
6339 if (ix86_tune == PROCESSOR_K6 && !optimize_size
6340 && base_reg && !index_reg && !disp
6342 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6345 /* Special case: encode reg+reg instead of reg*2. */
6346 if (!base && index && scale && scale == 2)
6347 base = index, base_reg = index_reg, scale = 1;
6349 /* Special case: scaling cannot be encoded without base or displacement. */
6350 if (!base && !disp && index && scale != 1)
6362 /* Return the cost of the memory address x.
6363 For i386, it is better to use a complex address than let gcc copy
6364 the address into a reg and make a new pseudo. But not if the address
6365 requires two regs - that would mean more pseudos with longer lifetimes. */
6368 ix86_address_cost (rtx x)
6370 struct ix86_address parts;
6372 int ok = ix86_decompose_address (x, &parts);
6376 if (parts.base && GET_CODE (parts.base) == SUBREG)
6377 parts.base = SUBREG_REG (parts.base);
6378 if (parts.index && GET_CODE (parts.index) == SUBREG)
6379 parts.index = SUBREG_REG (parts.index);
6381 /* More complex memory references are better. */
6382 if (parts.disp && parts.disp != const0_rtx)
6384 if (parts.seg != SEG_DEFAULT)
6387 /* Attempt to minimize the number of registers in the address. */
6389 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6391 && (!REG_P (parts.index)
6392 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6396 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6398 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6399 && parts.base != parts.index)
6402 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
6403 since its predecode logic can't detect the length of instructions,
6404 and it degenerates to vector decoding. Increase the cost of such
6405 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
6406 to split such addresses or even refuse such addresses at all.
6408 The following addressing modes are affected:
	[base+scale*index]
	[scale*index+disp]
	[base+index]
6413 The first and last case may be avoidable by explicitly coding the zero in
6414 the memory address, but I don't have an AMD-K6 machine handy to check this theory. */
6418 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6419 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6420 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6426 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
6427 this is used to form addresses to local data when -fPIC is in use. */
6431 darwin_local_data_pic (rtx disp)
6433 if (GET_CODE (disp) == MINUS)
6435 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
6436 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
6437 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
6439 const char *sym_name = XSTR (XEXP (disp, 1), 0);
6440 if (! strcmp (sym_name, "<pic base>"))
6448 /* Determine if a given RTX is a valid constant. We already know this
6449 satisfies CONSTANT_P. */
6452 legitimate_constant_p (rtx x)
6454 switch (GET_CODE (x))
6459 if (GET_CODE (x) == PLUS)
6461 if (!CONST_INT_P (XEXP (x, 1)))
6466 if (TARGET_MACHO && darwin_local_data_pic (x))
6469 /* Only some unspecs are valid as "constants". */
6470 if (GET_CODE (x) == UNSPEC)
6471 switch (XINT (x, 1))
6476 return TARGET_64BIT;
6479 x = XVECEXP (x, 0, 0);
6480 return (GET_CODE (x) == SYMBOL_REF
6481 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6483 x = XVECEXP (x, 0, 0);
6484 return (GET_CODE (x) == SYMBOL_REF
6485 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
6490 /* We must have drilled down to a symbol. */
6491 if (GET_CODE (x) == LABEL_REF)
6493 if (GET_CODE (x) != SYMBOL_REF)
6498 /* TLS symbols are never valid. */
6499 if (SYMBOL_REF_TLS_MODEL (x))
6502 /* DLLIMPORT symbols are never valid. */
6503 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
6504 && SYMBOL_REF_DLLIMPORT_P (x))
6509 if (GET_MODE (x) == TImode
6510 && x != CONST0_RTX (TImode)
6516 if (x == CONST0_RTX (GET_MODE (x)))
6524 /* Otherwise we handle everything else in the move patterns. */
6528 /* Determine if it's legal to put X into the constant pool. This
6529 is not possible for the address of thread-local symbols, which
6530 is checked above. */
6533 ix86_cannot_force_const_mem (rtx x)
6535 /* We can always put integral constants and vectors in memory. */
6536 switch (GET_CODE (x))
6546 return !legitimate_constant_p (x);
6549 /* Determine if a given RTX is a valid constant address. */
6552 constant_address_p (rtx x)
6554 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
6557 /* Nonzero if the constant value X is a legitimate general operand
6558 when generating PIC code. It is given that flag_pic is on and
6559 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
6562 legitimate_pic_operand_p (rtx x)
6566 switch (GET_CODE (x))
6569 inner = XEXP (x, 0);
6570 if (GET_CODE (inner) == PLUS
6571 && CONST_INT_P (XEXP (inner, 1)))
6572 inner = XEXP (inner, 0);
6574 /* Only some unspecs are valid as "constants". */
6575 if (GET_CODE (inner) == UNSPEC)
6576 switch (XINT (inner, 1))
6581 return TARGET_64BIT;
6583 x = XVECEXP (inner, 0, 0);
6584 return (GET_CODE (x) == SYMBOL_REF
6585 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6593 return legitimate_pic_address_disp_p (x);
6600 /* Determine if a given CONST RTX is a valid memory displacement in PIC mode. */
6604 legitimate_pic_address_disp_p (rtx disp)
6608 /* In 64bit mode we can allow direct addresses of symbols and labels
6609 when they are not dynamic symbols. */
6612 rtx op0 = disp, op1;
6614 switch (GET_CODE (disp))
6620 if (GET_CODE (XEXP (disp, 0)) != PLUS)
6622 op0 = XEXP (XEXP (disp, 0), 0);
6623 op1 = XEXP (XEXP (disp, 0), 1);
6624 if (!CONST_INT_P (op1)
6625 || INTVAL (op1) >= 16*1024*1024
6626 || INTVAL (op1) < -16*1024*1024)
6628 if (GET_CODE (op0) == LABEL_REF)
6630 if (GET_CODE (op0) != SYMBOL_REF)
6635 /* TLS references should always be enclosed in UNSPEC. */
6636 if (SYMBOL_REF_TLS_MODEL (op0))
6638 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
6639 && ix86_cmodel != CM_LARGE_PIC)
6647 if (GET_CODE (disp) != CONST)
6649 disp = XEXP (disp, 0);
6653 /* It is unsafe to allow PLUS expressions; this limits the allowed distance
6654 of GOT table references. We should not need these anyway. */
6655 if (GET_CODE (disp) != UNSPEC
6656 || (XINT (disp, 1) != UNSPEC_GOTPCREL
6657 && XINT (disp, 1) != UNSPEC_GOTOFF
6658 && XINT (disp, 1) != UNSPEC_PLTOFF))
6661 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
6662 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
6668 if (GET_CODE (disp) == PLUS)
6670 if (!CONST_INT_P (XEXP (disp, 1)))
6672 disp = XEXP (disp, 0);
6676 if (TARGET_MACHO && darwin_local_data_pic (disp))
6679 if (GET_CODE (disp) != UNSPEC)
6682 switch (XINT (disp, 1))
6687 /* We need to check for both symbols and labels because VxWorks loads
6688 text labels with @GOT rather than @GOTOFF. See gotoff_operand for details. */
6690 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6691 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
6693 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
6694 While the ABI also specifies a 32bit relocation, we don't produce it in
6695 the small PIC model at all. */
6696 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6697 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
6699 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
6701 case UNSPEC_GOTTPOFF:
6702 case UNSPEC_GOTNTPOFF:
6703 case UNSPEC_INDNTPOFF:
6706 disp = XVECEXP (disp, 0, 0);
6707 return (GET_CODE (disp) == SYMBOL_REF
6708 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
6710 disp = XVECEXP (disp, 0, 0);
6711 return (GET_CODE (disp) == SYMBOL_REF
6712 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
6714 disp = XVECEXP (disp, 0, 0);
6715 return (GET_CODE (disp) == SYMBOL_REF
6716 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
6722 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
6723 memory address for an instruction. The MODE argument is the machine mode
6724 for the MEM expression that wants to use this address.
6726 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
6727 convert common non-canonical forms to canonical form so that they will be recognized. */
6731 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
6732 rtx addr, int strict)
6734 struct ix86_address parts;
6735 rtx base, index, disp;
6736 HOST_WIDE_INT scale;
6737 const char *reason = NULL;
6738 rtx reason_rtx = NULL_RTX;
6740 if (ix86_decompose_address (addr, &parts) <= 0)
6742 reason = "decomposition failed";
6747 index = parts.index;
6749 scale = parts.scale;
6751 /* Validate base register.
6753 Don't allow SUBREG's that span more than a word here. It can lead to spill
6754 failures when the base is one word out of a two word structure, which is
6755 represented internally as a DImode int. */
6764 else if (GET_CODE (base) == SUBREG
6765 && REG_P (SUBREG_REG (base))
6766 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
6768 reg = SUBREG_REG (base);
6771 reason = "base is not a register";
6775 if (GET_MODE (base) != Pmode)
6777 reason = "base is not in Pmode";
6781 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
6782 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6784 reason = "base is not valid";
6789 /* Validate index register.
6791 Don't allow SUBREG's that span more than a word here -- same as above. */
6800 else if (GET_CODE (index) == SUBREG
6801 && REG_P (SUBREG_REG (index))
6802 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
6804 reg = SUBREG_REG (index);
6807 reason = "index is not a register";
6811 if (GET_MODE (index) != Pmode)
6813 reason = "index is not in Pmode";
6817 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6818 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6820 reason = "index is not valid";
6825 /* Validate scale factor. */
6828 reason_rtx = GEN_INT (scale);
6831 reason = "scale without index";
6835 if (scale != 2 && scale != 4 && scale != 8)
6837 reason = "scale is not a valid multiplier";
6842 /* Validate displacement. */
6847 if (GET_CODE (disp) == CONST
6848 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6849 switch (XINT (XEXP (disp, 0), 1))
6851 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
6852 used. While the ABI also specifies 32bit relocations, we don't produce
6853 them at all and use IP relative addressing instead. */
6856 gcc_assert (flag_pic);
6858 goto is_legitimate_pic;
6859 reason = "64bit address unspec";
6862 case UNSPEC_GOTPCREL:
6863 gcc_assert (flag_pic);
6864 goto is_legitimate_pic;
6866 case UNSPEC_GOTTPOFF:
6867 case UNSPEC_GOTNTPOFF:
6868 case UNSPEC_INDNTPOFF:
6874 reason = "invalid address unspec";
6878 else if (SYMBOLIC_CONST (disp)
6882 && MACHOPIC_INDIRECT
6883 && !machopic_operand_p (disp)
6889 if (TARGET_64BIT && (index || base))
6891 /* foo@dtpoff(%rX) is ok. */
6892 if (GET_CODE (disp) != CONST
6893 || GET_CODE (XEXP (disp, 0)) != PLUS
6894 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6895 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
6896 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6897 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6899 reason = "non-constant pic memory reference";
6903 else if (! legitimate_pic_address_disp_p (disp))
6905 reason = "displacement is an invalid pic construct";
6909 /* This code used to verify that a symbolic pic displacement
6910 includes the pic_offset_table_rtx register.
6912 While this is a good idea, unfortunately these constructs may
6913 be created by the "adds using lea" optimization for incorrect code.
6922 Such code is nonsensical, but results in addressing the
6923 GOT table with a pic_offset_table_rtx base. We can't
6924 just refuse it easily, since it gets matched by
6925 the "addsi3" pattern, which later gets split to lea in the
6926 case the output register differs from the input. While this
6927 could be handled by a separate addsi pattern for this case
6928 that never results in lea, disabling this test seems to be the easier
6929 and correct fix for the crash. */
6931 else if (GET_CODE (disp) != LABEL_REF
6932 && !CONST_INT_P (disp)
6933 && (GET_CODE (disp) != CONST
6934 || !legitimate_constant_p (disp))
6935 && (GET_CODE (disp) != SYMBOL_REF
6936 || !legitimate_constant_p (disp)))
6938 reason = "displacement is not constant";
6941 else if (TARGET_64BIT
6942 && !x86_64_immediate_operand (disp, VOIDmode))
6944 reason = "displacement is out of range";
6949 /* Everything looks valid. */
6956 /* Return a unique alias set for the GOT. */
6958 static HOST_WIDE_INT
6959 ix86_GOT_alias_set (void)
6961 static HOST_WIDE_INT set = -1;
6963 set = new_alias_set ();
6967 /* Return a legitimate reference for ORIG (an address) using the
6968 register REG. If REG is 0, a new pseudo is generated.
6970 There are two types of references that must be handled:
6972 1. Global data references must load the address from the GOT, via
6973 the PIC reg. An insn is emitted to do this load, and the reg is
6976 2. Static data references, constant pool addresses, and code labels
6977 compute the address as an offset from the GOT, whose base is in
6978 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6979 differentiate them from global data objects. The returned
6980 address is the PIC reg + an unspec constant.
6982 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6983 reg also appears in the address. */
6986 legitimize_pic_address (rtx orig, rtx reg)
6993 if (TARGET_MACHO && !TARGET_64BIT)
6996 reg = gen_reg_rtx (Pmode);
6997 /* Use the generic Mach-O PIC machinery. */
6998 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
7002 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
7004 else if (TARGET_64BIT
7005 && ix86_cmodel != CM_SMALL_PIC
7006 && gotoff_operand (addr, Pmode))
7009 /* This symbol may be referenced via a displacement from the PIC
7010 base address (@GOTOFF). */
7012 if (reload_in_progress)
7013 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7014 if (GET_CODE (addr) == CONST)
7015 addr = XEXP (addr, 0);
7016 if (GET_CODE (addr) == PLUS)
7018 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
7020 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
7023 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
7024 new = gen_rtx_CONST (Pmode, new);
7026 tmpreg = gen_reg_rtx (Pmode);
7029 emit_move_insn (tmpreg, new);
7033 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
7034 tmpreg, 1, OPTAB_DIRECT);
7037 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
7039 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
7041 /* This symbol may be referenced via a displacement from the PIC
7042 base address (@GOTOFF). */
7044 if (reload_in_progress)
7045 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7046 if (GET_CODE (addr) == CONST)
7047 addr = XEXP (addr, 0);
7048 if (GET_CODE (addr) == PLUS)
7050 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
7052 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
7055 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
7056 new = gen_rtx_CONST (Pmode, new);
7057 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
7061 emit_move_insn (reg, new);
7065 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
7066 /* We can't use @GOTOFF for text labels on VxWorks;
7067 see gotoff_operand. */
7068 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
7070 /* Given that we've already handled dllimport variables separately
7071 in legitimize_address, and all other variables should satisfy
7072 legitimate_pic_address_disp_p, we should never arrive here. */
7073 gcc_assert (!TARGET_64BIT_MS_ABI);
7075 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
7077 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
7078 new = gen_rtx_CONST (Pmode, new);
7079 new = gen_const_mem (Pmode, new);
7080 set_mem_alias_set (new, ix86_GOT_alias_set ());
7083 reg = gen_reg_rtx (Pmode);
7084 /* Use gen_movsi directly, otherwise the address is loaded
7085 into a register for CSE. We don't want to CSE these addresses,
7086 instead we CSE addresses from the GOT table, so skip this. */
7087 emit_insn (gen_movsi (reg, new));
7092 /* This symbol must be referenced via a load from the
7093 Global Offset Table (@GOT). */
7095 if (reload_in_progress)
7096 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7097 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
7098 new = gen_rtx_CONST (Pmode, new);
7100 new = force_reg (Pmode, new);
7101 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
7102 new = gen_const_mem (Pmode, new);
7103 set_mem_alias_set (new, ix86_GOT_alias_set ());
7106 reg = gen_reg_rtx (Pmode);
7107 emit_move_insn (reg, new);
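/* To illustrate the two flavors above on ia32, assuming %ebx holds the
   PIC register and %eax is the chosen destination:

	movl	foo@GOT(%ebx), %eax	; global data: load address from GOT
	leal	bar@GOTOFF(%ebx), %eax	; local data: PIC reg plus constant

   Only the @GOT form performs a memory load, which is why it is
   wrapped in gen_const_mem with the GOT alias set.  */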
7113 if (CONST_INT_P (addr)
7114 && !x86_64_immediate_operand (addr, VOIDmode))
7118 emit_move_insn (reg, addr);
7122 new = force_reg (Pmode, addr);
7124 else if (GET_CODE (addr) == CONST)
7126 addr = XEXP (addr, 0);
7128 /* We must match stuff we generate before. Assume the only
7129 unspecs that can get here are ours. Not that we could do
7130 anything with them anyway.... */
7131 if (GET_CODE (addr) == UNSPEC
7132 || (GET_CODE (addr) == PLUS
7133 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
7135 gcc_assert (GET_CODE (addr) == PLUS);
7137 if (GET_CODE (addr) == PLUS)
7139 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
7141 /* Check first to see if this is a constant offset from a @GOTOFF
7142 symbol reference. */
7143 if (gotoff_operand (op0, Pmode)
7144 && CONST_INT_P (op1))
7148 if (reload_in_progress)
7149 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7150 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
7152 new = gen_rtx_PLUS (Pmode, new, op1);
7153 new = gen_rtx_CONST (Pmode, new);
7154 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
7158 emit_move_insn (reg, new);
7164 if (INTVAL (op1) < -16*1024*1024
7165 || INTVAL (op1) >= 16*1024*1024)
7167 if (!x86_64_immediate_operand (op1, Pmode))
7168 op1 = force_reg (Pmode, op1);
7169 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
7175 base = legitimize_pic_address (XEXP (addr, 0), reg);
7176 new = legitimize_pic_address (XEXP (addr, 1),
7177 base == reg ? NULL_RTX : reg);
7179 if (CONST_INT_P (new))
7180 new = plus_constant (base, INTVAL (new));
7183 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
7185 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
7186 new = XEXP (new, 1);
7188 new = gen_rtx_PLUS (Pmode, base, new);
7196 /* Load the thread pointer. If TO_REG is true, force it into a register. */
7199 get_thread_pointer (int to_reg)
7203 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
7207 reg = gen_reg_rtx (Pmode);
7208 insn = gen_rtx_SET (VOIDmode, reg, tp);
7209 insn = emit_insn (insn);
7214 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
7215 false if we expect this to be used for a memory address and true if
7216 we expect to load the address into a register. */
7219 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
7221 rtx dest, base, off, pic, tp;
7226 case TLS_MODEL_GLOBAL_DYNAMIC:
7227 dest = gen_reg_rtx (Pmode);
7228 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7230 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7232 rtx rax = gen_rtx_REG (Pmode, 0), insns;
7235 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7236 insns = get_insns ();
7239 emit_libcall_block (insns, dest, rax, x);
7241 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7242 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7244 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7246 if (TARGET_GNU2_TLS)
7248 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7250 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7254 case TLS_MODEL_LOCAL_DYNAMIC:
7255 base = gen_reg_rtx (Pmode);
7256 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7258 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7260 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
7263 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7264 insns = get_insns ();
7267 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7268 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7269 emit_libcall_block (insns, base, rax, note);
7271 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7272 emit_insn (gen_tls_local_dynamic_base_64 (base));
7274 emit_insn (gen_tls_local_dynamic_base_32 (base));
7276 if (TARGET_GNU2_TLS)
7278 rtx x = ix86_tls_module_base ();
7280 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7281 gen_rtx_MINUS (Pmode, x, tp));
7284 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7285 off = gen_rtx_CONST (Pmode, off);
7287 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7289 if (TARGET_GNU2_TLS)
7291 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7293 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7298 case TLS_MODEL_INITIAL_EXEC:
7302 type = UNSPEC_GOTNTPOFF;
7306 if (reload_in_progress)
7307 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7308 pic = pic_offset_table_rtx;
7309 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7311 else if (!TARGET_ANY_GNU_TLS)
7313 pic = gen_reg_rtx (Pmode);
7314 emit_insn (gen_set_got (pic));
7315 type = UNSPEC_GOTTPOFF;
7320 type = UNSPEC_INDNTPOFF;
7323 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7324 off = gen_rtx_CONST (Pmode, off);
7326 off = gen_rtx_PLUS (Pmode, pic, off);
7327 off = gen_const_mem (Pmode, off);
7328 set_mem_alias_set (off, ix86_GOT_alias_set ());
7330 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7332 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7333 off = force_reg (Pmode, off);
7334 return gen_rtx_PLUS (Pmode, base, off);
7338 base = get_thread_pointer (true);
7339 dest = gen_reg_rtx (Pmode);
7340 emit_insn (gen_subsi3 (dest, base, off));
7344 case TLS_MODEL_LOCAL_EXEC:
7345 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7346 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7347 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7348 off = gen_rtx_CONST (Pmode, off);
7350 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7352 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7353 return gen_rtx_PLUS (Pmode, base, off);
7357 base = get_thread_pointer (true);
7358 dest = gen_reg_rtx (Pmode);
7359 emit_insn (gen_subsi3 (dest, base, off));
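/* A sketch of the local-exec case, assuming GNU TLS on ia32 (exact
   relocations vary with target and assembler):

	movl	%gs:0, %eax		; thread pointer
	addl	$foo@ntpoff, %eax	; negative offset from the tp

   Without TARGET_ANY_GNU_TLS the @tpoff value is instead subtracted
   from the thread pointer, matching the gen_subsi3 above.  */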
7370 /* Create or return the unique __imp_DECL dllimport symbol corresponding to DECL. */
7373 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
7374 htab_t dllimport_map;
7377 get_dllimport_decl (tree decl)
7379 struct tree_map *h, in;
7383 size_t namelen, prefixlen;
7389 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
7391 in.hash = htab_hash_pointer (decl);
7392 in.base.from = decl;
7393 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
7398 *loc = h = ggc_alloc (sizeof (struct tree_map));
7400 h->base.from = decl;
7401 h->to = to = build_decl (VAR_DECL, NULL, ptr_type_node);
7402 DECL_ARTIFICIAL (to) = 1;
7403 DECL_IGNORED_P (to) = 1;
7404 DECL_EXTERNAL (to) = 1;
7405 TREE_READONLY (to) = 1;
7407 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
7408 name = targetm.strip_name_encoding (name);
7409 if (name[0] == FASTCALL_PREFIX)
7415 prefix = "*__imp__";
7417 namelen = strlen (name);
7418 prefixlen = strlen (prefix);
7419 imp_name = alloca (namelen + prefixlen + 1);
7420 memcpy (imp_name, prefix, prefixlen);
7421 memcpy (imp_name + prefixlen, name, namelen + 1);
7423 name = ggc_alloc_string (imp_name, namelen + prefixlen);
7424 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
7425 SET_SYMBOL_REF_DECL (rtl, to);
7426 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
7428 rtl = gen_const_mem (Pmode, rtl);
7429 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
7431 SET_DECL_RTL (to, rtl);
7436 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
7437 true if we require the result to be a register. */
7440 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
7445 gcc_assert (SYMBOL_REF_DECL (symbol));
7446 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
7448 x = DECL_RTL (imp_decl);
7450 x = force_reg (Pmode, x);
7454 /* Try machine-dependent ways of modifying an illegitimate address
7455 to be legitimate. If we find one, return the new, valid address.
7456 This macro is used in only one place: `memory_address' in explow.c.
7458 OLDX is the address as it was before break_out_memory_refs was called.
7459 In some cases it is useful to look at this to decide what needs to be done.
7461 MODE and WIN are passed so that this macro can use
7462 GO_IF_LEGITIMATE_ADDRESS.
7464 It is always safe for this macro to do nothing. It exists to recognize
7465 opportunities to optimize the output.
7467 For the 80386, we handle X+REG by loading X into a register R and
7468 using R+REG. R will go in a general reg and indexing will be used.
7469 However, if REG is a broken-out memory address or multiplication,
7470 nothing needs to be done because REG can certainly go in a general reg.
7472 When -fpic is used, special handling is needed for symbolic references.
7473 See comments by legitimize_pic_address in i386.c for details. */
7476 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
7481 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
7483 return legitimize_tls_address (x, log, false);
7484 if (GET_CODE (x) == CONST
7485 && GET_CODE (XEXP (x, 0)) == PLUS
7486 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7487 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
7489 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
7490 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7493 if (flag_pic && SYMBOLIC_CONST (x))
7494 return legitimize_pic_address (x, 0);
7496 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
7498 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
7499 return legitimize_dllimport_symbol (x, true);
7500 if (GET_CODE (x) == CONST
7501 && GET_CODE (XEXP (x, 0)) == PLUS
7502 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7503 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
7505 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
7506 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7510 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
7511 if (GET_CODE (x) == ASHIFT
7512 && CONST_INT_P (XEXP (x, 1))
7513 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
7516 log = INTVAL (XEXP (x, 1));
7517 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
7518 GEN_INT (1 << log));
7521 if (GET_CODE (x) == PLUS)
7523 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
7525 if (GET_CODE (XEXP (x, 0)) == ASHIFT
7526 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7527 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
7530 log = INTVAL (XEXP (XEXP (x, 0), 1));
7531 XEXP (x, 0) = gen_rtx_MULT (Pmode,
7532 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
7533 GEN_INT (1 << log));
7536 if (GET_CODE (XEXP (x, 1)) == ASHIFT
7537 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
7538 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
7541 log = INTVAL (XEXP (XEXP (x, 1), 1));
7542 XEXP (x, 1) = gen_rtx_MULT (Pmode,
7543 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
7544 GEN_INT (1 << log));
7547 /* Put multiply first if it isn't already. */
7548 if (GET_CODE (XEXP (x, 1)) == MULT)
7550 rtx tmp = XEXP (x, 0);
7551 XEXP (x, 0) = XEXP (x, 1);
7556 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
7557 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
7558 created by virtual register instantiation, register elimination, and
7559 similar optimizations. */
7560 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
7563 x = gen_rtx_PLUS (Pmode,
7564 gen_rtx_PLUS (Pmode, XEXP (x, 0),
7565 XEXP (XEXP (x, 1), 0)),
7566 XEXP (XEXP (x, 1), 1));
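/* E.g. (plus (mult (reg A) (const_int 4)) (plus (reg B) (const_int 8)))
   becomes (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 8)),
   which ix86_decompose_address accepts directly as 8(B,A,4).  */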
7570 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
7571 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
7572 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
7573 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7574 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
7575 && CONSTANT_P (XEXP (x, 1)))
7578 rtx other = NULL_RTX;
7580 if (CONST_INT_P (XEXP (x, 1)))
7582 constant = XEXP (x, 1);
7583 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
7585 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
7587 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
7588 other = XEXP (x, 1);
7596 x = gen_rtx_PLUS (Pmode,
7597 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
7598 XEXP (XEXP (XEXP (x, 0), 1), 0)),
7599 plus_constant (other, INTVAL (constant)));
7603 if (changed && legitimate_address_p (mode, x, FALSE))
7606 if (GET_CODE (XEXP (x, 0)) == MULT)
7609 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
7612 if (GET_CODE (XEXP (x, 1)) == MULT)
7615 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
7619 && REG_P (XEXP (x, 1))
7620 && REG_P (XEXP (x, 0)))
7623 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
7626 x = legitimize_pic_address (x, 0);
7629 if (changed && legitimate_address_p (mode, x, FALSE))
7632 if (REG_P (XEXP (x, 0)))
7634 rtx temp = gen_reg_rtx (Pmode);
7635 rtx val = force_operand (XEXP (x, 1), temp);
7637 emit_move_insn (temp, val);
7643 else if (REG_P (XEXP (x, 1)))
7645 rtx temp = gen_reg_rtx (Pmode);
7646 rtx val = force_operand (XEXP (x, 0), temp);
7648 emit_move_insn (temp, val);
7658 /* Print an integer constant expression in assembler syntax. Addition
7659 and subtraction are the only arithmetic that may appear in these
7660 expressions. FILE is the stdio stream to write to, X is the rtx, and
7661 CODE is the operand print code from the output string. */
7664 output_pic_addr_const (FILE *file, rtx x, int code)
7668 switch (GET_CODE (x))
7671 gcc_assert (flag_pic);
7676 if (! TARGET_MACHO || TARGET_64BIT)
7677 output_addr_const (file, x);
7680 const char *name = XSTR (x, 0);
7682 /* Mark the decl as referenced so that cgraph will
7683 output the function. */
7684 if (SYMBOL_REF_DECL (x))
7685 mark_decl_referenced (SYMBOL_REF_DECL (x));
7688 if (MACHOPIC_INDIRECT
7689 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
7690 name = machopic_indirection_name (x, /*stub_p=*/true);
7692 assemble_name (file, name);
7694 if (!TARGET_MACHO && !TARGET_64BIT_MS_ABI
7695 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
7696 fputs ("@PLT", file);
7703 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
7704 assemble_name (asm_out_file, buf);
7708 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7712 /* This used to output parentheses around the expression,
7713 but that does not work on the 386 (either ATT or BSD assembler). */
7714 output_pic_addr_const (file, XEXP (x, 0), code);
7718 if (GET_MODE (x) == VOIDmode)
7720 /* We can use %d if the number is <32 bits and positive. */
7721 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
7722 fprintf (file, "0x%lx%08lx",
7723 (unsigned long) CONST_DOUBLE_HIGH (x),
7724 (unsigned long) CONST_DOUBLE_LOW (x));
7726 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
7729 /* We can't handle floating point constants;
7730 PRINT_OPERAND must handle them. */
7731 output_operand_lossage ("floating constant misused");
7735 /* Some assemblers need integer constants to appear first. */
7736 if (CONST_INT_P (XEXP (x, 0)))
7738 output_pic_addr_const (file, XEXP (x, 0), code);
7740 output_pic_addr_const (file, XEXP (x, 1), code);
7744 gcc_assert (CONST_INT_P (XEXP (x, 1)));
7745 output_pic_addr_const (file, XEXP (x, 1), code);
7747 output_pic_addr_const (file, XEXP (x, 0), code);
7753 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
7754 output_pic_addr_const (file, XEXP (x, 0), code);
7756 output_pic_addr_const (file, XEXP (x, 1), code);
7758 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
7762 gcc_assert (XVECLEN (x, 0) == 1);
7763 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
7764 switch (XINT (x, 1))
7767 fputs ("@GOT", file);
7770 fputs ("@GOTOFF", file);
7773 fputs ("@PLTOFF", file);
7775 case UNSPEC_GOTPCREL:
7776 fputs ("@GOTPCREL(%rip)", file);
7778 case UNSPEC_GOTTPOFF:
7779 /* FIXME: This might be @TPOFF in Sun ld too. */
7780 fputs ("@GOTTPOFF", file);
7783 fputs ("@TPOFF", file);
7787 fputs ("@TPOFF", file);
7789 fputs ("@NTPOFF", file);
7792 fputs ("@DTPOFF", file);
7794 case UNSPEC_GOTNTPOFF:
7796 fputs ("@GOTTPOFF(%rip)", file);
7798 fputs ("@GOTNTPOFF", file);
7800 case UNSPEC_INDNTPOFF:
7801 fputs ("@INDNTPOFF", file);
7804 output_operand_lossage ("invalid UNSPEC as operand");
7810 output_operand_lossage ("invalid expression as operand");
7814 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7815 We need to emit DTP-relative relocations. */
7817 static void ATTRIBUTE_UNUSED
7818 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
7820 fputs (ASM_LONG, file);
7821 output_addr_const (file, x);
7822 fputs ("@DTPOFF", file);
7828 fputs (", 0", file);
7835 /* In the name of slightly smaller debug output, and to cater to
7836 general assembler lossage, recognize PIC+GOTOFF and turn it back
7837 into a direct symbol reference.
7839 On Darwin, this is necessary to avoid a crash, because Darwin
7840 has a different PIC label for each routine but the DWARF debugging
7841 information is not associated with any particular routine, so it's
7842 necessary to remove references to the PIC label from RTL stored by
7843 the DWARF output code. */
7846 ix86_delegitimize_address (rtx orig_x)
7849 /* reg_addend is NULL or a multiple of some register. */
7850 rtx reg_addend = NULL_RTX;
7851 /* const_addend is NULL or a const_int. */
7852 rtx const_addend = NULL_RTX;
7853 /* This is the result, or NULL. */
7854 rtx result = NULL_RTX;
7861 if (GET_CODE (x) != CONST
7862 || GET_CODE (XEXP (x, 0)) != UNSPEC
7863 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
7866 return XVECEXP (XEXP (x, 0), 0, 0);
7869 if (GET_CODE (x) != PLUS
7870 || GET_CODE (XEXP (x, 1)) != CONST)
7873 if (REG_P (XEXP (x, 0))
7874 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7875 /* %ebx + GOT/GOTOFF */
7877 else if (GET_CODE (XEXP (x, 0)) == PLUS)
7879 /* %ebx + %reg * scale + GOT/GOTOFF */
7880 reg_addend = XEXP (x, 0);
7881 if (REG_P (XEXP (reg_addend, 0))
7882 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
7883 reg_addend = XEXP (reg_addend, 1);
7884 else if (REG_P (XEXP (reg_addend, 1))
7885 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
7886 reg_addend = XEXP (reg_addend, 0);
7889 if (!REG_P (reg_addend)
7890 && GET_CODE (reg_addend) != MULT
7891 && GET_CODE (reg_addend) != ASHIFT)
7897 x = XEXP (XEXP (x, 1), 0);
7898 if (GET_CODE (x) == PLUS
7899 && CONST_INT_P (XEXP (x, 1)))
7901 const_addend = XEXP (x, 1);
7905 if (GET_CODE (x) == UNSPEC
7906 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
7907 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
7908 result = XVECEXP (x, 0, 0);
7910 if (TARGET_MACHO && darwin_local_data_pic (x)
7912 result = XEXP (x, 0);
7918 result = gen_rtx_PLUS (Pmode, result, const_addend);
7920 result = gen_rtx_PLUS (Pmode, reg_addend, result);
7924 /* If X is a machine specific address (i.e. a symbol or label being
7925 referenced as a displacement from the GOT implemented using an
7926 UNSPEC), then return the base term. Otherwise return X. */
7929 ix86_find_base_term (rtx x)
7935 if (GET_CODE (x) != CONST)
7938 if (GET_CODE (term) == PLUS
7939 && (CONST_INT_P (XEXP (term, 1))
7940 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
7941 term = XEXP (term, 0);
7942 if (GET_CODE (term) != UNSPEC
7943 || XINT (term, 1) != UNSPEC_GOTPCREL)
7946 term = XVECEXP (term, 0, 0);
7948 if (GET_CODE (term) != SYMBOL_REF
7949 && GET_CODE (term) != LABEL_REF)
7955 term = ix86_delegitimize_address (x);
7957 if (GET_CODE (term) != SYMBOL_REF
7958 && GET_CODE (term) != LABEL_REF)
7965 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
7970 if (mode == CCFPmode || mode == CCFPUmode)
7972 enum rtx_code second_code, bypass_code;
7973 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
7974 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
7975 code = ix86_fp_compare_code_to_integer (code);
7979 code = reverse_condition (code);
7990 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
7994 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
7995 Those same assemblers have the same but opposite lossage on cmov. */
7996 gcc_assert (mode == CCmode);
7997 suffix = fp ? "nbe" : "a";
8017 gcc_assert (mode == CCmode);
8039 gcc_assert (mode == CCmode);
8040 suffix = fp ? "nb" : "ae";
8043 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
8047 gcc_assert (mode == CCmode);
8051 suffix = fp ? "u" : "p";
8054 suffix = fp ? "nu" : "np";
8059 fputs (suffix, file);
8062 /* Print the name of register X to FILE based on its machine mode and number.
8063 If CODE is 'w', pretend the mode is HImode.
8064 If CODE is 'b', pretend the mode is QImode.
8065 If CODE is 'k', pretend the mode is SImode.
8066 If CODE is 'q', pretend the mode is DImode.
8067 If CODE is 'h', pretend the reg is the 'high' byte register.
8068 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
8071 print_reg (rtx x, int code, FILE *file)
8073 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
8074 && REGNO (x) != FRAME_POINTER_REGNUM
8075 && REGNO (x) != FLAGS_REG
8076 && REGNO (x) != FPSR_REG
8077 && REGNO (x) != FPCR_REG);
8079 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
8082 if (code == 'w' || MMX_REG_P (x))
8084 else if (code == 'b')
8086 else if (code == 'k')
8088 else if (code == 'q')
8090 else if (code == 'y')
8092 else if (code == 'h')
8095 code = GET_MODE_SIZE (GET_MODE (x));
8097 /* Irritatingly, AMD extended registers use a different naming convention
8098 from the normal registers. */
8099 if (REX_INT_REG_P (x))
8101 gcc_assert (TARGET_64BIT);
8105 error ("extended registers have no high halves");
8108 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
8111 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
8114 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
8117 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
8120 error ("unsupported operand size for extended register");
8128 if (STACK_TOP_P (x))
8130 fputs ("st(0)", file);
8137 if (! ANY_FP_REG_P (x))
8138 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
8143 fputs (hi_reg_name[REGNO (x)], file);
8146 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
8148 fputs (qi_reg_name[REGNO (x)], file);
8151 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
8153 fputs (qi_high_reg_name[REGNO (x)], file);
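/* For instance, register number 0 (the ax family) is printed as %al,
   %ax, %eax or %rax for codes 'b', 'w', 'k' and 'q' respectively, and
   as %ah for code 'h'; the tables above supply the bare names and the
   dialect check at the top decides whether the '%' prefix is used.  */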
8160 /* Locate some local-dynamic symbol still in use by this function
8161 so that we can print its name in some tls_local_dynamic_base pattern. */
8165 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8169 if (GET_CODE (x) == SYMBOL_REF
8170 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8172 cfun->machine->some_ld_name = XSTR (x, 0);
8180 get_some_local_dynamic_name (void)
8184 if (cfun->machine->some_ld_name)
8185 return cfun->machine->some_ld_name;
8187 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8189 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8190 return cfun->machine->some_ld_name;
/* Meaning of CODE:
8196 L,W,B,Q,S,T -- print the opcode suffix for the specified size of operand.
8197 C -- print opcode suffix for set/cmov insn.
8198 c -- like C, but print reversed condition
8199 F,f -- likewise, but for floating-point.
8200 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.", otherwise nothing
8202 R -- print the prefix for register names.
8203 z -- print the opcode suffix for the size of the current operand.
8204 * -- print a star (in certain assembler syntax)
8205 A -- print an absolute memory reference.
8206 w -- print the operand as if it's a "word" (HImode) even if it isn't.
8207 s -- print a shift double count, followed by the assembler's argument delimiter.
8209 b -- print the QImode name of the register for the indicated operand.
8210 %b0 would print %al if operands[0] is reg 0.
8211 w -- likewise, print the HImode name of the register.
8212 k -- likewise, print the SImode name of the register.
8213 q -- likewise, print the DImode name of the register.
8214 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
8215 y -- print "st(0)" instead of "st" as a register.
8216 D -- print condition for SSE cmp instruction.
8217 P -- if PIC, print an @PLT suffix.
8218 X -- don't print any sort of PIC '@' suffix for a symbol.
8219 & -- print some in-use local-dynamic symbol name.
8220 H -- print a memory address offset by 8; used for sse high-parts
8224 print_operand (FILE *file, rtx x, int code)
8231 if (ASSEMBLER_DIALECT == ASM_ATT)
8236 assemble_name (file, get_some_local_dynamic_name ());
8240 switch (ASSEMBLER_DIALECT)
8247 /* Intel syntax. For absolute addresses, registers should not
8248 be surrounded by braces. */
8252 PRINT_OPERAND (file, x, 0);
8262 PRINT_OPERAND (file, x, 0);
8267 if (ASSEMBLER_DIALECT == ASM_ATT)
8272 if (ASSEMBLER_DIALECT == ASM_ATT)
8277 if (ASSEMBLER_DIALECT == ASM_ATT)
8282 if (ASSEMBLER_DIALECT == ASM_ATT)
8287 if (ASSEMBLER_DIALECT == ASM_ATT)
8292 if (ASSEMBLER_DIALECT == ASM_ATT)
8297 /* 387 opcodes don't get size suffixes if the operands are registers. */
8299 if (STACK_REG_P (x))
8302 /* Likewise if using Intel opcodes. */
8303 if (ASSEMBLER_DIALECT == ASM_INTEL)
8306 /* The opcode suffix is derived from the size of the operand. */
8307 switch (GET_MODE_SIZE (GET_MODE (x)))
8314 #ifdef HAVE_GAS_FILDS_FISTS
8320 if (GET_MODE (x) == SFmode)
8335 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
8337 #ifdef GAS_MNEMONICS
8363 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
8365 PRINT_OPERAND (file, x, 0);
8371 /* A little bit of braindamage here. The SSE compare instructions
8372 use completely different names for the comparisons than the
8373 fp conditional moves do. */
8374 switch (GET_CODE (x))
8389 fputs ("unord", file);
8393 fputs ("neq", file);
8397 fputs ("nlt", file);
8401 fputs ("nle", file);
8404 fputs ("ord", file);
8411 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8412 if (ASSEMBLER_DIALECT == ASM_ATT)
8414 switch (GET_MODE (x))
8416 case HImode: putc ('w', file); break;
8418 case SFmode: putc ('l', file); break;
8420 case DFmode: putc ('q', file); break;
8421 default: gcc_unreachable ();
8428 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
8431 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8432 if (ASSEMBLER_DIALECT == ASM_ATT)
8435 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
8438 /* Like above, but reverse condition */
8440 /* Check to see if argument to %c is really a constant
8441 and not a condition code which needs to be reversed. */
8442 if (!COMPARISON_P (x))
8444 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
8447 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
8450 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8451 if (ASSEMBLER_DIALECT == ASM_ATT)
8454 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
8458 /* It doesn't actually matter what mode we use here, as we're
8459 only going to use this for printing. */
8460 x = adjust_address_nv (x, DImode, 8);
8467 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
8470 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
8473 int pred_val = INTVAL (XEXP (x, 0));
8475 if (pred_val < REG_BR_PROB_BASE * 45 / 100
8476 || pred_val > REG_BR_PROB_BASE * 55 / 100)
8478 int taken = pred_val > REG_BR_PROB_BASE / 2;
8479 int cputaken = final_forward_branch_p (current_output_insn) == 0;
8481 /* Emit hints only when the default branch prediction
8482 heuristics would fail. */
8483 if (taken != cputaken)
8485 /* We use 3e (DS) prefix for taken branches and
8486 2e (CS) prefix for not taken branches. */
8488 fputs ("ds ; ", file);
8490 fputs ("cs ; ", file);
8497 output_operand_lossage ("invalid operand code '%c'", code);
8502 print_reg (x, code, file);
8506 /* No `byte ptr' prefix for call instructions. */
8507 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
8510 switch (GET_MODE_SIZE (GET_MODE (x)))
8512 case 1: size = "BYTE"; break;
8513 case 2: size = "WORD"; break;
8514 case 4: size = "DWORD"; break;
8515 case 8: size = "QWORD"; break;
8516 case 12: size = "XWORD"; break;
8517 case 16: size = "XMMWORD"; break;
8522 /* Check for explicit size override (codes 'b', 'w' and 'k') */
8525 else if (code == 'w')
8527 else if (code == 'k')
8531 fputs (" PTR ", file);
8535 /* Avoid (%rip) for call operands. */
8536 if (CONSTANT_ADDRESS_P (x) && code == 'P'
8537 && !CONST_INT_P (x))
8538 output_addr_const (file, x);
8539 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
8540 output_operand_lossage ("invalid constraints for operand");
8545 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
8550 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8551 REAL_VALUE_TO_TARGET_SINGLE (r, l);
8553 if (ASSEMBLER_DIALECT == ASM_ATT)
8555 fprintf (file, "0x%08lx", l);
8558 /* These float cases don't actually occur as immediate operands. */
8559 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
8563 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8564 fprintf (file, "%s", dstr);
8567 else if (GET_CODE (x) == CONST_DOUBLE
8568 && GET_MODE (x) == XFmode)
8572 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8573 fprintf (file, "%s", dstr);
8578 /* We have patterns that allow zero sets of memory, for instance.
8579 In 64-bit mode, we should probably support all 8-byte vectors,
8580 since we can in fact encode that into an immediate. */
8581 if (GET_CODE (x) == CONST_VECTOR)
8583 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
8589 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
8591 if (ASSEMBLER_DIALECT == ASM_ATT)
8594 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
8595 || GET_CODE (x) == LABEL_REF)
8597 if (ASSEMBLER_DIALECT == ASM_ATT)
8600 fputs ("OFFSET FLAT:", file);
8603 if (CONST_INT_P (x))
8604 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8606 output_pic_addr_const (file, x, code);
8608 output_addr_const (file, x);
8612 /* Print a memory operand whose address is ADDR. */
8615 print_operand_address (FILE *file, rtx addr)
8617 struct ix86_address parts;
8618 rtx base, index, disp;
8620 int ok = ix86_decompose_address (addr, &parts);
8625 index = parts.index;
8627 scale = parts.scale;
8635 if (USER_LABEL_PREFIX[0] == 0)
8637 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
8643 if (!base && !index)
8645 /* A displacement-only address requires special attention. */
8647 if (CONST_INT_P (disp))
8649 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
8651 if (USER_LABEL_PREFIX[0] == 0)
8653 fputs ("ds:", file);
8655 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
8658 output_pic_addr_const (file, disp, 0);
8660 output_addr_const (file, disp);
8662 /* Use the one-byte-shorter RIP-relative addressing in 64-bit mode. */
8665 if (GET_CODE (disp) == CONST
8666 && GET_CODE (XEXP (disp, 0)) == PLUS
8667 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
8668 disp = XEXP (XEXP (disp, 0), 0);
8669 if (GET_CODE (disp) == LABEL_REF
8670 || (GET_CODE (disp) == SYMBOL_REF
8671 && SYMBOL_REF_TLS_MODEL (disp) == 0))
8672 fputs ("(%rip)", file);
8677 if (ASSEMBLER_DIALECT == ASM_ATT)
8682 output_pic_addr_const (file, disp, 0);
8683 else if (GET_CODE (disp) == LABEL_REF)
8684 output_asm_label (disp);
8686 output_addr_const (file, disp);
8691 print_reg (base, 0, file);
8695 print_reg (index, 0, file);
8697 fprintf (file, ",%d", scale);
8703 rtx offset = NULL_RTX;
8707 /* Pull out the offset of a symbol; print any symbol itself. */
8708 if (GET_CODE (disp) == CONST
8709 && GET_CODE (XEXP (disp, 0)) == PLUS
8710 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
8712 offset = XEXP (XEXP (disp, 0), 1);
8713 disp = gen_rtx_CONST (VOIDmode,
8714 XEXP (XEXP (disp, 0), 0));
8718 output_pic_addr_const (file, disp, 0);
8719 else if (GET_CODE (disp) == LABEL_REF)
8720 output_asm_label (disp);
8721 else if (CONST_INT_P (disp))
8724 output_addr_const (file, disp);
8730 print_reg (base, 0, file);
8733 if (INTVAL (offset) >= 0)
8735 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8739 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8746 print_reg (index, 0, file);
8748 fprintf (file, "*%d", scale);
8756 output_addr_const_extra (FILE *file, rtx x)
8760 if (GET_CODE (x) != UNSPEC)
8763 op = XVECEXP (x, 0, 0);
8764 switch (XINT (x, 1))
8766 case UNSPEC_GOTTPOFF:
8767 output_addr_const (file, op);
8768 /* FIXME: This might be @TPOFF in Sun ld. */
8769 fputs ("@GOTTPOFF", file);
8772 output_addr_const (file, op);
8773 fputs ("@TPOFF", file);
8776 output_addr_const (file, op);
8778 fputs ("@TPOFF", file);
8780 fputs ("@NTPOFF", file);
8783 output_addr_const (file, op);
8784 fputs ("@DTPOFF", file);
8786 case UNSPEC_GOTNTPOFF:
8787 output_addr_const (file, op);
8789 fputs ("@GOTTPOFF(%rip)", file);
8791 fputs ("@GOTNTPOFF", file);
8793 case UNSPEC_INDNTPOFF:
8794 output_addr_const (file, op);
8795 fputs ("@INDNTPOFF", file);
8805 /* Split one or more DImode RTL references into pairs of SImode
8806 references. The RTL can be REG, offsettable MEM, integer constant, or
8807 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8808 split and "num" is its length. lo_half and hi_half are output arrays
8809 that parallel "operands". */
8812 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8816 rtx op = operands[num];
8818 /* simplify_subreg refuses to split volatile memory addresses,
8819 but we still have to handle them. */
8822 lo_half[num] = adjust_address (op, SImode, 0);
8823 hi_half[num] = adjust_address (op, SImode, 4);
8827 lo_half[num] = simplify_gen_subreg (SImode, op,
8828 GET_MODE (op) == VOIDmode
8829 ? DImode : GET_MODE (op), 0);
8830 hi_half[num] = simplify_gen_subreg (SImode, op,
8831 GET_MODE (op) == VOIDmode
8832 ? DImode : GET_MODE (op), 4);
8836 /* Split one or more TImode RTL references into pairs of DImode
8837 references. The RTL can be REG, offsettable MEM, integer constant, or
8839 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
8839 split and "num" is its length. lo_half and hi_half are output arrays
8840 that parallel "operands". */
8843 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8847 rtx op = operands[num];
8849 /* simplify_subreg refuses to split volatile memory addresses, but we
8850 still have to handle them. */
8853 lo_half[num] = adjust_address (op, DImode, 0);
8854 hi_half[num] = adjust_address (op, DImode, 8);
8858 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
8859 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
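/* A minimal usage sketch, guarded out since it is illustrative only;
   the operand value is hypothetical and assumes a 64-bit
   HOST_WIDE_INT.  */
#if 0
static void
example_split_di (void)
{
  rtx ops[1], lo[1], hi[1];

  ops[0] = GEN_INT ((HOST_WIDE_INT) 0x100000002LL);
  split_di (ops, 1, lo, hi);
  /* lo[0] is now (const_int 2) and hi[0] is (const_int 1);
     split_ti decomposes TImode operands into DImode halves in
     exactly the same fashion.  */
}
#endif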
8864 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
8865 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
8866 is the expression of the binary operation. The output may either be
8867 emitted here, or returned to the caller, like all output_* functions.
8869 There is no guarantee that the operands are the same mode, as they
8870 might be within FLOAT or FLOAT_EXTEND expressions. */
8872 #ifndef SYSV386_COMPAT
8873 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
8874 wants to fix the assemblers because that causes incompatibility
8875 with gcc. No-one wants to fix gcc because that causes
8876 incompatibility with assemblers... You can use the option of
8877 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
8878 #define SYSV386_COMPAT 1
8882 output_387_binary_op (rtx insn, rtx *operands)
8884 static char buf[30];
8887 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
8889 #ifdef ENABLE_CHECKING
8890 /* Even if we do not want to check the inputs, this documents the input
8891 constraints, which helps in understanding the following code. */
8892 if (STACK_REG_P (operands[0])
8893 && ((REG_P (operands[1])
8894 && REGNO (operands[0]) == REGNO (operands[1])
8895 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
8896 || (REG_P (operands[2])
8897 && REGNO (operands[0]) == REGNO (operands[2])
8898 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
8899 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
8902 gcc_assert (is_sse);
8905 switch (GET_CODE (operands[3]))
8908 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8909 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8917 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8918 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8926 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8927 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8935 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8936 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8950 if (GET_MODE (operands[0]) == SFmode)
8951 strcat (buf, "ss\t{%2, %0|%0, %2}");
8953 strcat (buf, "sd\t{%2, %0|%0, %2}");
8958 switch (GET_CODE (operands[3]))
8962 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
8964 rtx temp = operands[2];
8965 operands[2] = operands[1];
8969 /* We know operands[0] == operands[1]. */
8971 if (MEM_P (operands[2]))
8977 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8979 if (STACK_TOP_P (operands[0]))
8980 /* How is it that we are storing to a dead operand[2]?
8981 Well, presumably operands[1] is dead too. We can't
8982 store the result to st(0) as st(0) gets popped on this
8983 instruction. Instead store to operands[2] (which I
8984 think has to be st(1)). st(1) will be popped later.
8985 gcc <= 2.8.1 didn't have this check and generated
8986 assembly code that the Unixware assembler rejected. */
8987 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8989 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8993 if (STACK_TOP_P (operands[0]))
8994 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8996 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
9001 if (MEM_P (operands[1]))
9007 if (MEM_P (operands[2]))
9013 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
9016 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
9017 derived assemblers, confusingly reverse the direction of
9018 the operation for fsub{r} and fdiv{r} when the
9019 destination register is not st(0). The Intel assembler
9020 doesn't have this brain damage. Read !SYSV386_COMPAT to
9021 figure out what the hardware really does. */
9022 if (STACK_TOP_P (operands[0]))
9023 p = "{p\t%0, %2|rp\t%2, %0}";
9025 p = "{rp\t%2, %0|p\t%0, %2}";
9027 if (STACK_TOP_P (operands[0]))
9028 /* As above for fmul/fadd, we can't store to st(0). */
9029 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
9031 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
9036 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
9039 if (STACK_TOP_P (operands[0]))
9040 p = "{rp\t%0, %1|p\t%1, %0}";
9042 p = "{p\t%1, %0|rp\t%0, %1}";
9044 if (STACK_TOP_P (operands[0]))
9045 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
9047 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
9052 if (STACK_TOP_P (operands[0]))
9054 if (STACK_TOP_P (operands[1]))
9055 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
9057 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
9060 else if (STACK_TOP_P (operands[1]))
9063 p = "{\t%1, %0|r\t%0, %1}";
9065 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
9071 p = "{r\t%2, %0|\t%0, %2}";
9073 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
9086 /* Return needed mode for entity in optimize_mode_switching pass. */
9089 ix86_mode_needed (int entity, rtx insn)
9091 enum attr_i387_cw mode;
9093 /* The mode UNINITIALIZED is used to store the control word after a
9094 function call or ASM pattern. The mode ANY specifies that the function
9095 has no requirements on the control word and makes no changes in the
9096 bits we are interested in. */
9099 || (NONJUMP_INSN_P (insn)
9100 && (asm_noperands (PATTERN (insn)) >= 0
9101 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
9102 return I387_CW_UNINITIALIZED;
9104 if (recog_memoized (insn) < 0)
9107 mode = get_attr_i387_cw (insn);
9112 if (mode == I387_CW_TRUNC)
9117 if (mode == I387_CW_FLOOR)
9122 if (mode == I387_CW_CEIL)
9127 if (mode == I387_CW_MASK_PM)
9138 /* Output code to initialize control word copies used by trunc?f?i and
9139 rounding patterns. CURRENT_MODE is set to the current control word,
9140 while NEW_MODE is set to the new control word. */
9143 emit_i387_cw_initialization (int mode)
9145 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
9150 rtx reg = gen_reg_rtx (HImode);
9152 emit_insn (gen_x86_fnstcw_1 (stored_mode));
9153 emit_move_insn (reg, copy_rtx (stored_mode));
9155 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
9160 /* round toward zero (truncate) */
9161 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
9162 slot = SLOT_CW_TRUNC;
9166 /* round down toward -oo */
9167 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
9168 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
9169 slot = SLOT_CW_FLOOR;
9173 /* round up toward +oo */
9174 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
9175 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
9176 slot = SLOT_CW_CEIL;
9179 case I387_CW_MASK_PM:
9180 /* mask precision exception for nearbyint() */
9181 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
9182 slot = SLOT_CW_MASK_PM;
9194 /* round toward zero (truncate) */
9195 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
9196 slot = SLOT_CW_TRUNC;
9200 /* round down toward -oo */
9201 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
9202 slot = SLOT_CW_FLOOR;
9206 /* round up toward +oo */
9207 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
9208 slot = SLOT_CW_CEIL;
9211 case I387_CW_MASK_PM:
9212 /* mask precision exception for nearbyint() */
9213 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
9214 slot = SLOT_CW_MASK_PM;
9222 gcc_assert (slot < MAX_386_STACK_LOCALS);
9224 new_mode = assign_386_stack_local (HImode, slot);
9225 emit_move_insn (new_mode, reg);
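/* Worked example of the control word updates above (a sketch; per the
   x87 architecture, CW bits 10-11 hold the rounding control and bit 5
   masks the precision exception).  Starting from the default control
   word 0x037f:

     0x037f | 0x0c00              = 0x0f7f   RC=11, round toward zero
     (0x037f & ~0x0c00) | 0x0400  = 0x077f   RC=01, round toward -oo
     (0x037f & ~0x0c00) | 0x0800  = 0x0b7f   RC=10, round toward +oo
     0x037f | 0x0020              = 0x037f   PM already masked.  */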
9228 /* Output code for INSN to convert a float to a signed int. OPERANDS
9229 are the insn operands. The output may be [HSD]Imode and the input
9230 operand may be [SDX]Fmode. */
9233 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
9235 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
9236 int dimode_p = GET_MODE (operands[0]) == DImode;
9237 int round_mode = get_attr_i387_cw (insn);
9239 /* Jump through a hoop or two for DImode, since the hardware has no
9240 non-popping instruction. We used to do this a different way, but
9241 that was somewhat fragile and broke with post-reload splitters. */
9242 if ((dimode_p || fisttp) && !stack_top_dies)
9243 output_asm_insn ("fld\t%y1", operands);
9245 gcc_assert (STACK_TOP_P (operands[1]));
9246 gcc_assert (MEM_P (operands[0]));
9249 output_asm_insn ("fisttp%z0\t%0", operands);
9252 if (round_mode != I387_CW_ANY)
9253 output_asm_insn ("fldcw\t%3", operands);
9254 if (stack_top_dies || dimode_p)
9255 output_asm_insn ("fistp%z0\t%0", operands);
9257 output_asm_insn ("fist%z0\t%0", operands);
9258 if (round_mode != I387_CW_ANY)
9259 output_asm_insn ("fldcw\t%2", operands);
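/* An illustrative sketch (operand names hypothetical): for a DImode
   truncation where the stack top does not die, the code above emits
   the equivalent of:

     fld     %st(0)
     fldcw   new_cw
     fistpll mem
     fldcw   saved_cw  */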
9265 /* Output code for x87 ffreep insn. The OPNO argument, which may only
9266 have the values zero or one, indicates the ffreep insn's operand
9267 from the OPERANDS array. */
9270 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
9272 if (TARGET_USE_FFREEP)
9273 #if HAVE_AS_IX86_FFREEP
9274 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
9277 static char retval[] = ".word\t0xc_df";
9278 int regno = REGNO (operands[opno]);
9280 gcc_assert (FP_REGNO_P (regno));
9282 retval[9] = '0' + (regno - FIRST_STACK_REG);
9287 return opno ? "fstp\t%y1" : "fstp\t%y0";
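/* Worked example: older gas has no ffreep mnemonic, so for %st(2) the
   code above patches the '_' placeholder and emits ".word 0xc2df";
   stored little-endian that is the byte sequence 0xdf 0xc2, the
   machine encoding of ffreep %st(2).  */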
9291 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
9292 should be used. UNORDERED_P is true when fucom should be used. */
9295 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
9298 rtx cmp_op0, cmp_op1;
9299 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
9303 cmp_op0 = operands[0];
9304 cmp_op1 = operands[1];
9308 cmp_op0 = operands[1];
9309 cmp_op1 = operands[2];
9314 if (GET_MODE (operands[0]) == SFmode)
9316 return "ucomiss\t{%1, %0|%0, %1}";
9318 return "comiss\t{%1, %0|%0, %1}";
9321 return "ucomisd\t{%1, %0|%0, %1}";
9323 return "comisd\t{%1, %0|%0, %1}";
9326 gcc_assert (STACK_TOP_P (cmp_op0));
9328 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
9330 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
9334 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
9335 return output_387_ffreep (operands, 1);
9338 return "ftst\n\tfnstsw\t%0";
9341 if (STACK_REG_P (cmp_op1)
9343 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
9344 && REGNO (cmp_op1) != FIRST_STACK_REG)
9346 /* If the top of the 387 stack dies, and the other operand
9347 is also a stack register that dies, then this must be an
9348 `fcompp' float compare. */
9352 /* There is no double popping fcomi variant. Fortunately,
9353 eflags is immune from the fstp's cc clobbering. */
9355 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
9357 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
9358 return output_387_ffreep (operands, 0);
9363 return "fucompp\n\tfnstsw\t%0";
9365 return "fcompp\n\tfnstsw\t%0";
9370 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
9372 static const char * const alt[16] =
9374 "fcom%z2\t%y2\n\tfnstsw\t%0",
9375 "fcomp%z2\t%y2\n\tfnstsw\t%0",
9376 "fucom%z2\t%y2\n\tfnstsw\t%0",
9377 "fucomp%z2\t%y2\n\tfnstsw\t%0",
9379 "ficom%z2\t%y2\n\tfnstsw\t%0",
9380 "ficomp%z2\t%y2\n\tfnstsw\t%0",
9384 "fcomi\t{%y1, %0|%0, %y1}",
9385 "fcomip\t{%y1, %0|%0, %y1}",
9386 "fucomi\t{%y1, %0|%0, %y1}",
9387 "fucomip\t{%y1, %0|%0, %y1}",
9398 mask = eflags_p << 3;
9399 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
9400 mask |= unordered_p << 1;
9401 mask |= stack_top_dies;
9403 gcc_assert (mask < 16);
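/* Worked example: an fcomi-style output (eflags_p == 1) of an
   unordered compare (unordered_p == 1) whose stack top dies gives
   mask = (1 << 3) | (0 << 2) | (1 << 1) | 1 = 11, selecting
   "fucomip\t{%y1, %0|%0, %y1}" from the table above.  */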
9412 ix86_output_addr_vec_elt (FILE *file, int value)
9414 const char *directive = ASM_LONG;
9418 directive = ASM_QUAD;
9420 gcc_assert (!TARGET_64BIT);
9423 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
9427 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
9429 const char *directive = ASM_LONG;
9432 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
9433 directive = ASM_QUAD;
9435 gcc_assert (!TARGET_64BIT);
9437 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
9438 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
9439 fprintf (file, "%s%s%d-%s%d\n",
9440 directive, LPREFIX, value, LPREFIX, rel);
9441 else if (HAVE_AS_GOTOFF_IN_DATA)
9442 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
9444 else if (TARGET_MACHO)
9446 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
9447 machopic_output_function_base_name (file);
9448 fprintf(file, "\n");
9452 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
9453 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
9456 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
9460 ix86_expand_clear (rtx dest)
9464 /* We play register width games, which are only valid after reload. */
9465 gcc_assert (reload_completed);
9467 /* Avoid HImode and its attendant prefix byte. */
9468 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
9469 dest = gen_rtx_REG (SImode, REGNO (dest));
9470 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
9472 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
9473 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
9475 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
9476 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
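/* As a size illustration: clearing %eax via "xorl %eax, %eax" encodes
   in 2 bytes, while "movl $0, %eax" takes 5, which is why the xor form
   is preferred unless TARGET_USE_MOV0 is set and we are not optimizing
   for size.  */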
9482 /* X is an unchanging MEM. If it is a constant pool reference, return
9483 the constant pool rtx, else NULL. */
9486 maybe_get_pool_constant (rtx x)
9488 x = ix86_delegitimize_address (XEXP (x, 0));
9490 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9491 return get_pool_constant (x);
9497 ix86_expand_move (enum machine_mode mode, rtx operands[])
9499 int strict = (reload_in_progress || reload_completed);
9501 enum tls_model model;
9506 if (GET_CODE (op1) == SYMBOL_REF)
9508 model = SYMBOL_REF_TLS_MODEL (op1);
9511 op1 = legitimize_tls_address (op1, model, true);
9512 op1 = force_operand (op1, op0);
9516 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9517 && SYMBOL_REF_DLLIMPORT_P (op1))
9518 op1 = legitimize_dllimport_symbol (op1, false);
9520 else if (GET_CODE (op1) == CONST
9521 && GET_CODE (XEXP (op1, 0)) == PLUS
9522 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
9524 rtx addend = XEXP (XEXP (op1, 0), 1);
9525 rtx symbol = XEXP (XEXP (op1, 0), 0);
9528 model = SYMBOL_REF_TLS_MODEL (symbol);
9530 tmp = legitimize_tls_address (symbol, model, true);
9531 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9532 && SYMBOL_REF_DLLIMPORT_P (symbol))
9533 tmp = legitimize_dllimport_symbol (symbol, true);
9537 tmp = force_operand (tmp, NULL);
9538 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
9539 op0, 1, OPTAB_DIRECT);
9545 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
9547 if (TARGET_MACHO && !TARGET_64BIT)
9552 rtx temp = ((reload_in_progress
9553 || ((op0 && REG_P (op0))
9555 ? op0 : gen_reg_rtx (Pmode));
9556 op1 = machopic_indirect_data_reference (op1, temp);
9557 op1 = machopic_legitimize_pic_address (op1, mode,
9558 temp == op1 ? 0 : temp);
9560 else if (MACHOPIC_INDIRECT)
9561 op1 = machopic_indirect_data_reference (op1, 0);
9569 op1 = force_reg (Pmode, op1);
9570 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
9572 rtx reg = no_new_pseudos ? op0 : NULL_RTX;
9573 op1 = legitimize_pic_address (op1, reg);
9582 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
9583 || !push_operand (op0, mode))
9585 op1 = force_reg (mode, op1);
9587 if (push_operand (op0, mode)
9588 && ! general_no_elim_operand (op1, mode))
9589 op1 = copy_to_mode_reg (mode, op1);
9591 /* Force large constants in 64-bit compilation into registers
9592 to get them CSEed. */
9593 if (TARGET_64BIT && mode == DImode
9594 && immediate_operand (op1, mode)
9595 && !x86_64_zext_immediate_operand (op1, VOIDmode)
9596 && !register_operand (op0, mode)
9597 && optimize && !reload_completed && !reload_in_progress)
9598 op1 = copy_to_mode_reg (mode, op1);
9600 if (FLOAT_MODE_P (mode))
9602 /* If we are loading a floating point constant to a register,
9603 force the value to memory now, since we'll get better code
9604 out the back end. */
9608 else if (GET_CODE (op1) == CONST_DOUBLE)
9610 op1 = validize_mem (force_const_mem (mode, op1));
9611 if (!register_operand (op0, mode))
9613 rtx temp = gen_reg_rtx (mode);
9614 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
9615 emit_move_insn (op0, temp);
9622 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9626 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
9628 rtx op0 = operands[0], op1 = operands[1];
9630 /* Force constants other than zero into memory. We do not know how
9631 the instructions used to build constants modify the upper 64 bits
9632 of the register; once we have that information we may be able
9633 to handle some of them more efficiently. */
9634 if ((reload_in_progress | reload_completed) == 0
9635 && register_operand (op0, mode)
9637 && standard_sse_constant_p (op1) <= 0)
9638 op1 = validize_mem (force_const_mem (mode, op1));
9640 /* Make operand1 a register if it isn't already. */
9642 && !register_operand (op0, mode)
9643 && !register_operand (op1, mode))
9645 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
9649 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9652 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
9653 straight to ix86_expand_vector_move. */
9654 /* Code generation for scalar reg-reg moves of single and double precision data:
9655 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
9659 if (x86_sse_partial_reg_dependency == true)
9664 Code generation for scalar loads of double precision data:
9665 if (x86_sse_split_regs == true)
9666 movlpd mem, reg (gas syntax)
9670 Code generation for unaligned packed loads of single precision data
9671 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
9672 if (x86_sse_unaligned_move_optimal)
9675 if (x86_sse_partial_reg_dependency == true)
9687 Code generation for unaligned packed loads of double precision data
9688 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
9689 if (x86_sse_unaligned_move_optimal)
9692 if (x86_sse_split_regs == true)
9705 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
9714 /* If we're optimizing for size, movups is the smallest. */
9717 op0 = gen_lowpart (V4SFmode, op0);
9718 op1 = gen_lowpart (V4SFmode, op1);
9719 emit_insn (gen_sse_movups (op0, op1));
9723 /* ??? If we have typed data, then it would appear that using
9724 movdqu is the only way to get unaligned data loaded with integer type. */
9726 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9728 op0 = gen_lowpart (V16QImode, op0);
9729 op1 = gen_lowpart (V16QImode, op1);
9730 emit_insn (gen_sse2_movdqu (op0, op1));
9734 if (TARGET_SSE2 && mode == V2DFmode)
9738 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
9740 op0 = gen_lowpart (V2DFmode, op0);
9741 op1 = gen_lowpart (V2DFmode, op1);
9742 emit_insn (gen_sse2_movupd (op0, op1));
9746 /* When SSE registers are split into halves, we can avoid
9747 writing to the top half twice. */
9748 if (TARGET_SSE_SPLIT_REGS)
9750 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9755 /* ??? Not sure about the best option for the Intel chips.
9756 The following would seem to satisfy; the register is
9757 entirely cleared, breaking the dependency chain. We
9758 then store to the upper half, with a dependency depth
9759 of one. A rumor has it that Intel recommends two movsd
9760 followed by an unpacklpd, but this is unconfirmed. And
9761 given that the dependency depth of the unpacklpd would
9762 still be one, I'm not sure why this would be better. */
9763 zero = CONST0_RTX (V2DFmode);
9766 m = adjust_address (op1, DFmode, 0);
9767 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9768 m = adjust_address (op1, DFmode, 8);
9769 emit_insn (gen_sse2_loadhpd (op0, op0, m));
9773 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
9775 op0 = gen_lowpart (V4SFmode, op0);
9776 op1 = gen_lowpart (V4SFmode, op1);
9777 emit_insn (gen_sse_movups (op0, op1));
9781 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9782 emit_move_insn (op0, CONST0_RTX (mode));
9784 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9786 if (mode != V4SFmode)
9787 op0 = gen_lowpart (V4SFmode, op0);
9788 m = adjust_address (op1, V2SFmode, 0);
9789 emit_insn (gen_sse_loadlps (op0, op0, m));
9790 m = adjust_address (op1, V2SFmode, 8);
9791 emit_insn (gen_sse_loadhps (op0, op0, m));
9794 else if (MEM_P (op0))
9796 /* If we're optimizing for size, movups is the smallest. */
9799 op0 = gen_lowpart (V4SFmode, op0);
9800 op1 = gen_lowpart (V4SFmode, op1);
9801 emit_insn (gen_sse_movups (op0, op1));
9805 /* ??? Similar to above, only less clear because of "typeless
9806 stores". */
9807 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
9808 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9810 op0 = gen_lowpart (V16QImode, op0);
9811 op1 = gen_lowpart (V16QImode, op1);
9812 emit_insn (gen_sse2_movdqu (op0, op1));
9816 if (TARGET_SSE2 && mode == V2DFmode)
9818 m = adjust_address (op0, DFmode, 0);
9819 emit_insn (gen_sse2_storelpd (m, op1));
9820 m = adjust_address (op0, DFmode, 8);
9821 emit_insn (gen_sse2_storehpd (m, op1));
9825 if (mode != V4SFmode)
9826 op1 = gen_lowpart (V4SFmode, op1);
9827 m = adjust_address (op0, V2SFmode, 0);
9828 emit_insn (gen_sse_storelps (m, op1));
9829 m = adjust_address (op0, V2SFmode, 8);
9830 emit_insn (gen_sse_storehps (m, op1));
9837 /* Expand a push in MODE. This is some mode for which we do not support
9838 proper push instructions, at least from the registers that we expect
9839 the value to live in. */
9842 ix86_expand_push (enum machine_mode mode, rtx x)
9846 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
9847 GEN_INT (-GET_MODE_SIZE (mode)),
9848 stack_pointer_rtx, 1, OPTAB_DIRECT);
9849 if (tmp != stack_pointer_rtx)
9850 emit_move_insn (stack_pointer_rtx, tmp);
9852 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
9853 emit_move_insn (tmp, x);
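/* E.g. pushing a DFmode value on ia32 (a sketch) expands to the
   equivalent of

     sp = sp - 8                      (GEN_INT (-GET_MODE_SIZE (mode)))
     (mem:DF (reg:SI sp)) = x

   rather than a single push insn.  */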
9856 /* Helper function of ix86_fixup_binary_operands to canonicalize
9857 operand order. Returns true if the operands should be swapped. */
9860 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
9863 rtx dst = operands[0];
9864 rtx src1 = operands[1];
9865 rtx src2 = operands[2];
9867 /* If the operation is not commutative, we can't do anything. */
9868 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9871 /* Highest priority is that src1 should match dst. */
9872 if (rtx_equal_p (dst, src1))
9874 if (rtx_equal_p (dst, src2))
9877 /* Next highest priority is that immediate constants come second. */
9878 if (immediate_operand (src2, mode))
9880 if (immediate_operand (src1, mode))
9883 /* Lowest priority is that memory references should come second. */
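/* Example: for the commutative (plus:SI (const_int 1) (reg 60)) with
   destination (reg 60), this returns true, so that after the swap src1
   matches the destination and the immediate comes second, per the
   priorities above.  */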
9893 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
9894 destination to use for the operation. If different from the true
9895 destination in operands[0], a copy operation will be required. */
9898 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
9901 rtx dst = operands[0];
9902 rtx src1 = operands[1];
9903 rtx src2 = operands[2];
9905 /* Canonicalize operand order. */
9906 if (ix86_swap_binary_operands_p (code, mode, operands))
9913 /* Both source operands cannot be in memory. */
9914 if (MEM_P (src1) && MEM_P (src2))
9916 /* Optimization: Only read from memory once. */
9917 if (rtx_equal_p (src1, src2))
9919 src2 = force_reg (mode, src2);
9923 src2 = force_reg (mode, src2);
9926 /* If the destination is memory, and we do not have matching source
9927 operands, do things in registers. */
9928 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
9929 dst = gen_reg_rtx (mode);
9931 /* Source 1 cannot be a constant. */
9932 if (CONSTANT_P (src1))
9933 src1 = force_reg (mode, src1);
9935 /* Source 1 cannot be a non-matching memory. */
9936 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
9937 src1 = force_reg (mode, src1);
9944 /* Similarly, but assume that the destination has already been set up properly. */
9948 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
9949 enum machine_mode mode, rtx operands[])
9951 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
9952 gcc_assert (dst == operands[0]);
9955 /* Attempt to expand a binary operator. Make the expansion closer to the
9956 actual machine than just general_operand, which will allow 3 separate
9957 memory references (one output, two inputs) in a single insn. */
9960 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
9963 rtx src1, src2, dst, op, clob;
9965 dst = ix86_fixup_binary_operands (code, mode, operands);
9969 /* Emit the instruction. */
9971 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
9972 if (reload_in_progress)
9974 /* Reload doesn't know about the flags register, and doesn't know that
9975 it doesn't want to clobber it. We can only do this with PLUS. */
9976 gcc_assert (code == PLUS);
9981 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9982 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9985 /* Fix up the destination if needed. */
9986 if (dst != operands[0])
9987 emit_move_insn (operands[0], dst);
9990 /* Return TRUE or FALSE depending on whether the binary operator meets the
9991 appropriate constraints. */
9994 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
9997 rtx dst = operands[0];
9998 rtx src1 = operands[1];
9999 rtx src2 = operands[2];
10001 /* Both source operands cannot be in memory. */
10002 if (MEM_P (src1) && MEM_P (src2))
10005 /* Canonicalize operand order for commutative operators. */
10006 if (ix86_swap_binary_operands_p (code, mode, operands))
10013 /* If the destination is memory, we must have a matching source operand. */
10014 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
10017 /* Source 1 cannot be a constant. */
10018 if (CONSTANT_P (src1))
10021 /* Source 1 cannot be a non-matching memory. */
10022 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
10028 /* Attempt to expand a unary operator. Make the expansion closer to the
10029 actual machine than just general_operand, which will allow 2 separate
10030 memory references (one output, one input) in a single insn. */
10033 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
10036 int matching_memory;
10037 rtx src, dst, op, clob;
10042 /* If the destination is memory, and we do not have matching source
10043 operands, do things in registers. */
10044 matching_memory = 0;
10047 if (rtx_equal_p (dst, src))
10048 matching_memory = 1;
10050 dst = gen_reg_rtx (mode);
10053 /* When source operand is memory, destination must match. */
10054 if (MEM_P (src) && !matching_memory)
10055 src = force_reg (mode, src);
10057 /* Emit the instruction. */
10059 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
10060 if (reload_in_progress || code == NOT)
10062 /* Reload doesn't know about the flags register, and doesn't know that
10063 it doesn't want to clobber it. */
10064 gcc_assert (code == NOT);
10069 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
10070 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
10073 /* Fix up the destination if needed. */
10074 if (dst != operands[0])
10075 emit_move_insn (operands[0], dst);
10078 /* Return TRUE or FALSE depending on whether the unary operator meets the
10079 appropriate constraints. */
10082 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
10083 enum machine_mode mode ATTRIBUTE_UNUSED,
10084 rtx operands[2] ATTRIBUTE_UNUSED)
10086 /* If one of the operands is memory, source and destination must match. */
10087 if ((MEM_P (operands[0])
10088 || MEM_P (operands[1]))
10089 && ! rtx_equal_p (operands[0], operands[1]))
10094 /* Post-reload splitter for converting an SF or DFmode value in an
10095 SSE register into an unsigned SImode. */
10098 ix86_split_convert_uns_si_sse (rtx operands[])
10100 enum machine_mode vecmode;
10101 rtx value, large, zero_or_two31, input, two31, x;
10103 large = operands[1];
10104 zero_or_two31 = operands[2];
10105 input = operands[3];
10106 two31 = operands[4];
10107 vecmode = GET_MODE (large);
10108 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
10110 /* Load up the value into the low element. We must ensure that the other
10111 elements are valid floats -- zero is the easiest such value. */
10114 if (vecmode == V4SFmode)
10115 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
10117 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
10121 input = gen_rtx_REG (vecmode, REGNO (input));
10122 emit_move_insn (value, CONST0_RTX (vecmode));
10123 if (vecmode == V4SFmode)
10124 emit_insn (gen_sse_movss (value, value, input));
10126 emit_insn (gen_sse2_movsd (value, value, input));
10129 emit_move_insn (large, two31);
10130 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
10132 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
10133 emit_insn (gen_rtx_SET (VOIDmode, large, x));
10135 x = gen_rtx_AND (vecmode, zero_or_two31, large);
10136 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
10138 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
10139 emit_insn (gen_rtx_SET (VOIDmode, value, x));
10141 large = gen_rtx_REG (V4SImode, REGNO (large));
10142 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
10144 x = gen_rtx_REG (V4SImode, REGNO (value));
10145 if (vecmode == V4SFmode)
10146 emit_insn (gen_sse2_cvttps2dq (x, value));
10148 emit_insn (gen_sse2_cvttpd2dq (x, value));
10151 emit_insn (gen_xorv4si3 (value, value, large));
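/* Worked example (a sketch): for an SFmode input of 3000000000.0,
   which exceeds 0x7fffffff, the compare yields an all-ones mask, so
   0x1.0p31 is subtracted before the signed conversion:
   3000000000 - 2147483648 = 852516352 = 0x32d05e00.  The mask shifted
   left by 31 is 0x80000000, and the final xor restores
   0xb2d05e00 = 3000000000.  */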
10154 /* Convert an unsigned DImode value into a DFmode, using only SSE.
10155 Expects the 64-bit DImode to be supplied in a pair of integral
10156 registers. Requires SSE2; will use SSE3 if available. For x86_32,
10157 -mfpmath=sse, !optimize_size only. */
10160 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
10162 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
10163 rtx int_xmm, fp_xmm;
10164 rtx biases, exponents;
10167 int_xmm = gen_reg_rtx (V4SImode);
10168 if (TARGET_INTER_UNIT_MOVES)
10169 emit_insn (gen_movdi_to_sse (int_xmm, input));
10170 else if (TARGET_SSE_SPLIT_REGS)
10172 emit_insn (gen_rtx_CLOBBER (VOIDmode, int_xmm));
10173 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
10177 x = gen_reg_rtx (V2DImode);
10178 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
10179 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
10182 x = gen_rtx_CONST_VECTOR (V4SImode,
10183 gen_rtvec (4, GEN_INT (0x43300000UL),
10184 GEN_INT (0x45300000UL),
10185 const0_rtx, const0_rtx));
10186 exponents = validize_mem (force_const_mem (V4SImode, x));
10188 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
10189 emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
10191 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_lo_xmm)
10192 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
10193 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
10194 (0x1.0p84 + double(fp_value_hi_xmm)).
10195 Note these exponents differ by 32. */
10197 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
10199 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
10200 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
10201 real_ldexp (&bias_lo_rvt, &dconst1, 52);
10202 real_ldexp (&bias_hi_rvt, &dconst1, 84);
10203 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
10204 x = const_double_from_real_value (bias_hi_rvt, DFmode);
10205 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
10206 biases = validize_mem (force_const_mem (V2DFmode, biases));
10207 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
10209 /* Add the upper and lower DFmode values together. */
10211 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
10214 x = copy_to_mode_reg (V2DFmode, fp_xmm);
10215 emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
10216 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
10219 ix86_expand_vector_extract (false, target, fp_xmm, 0);
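/* Worked example (a sketch): for the input 0x100000002 we have lo = 2
   and hi = 1.  After the punpckldq the two doubles are 0x1.0p52 + 2
   and 0x1.0p84 + 1 * 0x1.0p32; subtracting the biases leaves 2.0 and
   4294967296.0, and the final add produces 4294967298.0, i.e.
   0x100000002.  */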
10222 /* Convert an unsigned SImode value into a DFmode. Only currently used
10223 for SSE, but applicable anywhere. */
10226 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
10228 REAL_VALUE_TYPE TWO31r;
10231 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
10232 NULL, 1, OPTAB_DIRECT);
10234 fp = gen_reg_rtx (DFmode);
10235 emit_insn (gen_floatsidf2 (fp, x));
10237 real_ldexp (&TWO31r, &dconst1, 31);
10238 x = const_double_from_real_value (TWO31r, DFmode);
10240 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
10242 emit_move_insn (target, x);
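/* Worked example (a sketch): for the unsigned input 4000000000, the
   wrapping add of -2**31 gives the signed value 1852516352, which
   floatsidf converts exactly; adding back 0x1.0p31 yields
   4000000000.0.  */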
10245 /* Convert a signed DImode value into a DFmode. Only used for SSE in
10246 32-bit mode; otherwise we have a direct convert instruction. */
10249 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
10251 REAL_VALUE_TYPE TWO32r;
10252 rtx fp_lo, fp_hi, x;
10254 fp_lo = gen_reg_rtx (DFmode);
10255 fp_hi = gen_reg_rtx (DFmode);
10257 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
10259 real_ldexp (&TWO32r, &dconst1, 32);
10260 x = const_double_from_real_value (TWO32r, DFmode);
10261 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
10263 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
10265 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
10268 emit_move_insn (target, x);
10271 /* Convert an unsigned SImode value into a SFmode, using only SSE.
10272 For x86_32, -mfpmath=sse, !optimize_size only. */
10274 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
10276 REAL_VALUE_TYPE ONE16r;
10277 rtx fp_hi, fp_lo, int_hi, int_lo, x;
10279 real_ldexp (&ONE16r, &dconst1, 16);
10280 x = const_double_from_real_value (ONE16r, SFmode);
10281 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
10282 NULL, 0, OPTAB_DIRECT);
10283 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
10284 NULL, 0, OPTAB_DIRECT);
10285 fp_hi = gen_reg_rtx (SFmode);
10286 fp_lo = gen_reg_rtx (SFmode);
10287 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
10288 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
10289 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
10291 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
10293 if (!rtx_equal_p (target, fp_hi))
10294 emit_move_insn (target, fp_hi);
10297 /* A subroutine of ix86_build_signbit_mask_vector. If VECT is true,
10298 then replicate the value for all elements of the vector register. */
10302 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
10309 v = gen_rtvec (4, value, value, value, value);
10311 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
10312 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
10313 return gen_rtx_CONST_VECTOR (V4SFmode, v);
10317 v = gen_rtvec (2, value, value);
10319 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
10320 return gen_rtx_CONST_VECTOR (V2DFmode, v);
10323 gcc_unreachable ();
10327 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
10328 Create a mask for the sign bit in MODE for an SSE register. If VECT is
10329 true, then replicate the mask for all elements of the vector register.
10330 If INVERT is true, then create a mask excluding the sign bit. */
10333 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
10335 enum machine_mode vec_mode;
10336 HOST_WIDE_INT hi, lo;
10341 /* Find the sign bit, sign extended to 2*HWI. */
10342 if (mode == SFmode)
10343 lo = 0x80000000, hi = lo < 0;
10344 else if (HOST_BITS_PER_WIDE_INT >= 64)
10345 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
10347 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
10350 lo = ~lo, hi = ~hi;
10352 /* Force this value into the low part of a fp vector constant. */
10353 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
10354 mask = gen_lowpart (mode, mask);
10356 v = ix86_build_const_vector (mode, vect, mask);
10357 vec_mode = (mode == SFmode) ? V4SFmode : V2DFmode;
10358 return force_reg (vec_mode, v);
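/* Concretely: for SFmode the mask replicates 0x80000000 per element
   (0x7fffffff when INVERT), and for DFmode 0x8000000000000000.  NEG
   is then a bitwise XOR with the sign mask and ABS an AND with the
   inverted mask, as done in ix86_expand_fp_absneg_operator below.  */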
10361 /* Generate code for floating point ABS or NEG. */
10364 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
10367 rtx mask, set, use, clob, dst, src;
10368 bool matching_memory;
10369 bool use_sse = false;
10370 bool vector_mode = VECTOR_MODE_P (mode);
10371 enum machine_mode elt_mode = mode;
10375 elt_mode = GET_MODE_INNER (mode);
10378 else if (TARGET_SSE_MATH)
10379 use_sse = SSE_FLOAT_MODE_P (mode);
10381 /* NEG and ABS performed with SSE use bitwise mask operations.
10382 Create the appropriate mask now. */
10384 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
10391 /* If the destination is memory, and we don't have matching source
10392 operands or we're using the x87, do things in registers. */
10393 matching_memory = false;
10396 if (use_sse && rtx_equal_p (dst, src))
10397 matching_memory = true;
10399 dst = gen_reg_rtx (mode);
10401 if (MEM_P (src) && !matching_memory)
10402 src = force_reg (mode, src);
10406 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
10407 set = gen_rtx_SET (VOIDmode, dst, set);
10412 set = gen_rtx_fmt_e (code, mode, src);
10413 set = gen_rtx_SET (VOIDmode, dst, set);
10416 use = gen_rtx_USE (VOIDmode, mask);
10417 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
10418 emit_insn (gen_rtx_PARALLEL (VOIDmode,
10419 gen_rtvec (3, set, use, clob)));
10425 if (dst != operands[0])
10426 emit_move_insn (operands[0], dst);
10429 /* Expand a copysign operation. Special case operand 0 being a constant. */
10432 ix86_expand_copysign (rtx operands[])
10434 enum machine_mode mode, vmode;
10435 rtx dest, op0, op1, mask, nmask;
10437 dest = operands[0];
10441 mode = GET_MODE (dest);
10442 vmode = mode == SFmode ? V4SFmode : V2DFmode;
10444 if (GET_CODE (op0) == CONST_DOUBLE)
10448 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
10449 op0 = simplify_unary_operation (ABS, mode, op0, mode);
10451 if (op0 == CONST0_RTX (mode))
10452 op0 = CONST0_RTX (vmode);
10455 if (mode == SFmode)
10456 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
10457 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
10459 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
10460 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
10463 mask = ix86_build_signbit_mask (mode, 0, 0);
10465 if (mode == SFmode)
10466 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
10468 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
10472 nmask = ix86_build_signbit_mask (mode, 0, 1);
10473 mask = ix86_build_signbit_mask (mode, 0, 0);
10475 if (mode == SFmode)
10476 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
10478 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
10482 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
10483 be a constant, and so has already been expanded into a vector constant. */
10486 ix86_split_copysign_const (rtx operands[])
10488 enum machine_mode mode, vmode;
10489 rtx dest, op0, op1, mask, x;
10491 dest = operands[0];
10494 mask = operands[3];
10496 mode = GET_MODE (dest);
10497 vmode = GET_MODE (mask);
10499 dest = simplify_gen_subreg (vmode, dest, mode, 0);
10500 x = gen_rtx_AND (vmode, dest, mask);
10501 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10503 if (op0 != CONST0_RTX (vmode))
10505 x = gen_rtx_IOR (vmode, dest, op0);
10506 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10510 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
10511 so we have to do two masks. */
10514 ix86_split_copysign_var (rtx operands[])
10516 enum machine_mode mode, vmode;
10517 rtx dest, scratch, op0, op1, mask, nmask, x;
10519 dest = operands[0];
10520 scratch = operands[1];
10523 nmask = operands[4];
10524 mask = operands[5];
10526 mode = GET_MODE (dest);
10527 vmode = GET_MODE (mask);
10529 if (rtx_equal_p (op0, op1))
10531 /* Shouldn't happen often (it's useless, obviously), but when it does
10532 we'd generate incorrect code if we continue below. */
10533 emit_move_insn (dest, op0);
10537 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
10539 gcc_assert (REGNO (op1) == REGNO (scratch));
10541 x = gen_rtx_AND (vmode, scratch, mask);
10542 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
10545 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
10546 x = gen_rtx_NOT (vmode, dest);
10547 x = gen_rtx_AND (vmode, x, op0);
10548 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10552 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
10554 x = gen_rtx_AND (vmode, scratch, mask);
10556 else /* alternative 2,4 */
10558 gcc_assert (REGNO (mask) == REGNO (scratch));
10559 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
10560 x = gen_rtx_AND (vmode, scratch, op1);
10562 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
10564 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
10566 dest = simplify_gen_subreg (vmode, op0, mode, 0);
10567 x = gen_rtx_AND (vmode, dest, nmask);
10569 else /* alternative 3,4 */
10571 gcc_assert (REGNO (nmask) == REGNO (dest));
10573 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
10574 x = gen_rtx_AND (vmode, dest, op0);
10576 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10579 x = gen_rtx_IOR (vmode, dest, scratch);
10580 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10583 /* Return TRUE or FALSE depending on whether the first SET in INSN
10584 has source and destination with matching CC modes, and that the
10585 CC mode is at least as constrained as REQ_MODE. */
10588 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
10591 enum machine_mode set_mode;
10593 set = PATTERN (insn);
10594 if (GET_CODE (set) == PARALLEL)
10595 set = XVECEXP (set, 0, 0);
10596 gcc_assert (GET_CODE (set) == SET);
10597 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
10599 set_mode = GET_MODE (SET_DEST (set));
10603 if (req_mode != CCNOmode
10604 && (req_mode != CCmode
10605 || XEXP (SET_SRC (set), 1) != const0_rtx))
10609 if (req_mode == CCGCmode)
10613 if (req_mode == CCGOCmode || req_mode == CCNOmode)
10617 if (req_mode == CCZmode)
10624 gcc_unreachable ();
10627 return (GET_MODE (SET_SRC (set)) == set_mode);
10630 /* Generate insn patterns to do an integer compare of OPERANDS. */
10633 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
10635 enum machine_mode cmpmode;
10638 cmpmode = SELECT_CC_MODE (code, op0, op1);
10639 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
10641 /* This is very simple, but making the interface the same as in the
10642 FP case makes the rest of the code easier. */
10643 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
10644 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
10646 /* Return the test that should be put into the flags user, i.e.
10647 the bcc, scc, or cmov instruction. */
10648 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
10651 /* Figure out whether to use ordered or unordered fp comparisons.
10652 Return the appropriate mode to use. */
10655 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
10657 /* ??? In order to make all comparisons reversible, we do all comparisons
10658 non-trapping when compiling for IEEE. Once gcc is able to distinguish
10659 all forms of trapping and nontrapping comparisons, we can make inequality
10660 comparisons trapping again, since it results in better code when using
10661 FCOM based compares. */
10662 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
10666 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
10668 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10669 return ix86_fp_compare_mode (code);
10672 /* Only zero flag is needed. */
10673 case EQ: /* ZF=0 */
10674 case NE: /* ZF!=0 */
10676 /* Codes needing carry flag. */
10677 case GEU: /* CF=0 */
10678 case GTU: /* CF=0 & ZF=0 */
10679 case LTU: /* CF=1 */
10680 case LEU: /* CF=1 | ZF=1 */
10682 /* Codes possibly doable only with sign flag when
10683 comparing against zero. */
10684 case GE: /* SF=OF or SF=0 */
10685 case LT: /* SF<>OF or SF=1 */
10686 if (op1 == const0_rtx)
10689 /* For other cases the carry flag is not required. */
10691 /* Codes doable only with the sign flag when comparing
10692 against zero, but we miss the jump instruction for it,
10693 so we need to use relational tests against the overflow
10694 flag, which thus needs to be zero. */
10695 case GT: /* ZF=0 & SF=OF */
10696 case LE: /* ZF=1 | SF<>OF */
10697 if (op1 == const0_rtx)
10701 /* The strcmp pattern does (use flags), and combine may ask us for a proper mode. */
10706 gcc_unreachable ();
10710 /* Return the fixed registers used for condition codes. */
10713 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
10720 /* If two condition code modes are compatible, return a condition code
10721 mode which is compatible with both. Otherwise, return VOIDmode. */
10724 static enum machine_mode
10725 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
10730 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
10733 if ((m1 == CCGCmode && m2 == CCGOCmode)
10734 || (m1 == CCGOCmode && m2 == CCGCmode))
10740 gcc_unreachable ();
10762 /* These are only compatible with themselves, which we already checked. */
10768 /* Split comparison code CODE into comparisons we can do using branch
10769 instructions. BYPASS_CODE is the comparison code for the branch that will
10770 branch around FIRST_CODE and SECOND_CODE. If one of the branches
10771 is not required, its value is set to UNKNOWN.
10772 We never require more than two branches. */
10775 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
10776 enum rtx_code *first_code,
10777 enum rtx_code *second_code)
10779 *first_code = code;
10780 *bypass_code = UNKNOWN;
10781 *second_code = UNKNOWN;
10783 /* The fcomi comparison sets flags as follows:
10793 case GT: /* GTU - CF=0 & ZF=0 */
10794 case GE: /* GEU - CF=0 */
10795 case ORDERED: /* PF=0 */
10796 case UNORDERED: /* PF=1 */
10797 case UNEQ: /* EQ - ZF=1 */
10798 case UNLT: /* LTU - CF=1 */
10799 case UNLE: /* LEU - CF=1 | ZF=1 */
10800 case LTGT: /* EQ - ZF=0 */
10802 case LT: /* LTU - CF=1 - fails on unordered */
10803 *first_code = UNLT;
10804 *bypass_code = UNORDERED;
10806 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
10807 *first_code = UNLE;
10808 *bypass_code = UNORDERED;
10810 case EQ: /* EQ - ZF=1 - fails on unordered */
10811 *first_code = UNEQ;
10812 *bypass_code = UNORDERED;
10814 case NE: /* NE - ZF=0 - fails on unordered */
10815 *first_code = LTGT;
10816 *second_code = UNORDERED;
10818 case UNGE: /* GEU - CF=0 - fails on unordered */
10820 *second_code = UNORDERED;
10822 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
10824 *second_code = UNORDERED;
10827 gcc_unreachable ();
10829 if (!TARGET_IEEE_FP)
10831 *second_code = UNKNOWN;
10832 *bypass_code = UNKNOWN;
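/* Worked example: with TARGET_IEEE_FP, a LT comparison (which fails on
   unordered operands) is split into first_code = UNLT with
   bypass_code = UNORDERED: branch around the UNLT test whenever the
   operands compare unordered.  */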
10836 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
10837 All following functions use the number of instructions as a cost metric.
10838 In the future this should be tweaked to compute bytes for optimize_size and
10839 take into account the performance of various instructions on various CPUs. */
10841 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
10843 if (!TARGET_IEEE_FP)
10845 /* The cost of code output by ix86_expand_fp_compare. */
10869 gcc_unreachable ();
10873 /* Return cost of comparison done using fcomi operation.
10874 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10876 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
10878 enum rtx_code bypass_code, first_code, second_code;
10879 /* Return an arbitrarily high cost when the instruction is not supported -
10880 this prevents gcc from using it. */
10883 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10884 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
10887 /* Return cost of comparison done using sahf operation.
10888 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10890 ix86_fp_comparison_sahf_cost (enum rtx_code code)
10892 enum rtx_code bypass_code, first_code, second_code;
10893 /* Return an arbitrarily high cost when the instruction is not preferred -
10894 this prevents gcc from using it. */
10895 if (!(TARGET_SAHF && (TARGET_USE_SAHF || optimize_size)))
10897 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10898 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
10901 /* Compute cost of the comparison done using any method.
10902 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10904 ix86_fp_comparison_cost (enum rtx_code code)
10906 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
10909 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
10910 sahf_cost = ix86_fp_comparison_sahf_cost (code);
10912 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
10913 if (min > sahf_cost)
10915 if (min > fcomi_cost)
10920 /* Return true if we should use an FCOMI instruction for this fp comparison. */
10924 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
10926 enum rtx_code swapped_code = swap_condition (code);
10928 return ((ix86_fp_comparison_cost (code)
10929 == ix86_fp_comparison_fcomi_cost (code))
10930 || (ix86_fp_comparison_cost (swapped_code)
10931 == ix86_fp_comparison_fcomi_cost (swapped_code)));
10934 /* Swap, force into registers, or otherwise massage the two operands
10935 to a fp comparison. The operands are updated in place; the new
10936 comparison code is returned. */
10938 static enum rtx_code
10939 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
10941 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
10942 rtx op0 = *pop0, op1 = *pop1;
10943 enum machine_mode op_mode = GET_MODE (op0);
10944 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
10946 /* All of the unordered compare instructions only work on registers.
10947 The same is true of the fcomi compare instructions. The XFmode
10948 compare instructions require registers except when comparing
10949 against zero or when converting operand 1 from fixed point to floating point. */
10953 && (fpcmp_mode == CCFPUmode
10954 || (op_mode == XFmode
10955 && ! (standard_80387_constant_p (op0) == 1
10956 || standard_80387_constant_p (op1) == 1)
10957 && GET_CODE (op1) != FLOAT)
10958 || ix86_use_fcomi_compare (code)))
10960 op0 = force_reg (op_mode, op0);
10961 op1 = force_reg (op_mode, op1);
10965 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
10966 things around if they appear profitable, otherwise force op0
10967 into a register. */
10969 if (standard_80387_constant_p (op0) == 0
10971 && ! (standard_80387_constant_p (op1) == 0
10975 tmp = op0, op0 = op1, op1 = tmp;
10976 code = swap_condition (code);
10980 op0 = force_reg (op_mode, op0);
10982 if (CONSTANT_P (op1))
10984 int tmp = standard_80387_constant_p (op1);
10986 op1 = validize_mem (force_const_mem (op_mode, op1));
10990 op1 = force_reg (op_mode, op1);
10993 op1 = force_reg (op_mode, op1);
10997 /* Try to rearrange the comparison to make it cheaper. */
10998 if (ix86_fp_comparison_cost (code)
10999 > ix86_fp_comparison_cost (swap_condition (code))
11000 && (REG_P (op1) || !no_new_pseudos))
11003 tmp = op0, op0 = op1, op1 = tmp;
11004 code = swap_condition (code);
11006 op0 = force_reg (op_mode, op0);
11014 /* Convert comparison codes we use to represent FP comparison to integer
code that will result in a proper branch.  Return UNKNOWN if no such code
is available.  */
11019 ix86_fp_compare_code_to_integer (enum rtx_code code)
11048 /* Generate insn patterns to do a floating point compare of OPERANDS. */
11051 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
11052 rtx *second_test, rtx *bypass_test)
11054 enum machine_mode fpcmp_mode, intcmp_mode;
11056 int cost = ix86_fp_comparison_cost (code);
11057 enum rtx_code bypass_code, first_code, second_code;
11059 fpcmp_mode = ix86_fp_compare_mode (code);
11060 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
11063 *second_test = NULL_RTX;
11065 *bypass_test = NULL_RTX;
11067 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
11069 /* Do fcomi/sahf based test when profitable. */
11070 if ((TARGET_CMOVE || TARGET_SAHF)
11071 && (bypass_code == UNKNOWN || bypass_test)
11072 && (second_code == UNKNOWN || second_test)
11073 && ix86_fp_comparison_arithmetics_cost (code) > cost)
11077 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
11078 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
11084 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
11085 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
11087 scratch = gen_reg_rtx (HImode);
11088 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
11089 emit_insn (gen_x86_sahf_1 (scratch));
11092 /* The FP codes work out to act like unsigned. */
11093 intcmp_mode = fpcmp_mode;
11095 if (bypass_code != UNKNOWN)
11096 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
11097 gen_rtx_REG (intcmp_mode, FLAGS_REG),
11099 if (second_code != UNKNOWN)
11100 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
11101 gen_rtx_REG (intcmp_mode, FLAGS_REG),
11106 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
11107 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
11108 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
11110 scratch = gen_reg_rtx (HImode);
11111 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
11113 /* In the unordered case, we have to check C2 for NaN's, which
11114 doesn't happen to work out to anything nice combination-wise.
11115 So do some bit twiddling on the value we've got in AH to come
11116 up with an appropriate set of condition codes. */
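/* A sketch of what the masks below test, assuming the usual 8087
   status-word layout once fnstsw has copied it into AX:
   C0 = 0x01 (AH bit 0, "below"), C2 = 0x04 (bit 2, "unordered"),
   C3 = 0x40 (bit 6, "equal").  E.g. for a > b with IEEE NaNs honored:

	fucomp	%st(1)
	fnstsw	%ax
	testb	$0x45, %ah	; ZF set iff C0, C2 and C3 all clear,
	je	.Ltrue		; i.e. operands ordered and a > b.  */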
11118 intcmp_mode = CCNOmode;
11123 if (code == GT || !TARGET_IEEE_FP)
11125 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
11130 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11131 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
11132 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
11133 intcmp_mode = CCmode;
11139 if (code == LT && TARGET_IEEE_FP)
11141 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11142 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
11143 intcmp_mode = CCmode;
11148 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
11154 if (code == GE || !TARGET_IEEE_FP)
11156 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
11161 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11162 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
11169 if (code == LE && TARGET_IEEE_FP)
11171 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11172 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
11173 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
11174 intcmp_mode = CCmode;
11179 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
11185 if (code == EQ && TARGET_IEEE_FP)
11187 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11188 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
11189 intcmp_mode = CCmode;
11194 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
11201 if (code == NE && TARGET_IEEE_FP)
11203 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11204 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
11210 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
11216 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
11220 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
11225 gcc_unreachable ();
11229 /* Return the test that should be put into the flags user, i.e.
11230 the bcc, scc, or cmov instruction. */
11231 return gen_rtx_fmt_ee (code, VOIDmode,
11232 gen_rtx_REG (intcmp_mode, FLAGS_REG),
11237 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
11240 op0 = ix86_compare_op0;
11241 op1 = ix86_compare_op1;
11244 *second_test = NULL_RTX;
11246 *bypass_test = NULL_RTX;
11248 if (ix86_compare_emitted)
11250 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
11251 ix86_compare_emitted = NULL_RTX;
11253 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
11254 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
11255 second_test, bypass_test);
11257 ret = ix86_expand_int_compare (code, op0, op1);
/* Return true if the CODE will result in a nontrivial jump sequence.  */
11264 ix86_fp_jump_nontrivial_p (enum rtx_code code)
11266 enum rtx_code bypass_code, first_code, second_code;
11269 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
11270 return bypass_code != UNKNOWN || second_code != UNKNOWN;
11274 ix86_expand_branch (enum rtx_code code, rtx label)
11278 /* If we have emitted a compare insn, go straight to simple.
ix86_expand_compare won't emit anything if ix86_compare_emitted
is non-NULL.  */
11281 if (ix86_compare_emitted)
11284 switch (GET_MODE (ix86_compare_op0))
11290 tmp = ix86_expand_compare (code, NULL, NULL);
11291 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11292 gen_rtx_LABEL_REF (VOIDmode, label),
11294 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
11303 enum rtx_code bypass_code, first_code, second_code;
11305 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
11306 &ix86_compare_op1);
11308 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
/* Check whether we will use the natural sequence with one jump.  If
   so, we can expand the jump early.  Otherwise delay expansion by
   creating a compound insn so as not to confuse the optimizers.  */
11313 if (bypass_code == UNKNOWN && second_code == UNKNOWN
11316 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
11317 gen_rtx_LABEL_REF (VOIDmode, label),
11318 pc_rtx, NULL_RTX, NULL_RTX);
11322 tmp = gen_rtx_fmt_ee (code, VOIDmode,
11323 ix86_compare_op0, ix86_compare_op1);
11324 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11325 gen_rtx_LABEL_REF (VOIDmode, label),
11327 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
11329 use_fcomi = ix86_use_fcomi_compare (code);
11330 vec = rtvec_alloc (3 + !use_fcomi);
11331 RTVEC_ELT (vec, 0) = tmp;
11333 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
11335 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
11338 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
11340 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
11349 /* Expand DImode branch into multiple compare+branch. */
11351 rtx lo[2], hi[2], label2;
11352 enum rtx_code code1, code2, code3;
11353 enum machine_mode submode;
11355 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
11357 tmp = ix86_compare_op0;
11358 ix86_compare_op0 = ix86_compare_op1;
11359 ix86_compare_op1 = tmp;
11360 code = swap_condition (code);
11362 if (GET_MODE (ix86_compare_op0) == DImode)
11364 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
11365 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
11370 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
11371 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
11375 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
11376 avoid two branches. This costs one extra insn, so disable when
11377 optimizing for size. */
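/* A sketch of what this produces for a DImode a == b
   (hypothetical register assignment):

	xorl	hi(b), hi(a)	; hi(a) ^= hi(b)
	xorl	lo(b), lo(a)	; lo(a) ^= lo(b)
	orl	hi(a), lo(a)	; zero iff both halves were equal
	jz	.Lequal

   The checks below skip an xor for each half of op1 that is zero.  */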
11379 if ((code == EQ || code == NE)
11381 || hi[1] == const0_rtx || lo[1] == const0_rtx))
11386 if (hi[1] != const0_rtx)
11387 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
11388 NULL_RTX, 0, OPTAB_WIDEN);
11391 if (lo[1] != const0_rtx)
11392 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
11393 NULL_RTX, 0, OPTAB_WIDEN);
11395 tmp = expand_binop (submode, ior_optab, xor1, xor0,
11396 NULL_RTX, 0, OPTAB_WIDEN);
11398 ix86_compare_op0 = tmp;
11399 ix86_compare_op1 = const0_rtx;
11400 ix86_expand_branch (code, label);
/* Otherwise, if we are doing less-than or greater-or-equal-than,
   op1 is a constant, and the low word is zero, then we can just
   examine the high word.  */
11408 if (CONST_INT_P (hi[1]) && lo[1] == const0_rtx)
11411 case LT: case LTU: case GE: case GEU:
11412 ix86_compare_op0 = hi[0];
11413 ix86_compare_op1 = hi[1];
11414 ix86_expand_branch (code, label);
11420 /* Otherwise, we need two or three jumps. */
11422 label2 = gen_label_rtx ();
11425 code2 = swap_condition (code);
11426 code3 = unsigned_condition (code);
11430 case LT: case GT: case LTU: case GTU:
11433 case LE: code1 = LT; code2 = GT; break;
11434 case GE: code1 = GT; code2 = LT; break;
11435 case LEU: code1 = LTU; code2 = GTU; break;
11436 case GEU: code1 = GTU; code2 = LTU; break;
11438 case EQ: code1 = UNKNOWN; code2 = NE; break;
11439 case NE: code2 = UNKNOWN; break;
11442 gcc_unreachable ();
11447 * if (hi(a) < hi(b)) goto true;
11448 * if (hi(a) > hi(b)) goto false;
11449 * if (lo(a) < lo(b)) goto true;
11453 ix86_compare_op0 = hi[0];
11454 ix86_compare_op1 = hi[1];
11456 if (code1 != UNKNOWN)
11457 ix86_expand_branch (code1, label);
11458 if (code2 != UNKNOWN)
11459 ix86_expand_branch (code2, label2);
11461 ix86_compare_op0 = lo[0];
11462 ix86_compare_op1 = lo[1];
11463 ix86_expand_branch (code3, label);
11465 if (code2 != UNKNOWN)
11466 emit_label (label2);
11471 gcc_unreachable ();
11475 /* Split branch based on floating point condition. */
11477 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
11478 rtx target1, rtx target2, rtx tmp, rtx pushed)
11480 rtx second, bypass;
11481 rtx label = NULL_RTX;
11483 int bypass_probability = -1, second_probability = -1, probability = -1;
11486 if (target2 != pc_rtx)
11489 code = reverse_condition_maybe_unordered (code);
11494 condition = ix86_expand_fp_compare (code, op1, op2,
11495 tmp, &second, &bypass);
11497 /* Remove pushed operand from stack. */
11499 ix86_free_from_memory (GET_MODE (pushed));
11501 if (split_branch_probability >= 0)
11503 /* Distribute the probabilities across the jumps.
Assume the BYPASS and SECOND to always test for UNORDERED.  */
11506 probability = split_branch_probability;
/* A value of 1 is low enough that the probability needs no
   updating.  Later we may run some experiments and see
   if unordered values are more frequent in practice.  */
11512 bypass_probability = 1;
11514 second_probability = 1;
11516 if (bypass != NULL_RTX)
11518 label = gen_label_rtx ();
11519 i = emit_jump_insn (gen_rtx_SET
11521 gen_rtx_IF_THEN_ELSE (VOIDmode,
11523 gen_rtx_LABEL_REF (VOIDmode,
11526 if (bypass_probability >= 0)
11528 = gen_rtx_EXPR_LIST (REG_BR_PROB,
11529 GEN_INT (bypass_probability),
11532 i = emit_jump_insn (gen_rtx_SET
11534 gen_rtx_IF_THEN_ELSE (VOIDmode,
11535 condition, target1, target2)));
11536 if (probability >= 0)
11538 = gen_rtx_EXPR_LIST (REG_BR_PROB,
11539 GEN_INT (probability),
11541 if (second != NULL_RTX)
11543 i = emit_jump_insn (gen_rtx_SET
11545 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
11547 if (second_probability >= 0)
11549 = gen_rtx_EXPR_LIST (REG_BR_PROB,
11550 GEN_INT (second_probability),
11553 if (label != NULL_RTX)
11554 emit_label (label);
11558 ix86_expand_setcc (enum rtx_code code, rtx dest)
11560 rtx ret, tmp, tmpreg, equiv;
11561 rtx second_test, bypass_test;
11563 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
11564 return 0; /* FAIL */
11566 gcc_assert (GET_MODE (dest) == QImode);
11568 ret = ix86_expand_compare (code, &second_test, &bypass_test);
11569 PUT_MODE (ret, QImode);
11574 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
11575 if (bypass_test || second_test)
11577 rtx test = second_test;
11579 rtx tmp2 = gen_reg_rtx (QImode);
11582 gcc_assert (!second_test);
11583 test = bypass_test;
11585 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
11587 PUT_MODE (test, QImode);
11588 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
11591 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
11593 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
11596 /* Attach a REG_EQUAL note describing the comparison result. */
11597 if (ix86_compare_op0 && ix86_compare_op1)
11599 equiv = simplify_gen_relational (code, QImode,
11600 GET_MODE (ix86_compare_op0),
11601 ix86_compare_op0, ix86_compare_op1);
11602 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
11605 return 1; /* DONE */
/* Expand a comparison setting or clearing the carry flag.  Return true when
   successful, and set *POP to the comparison for the operation.  */
11611 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
11613 enum machine_mode mode =
11614 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
/* Do not handle DImode compares that go through the special path.  Also we
   can't deal with FP compares yet.  This would be possible to add.  */
11618 if (mode == (TARGET_64BIT ? TImode : DImode))
11620 if (FLOAT_MODE_P (mode))
11622 rtx second_test = NULL, bypass_test = NULL;
11623 rtx compare_op, compare_seq;
/* Shortcut: the following common codes never translate
   into carry flag compares.  */
11626 if (code == EQ || code == NE || code == UNEQ || code == LTGT
11627 || code == ORDERED || code == UNORDERED)
/* These comparisons require the zero flag; swap the operands so they won't.  */
11631 if ((code == GT || code == UNLE || code == LE || code == UNGT)
11632 && !TARGET_IEEE_FP)
11637 code = swap_condition (code);
/* Try to expand the comparison and verify that we end up with a carry flag
   based comparison.  This fails to be true only when we decide to expand the
   comparison using arithmetic, which is not a common scenario.  */
11644 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
11645 &second_test, &bypass_test);
11646 compare_seq = get_insns ();
11649 if (second_test || bypass_test)
11651 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11652 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11653 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
11655 code = GET_CODE (compare_op);
11656 if (code != LTU && code != GEU)
11658 emit_insn (compare_seq);
11662 if (!INTEGRAL_MODE_P (mode))
11670 /* Convert a==0 into (unsigned)a<1. */
11673 if (op1 != const0_rtx)
11676 code = (code == EQ ? LTU : GEU);
/* Convert a>b into b<a or a>=b+1.  */
11682 if (CONST_INT_P (op1))
11684 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
/* Bail out on overflow.  We could still swap the operands, but that
   would force loading the constant into a register.  */
11687 if (op1 == const0_rtx
11688 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
11690 code = (code == GTU ? GEU : LTU);
11697 code = (code == GTU ? LTU : GEU);
11701 /* Convert a>=0 into (unsigned)a<0x80000000. */
11704 if (mode == DImode || op1 != const0_rtx)
11706 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
11707 code = (code == LT ? GEU : LTU);
11711 if (mode == DImode || op1 != constm1_rtx)
11713 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
11714 code = (code == LE ? GEU : LTU);
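/* Worked examples of the rewrites above (32-bit, purely illustrative):

     a == 0    becomes  (unsigned) a < 1            carry set iff a == 0
     a >u 5    becomes  (unsigned) a >= 6           GTU -> GEU on b + 1
     a >= 0    becomes  (unsigned) a < 0x80000000   sign bit via carry

   Each form tests only the carry flag, so the result can feed the
   sbb/adc based sequences used by the callers.  */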
/* Swapping operands may cause a constant to appear as the first operand.  */
11721 if (!nonimmediate_operand (op0, VOIDmode))
11723 if (no_new_pseudos)
11725 op0 = force_reg (mode, op0);
11727 ix86_compare_op0 = op0;
11728 ix86_compare_op1 = op1;
11729 *pop = ix86_expand_compare (code, NULL, NULL);
11730 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
11735 ix86_expand_int_movcc (rtx operands[])
11737 enum rtx_code code = GET_CODE (operands[1]), compare_code;
11738 rtx compare_seq, compare_op;
11739 rtx second_test, bypass_test;
11740 enum machine_mode mode = GET_MODE (operands[0]);
bool sign_bit_compare_p = false;
11744 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11745 compare_seq = get_insns ();
11748 compare_code = GET_CODE (compare_op);
11750 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
11751 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
11752 sign_bit_compare_p = true;
11754 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
11755 HImode insns, we'd be swallowed in word prefix ops. */
11757 if ((mode != HImode || TARGET_FAST_PREFIX)
11758 && (mode != (TARGET_64BIT ? TImode : DImode))
11759 && CONST_INT_P (operands[2])
11760 && CONST_INT_P (operands[3]))
11762 rtx out = operands[0];
11763 HOST_WIDE_INT ct = INTVAL (operands[2]);
11764 HOST_WIDE_INT cf = INTVAL (operands[3]);
11765 HOST_WIDE_INT diff;
/* Sign bit compares are better done using shifts than by using sbb.  */
11770 if (sign_bit_compare_p
11771 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11772 ix86_compare_op1, &compare_op))
11774 /* Detect overlap between destination and compare sources. */
11777 if (!sign_bit_compare_p)
11779 bool fpcmp = false;
11781 compare_code = GET_CODE (compare_op);
11783 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11784 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11787 compare_code = ix86_fp_compare_code_to_integer (compare_code);
/* To simplify the rest of the code, restrict to the GEU case.  */
11791 if (compare_code == LTU)
11793 HOST_WIDE_INT tmp = ct;
11796 compare_code = reverse_condition (compare_code);
11797 code = reverse_condition (code);
11802 PUT_CODE (compare_op,
11803 reverse_condition_maybe_unordered
11804 (GET_CODE (compare_op)));
11806 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11810 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
11811 || reg_overlap_mentioned_p (out, ix86_compare_op1))
11812 tmp = gen_reg_rtx (mode);
11814 if (mode == DImode)
11815 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
11817 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
11821 if (code == GT || code == GE)
11822 code = reverse_condition (code);
11825 HOST_WIDE_INT tmp = ct;
11830 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
11831 ix86_compare_op1, VOIDmode, 0, -1);
11844 tmp = expand_simple_binop (mode, PLUS,
11846 copy_rtx (tmp), 1, OPTAB_DIRECT);
11857 tmp = expand_simple_binop (mode, IOR,
11859 copy_rtx (tmp), 1, OPTAB_DIRECT);
11861 else if (diff == -1 && ct)
11871 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11873 tmp = expand_simple_binop (mode, PLUS,
11874 copy_rtx (tmp), GEN_INT (cf),
11875 copy_rtx (tmp), 1, OPTAB_DIRECT);
/* cmpl op0,op1
 * sbbl dest,dest
 * [notl dest]
 * andl cf - ct, dest
 * [addl ct, dest]  */
11893 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11896 tmp = expand_simple_binop (mode, AND,
11898 gen_int_mode (cf - ct, mode),
11899 copy_rtx (tmp), 1, OPTAB_DIRECT);
11901 tmp = expand_simple_binop (mode, PLUS,
11902 copy_rtx (tmp), GEN_INT (ct),
11903 copy_rtx (tmp), 1, OPTAB_DIRECT);
11906 if (!rtx_equal_p (tmp, out))
11907 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
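/* A concrete instance of the sequence built above, for
   dest = (unsigned) a < (unsigned) b ? 5 : 2, i.e. ct = 5, cf = 2,
   with hypothetical registers:

	cmpl	%ebx, %eax	; carry set iff a < b
	sbbl	%edx, %edx	; dest = a < b ? -1 : 0
	andl	$3, %edx	; mask with ct - cf
	addl	$2, %edx	; add cf back in  */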
11909 return 1; /* DONE */
11915 tmp = ct, ct = cf, cf = tmp;
11917 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
/* We may be reversing an unordered compare to a normal compare; that
   is not valid in general (we may convert a non-trapping condition
   to a trapping one), however on i386 we currently emit all
   comparisons unordered.  */
11923 compare_code = reverse_condition_maybe_unordered (compare_code);
11924 code = reverse_condition_maybe_unordered (code);
11928 compare_code = reverse_condition (compare_code);
11929 code = reverse_condition (code);
11933 compare_code = UNKNOWN;
11934 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
11935 && CONST_INT_P (ix86_compare_op1))
11937 if (ix86_compare_op1 == const0_rtx
11938 && (code == LT || code == GE))
11939 compare_code = code;
11940 else if (ix86_compare_op1 == constm1_rtx)
11944 else if (code == GT)
11949 /* Optimize dest = (op0 < 0) ? -1 : cf. */
11950 if (compare_code != UNKNOWN
11951 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
11952 && (cf == -1 || ct == -1))
/* If the lea code below could be used, only optimize
   if it results in a 2-insn sequence.  */
11957 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
11958 || diff == 3 || diff == 5 || diff == 9)
11959 || (compare_code == LT && ct == -1)
11960 || (compare_code == GE && cf == -1))
/*
 * notl op1	(if necessary)
 * sarl $31, op1
 * orl cf, op1
 */
11971 code = reverse_condition (code);
11974 out = emit_store_flag (out, code, ix86_compare_op0,
11975 ix86_compare_op1, VOIDmode, 0, -1);
11977 out = expand_simple_binop (mode, IOR,
11979 out, 1, OPTAB_DIRECT);
11980 if (out != operands[0])
11981 emit_move_insn (operands[0], out);
11983 return 1; /* DONE */
11988 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
11989 || diff == 3 || diff == 5 || diff == 9)
11990 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
11992 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
/*
 * xorl dest,dest
 * cmpl op1,op2
 * setcc dest
 * lea cf(dest*(ct-cf)),dest
 *
 * Size 14.
 *
 * This also catches the degenerate setcc-only case.
 */
12008 out = emit_store_flag (out, code, ix86_compare_op0,
12009 ix86_compare_op1, VOIDmode, 0, 1);
/* On x86_64 the lea instruction operates on Pmode, so we need
   the arithmetic done in the proper mode to match.  */
12015 tmp = copy_rtx (out);
12019 out1 = copy_rtx (out);
12020 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
12024 tmp = gen_rtx_PLUS (mode, tmp, out1);
12030 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
12033 if (!rtx_equal_p (tmp, out))
12036 out = force_operand (tmp, copy_rtx (out));
12038 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
12040 if (!rtx_equal_p (out, operands[0]))
12041 emit_move_insn (operands[0], copy_rtx (out));
12043 return 1; /* DONE */
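/* For instance, dest = (a < b) ? 5 : 2 has diff == 3, so the scaling
   trick above could emit (hypothetical registers):

	xorl	%eax, %eax
	cmpl	%ebx, %ecx
	setl	%al			; %eax = a < b ? 1 : 0
	leal	2(%eax,%eax,2), %eax	; cf + dest * (ct - cf)

   with a single lea replacing the multiply and the add of cf.  */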
/*
 * General case:			Jumpful:
 *   xorl dest,dest			  cmpl op1, op2
 *   cmpl op1, op2			  movl ct, dest
 *   setcc dest				  jcc 1f
 *   decl dest				  movl cf, dest
 *   andl (cf-ct),dest			1:
 *   addl ct,dest
 *
 * Size 20.				Size 14.
 *
 * This is reasonably steep, but branch mispredict costs are
 * high on modern cpus, so consider failing only if optimizing
 * for size.
 */
12062 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
12063 && BRANCH_COST >= 2)
12069 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
/* We may be reversing an unordered compare to a normal compare,
   which is not valid in general (we may convert a non-trapping
   condition to a trapping one), however on i386 we currently
   emit all comparisons unordered.  */
12074 code = reverse_condition_maybe_unordered (code);
12077 code = reverse_condition (code);
12078 if (compare_code != UNKNOWN)
12079 compare_code = reverse_condition (compare_code);
12083 if (compare_code != UNKNOWN)
/* notl op1	(if needed)
   sarl $31, op1
   orl cf, op1

   For x < 0 (resp. x <= -1) there will be no notl,
   so if possible swap the constants to get rid of the
   complement.
   True/false will be -1/0 while code below (store flag
   followed by decrement) is 0/-1, so the constants need
   to be exchanged once more.  */
12097 if (compare_code == GE || !cf)
12099 code = reverse_condition (code);
12104 HOST_WIDE_INT tmp = cf;
12109 out = emit_store_flag (out, code, ix86_compare_op0,
12110 ix86_compare_op1, VOIDmode, 0, -1);
12114 out = emit_store_flag (out, code, ix86_compare_op0,
12115 ix86_compare_op1, VOIDmode, 0, 1);
12117 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
12118 copy_rtx (out), 1, OPTAB_DIRECT);
12121 out = expand_simple_binop (mode, AND, copy_rtx (out),
12122 gen_int_mode (cf - ct, mode),
12123 copy_rtx (out), 1, OPTAB_DIRECT);
12125 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
12126 copy_rtx (out), 1, OPTAB_DIRECT);
12127 if (!rtx_equal_p (out, operands[0]))
12128 emit_move_insn (operands[0], copy_rtx (out));
12130 return 1; /* DONE */
12134 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
/* Try a few more things with specific constants and a variable.  */
12139 rtx var, orig_out, out, tmp;
12141 if (BRANCH_COST <= 2)
12142 return 0; /* FAIL */
12144 /* If one of the two operands is an interesting constant, load a
12145 constant with the above and mask it in with a logical operation. */
12147 if (CONST_INT_P (operands[2]))
12150 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
12151 operands[3] = constm1_rtx, op = and_optab;
12152 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
12153 operands[3] = const0_rtx, op = ior_optab;
12155 return 0; /* FAIL */
12157 else if (CONST_INT_P (operands[3]))
12160 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
12161 operands[2] = constm1_rtx, op = and_optab;
else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
12163 operands[2] = const0_rtx, op = ior_optab;
12165 return 0; /* FAIL */
12168 return 0; /* FAIL */
12170 orig_out = operands[0];
12171 tmp = gen_reg_rtx (mode);
12174 /* Recurse to get the constant loaded. */
12175 if (ix86_expand_int_movcc (operands) == 0)
12176 return 0; /* FAIL */
12178 /* Mask in the interesting variable. */
12179 out = expand_binop (mode, op, var, tmp, orig_out, 0,
12181 if (!rtx_equal_p (out, orig_out))
12182 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
12184 return 1; /* DONE */
/*
 * For comparison with above,
 *
 * movl cf,dest
 * movl ct,tmp
 * cmpl op1,op2
 * cmovcc tmp,dest
 *
 * Size 15.
 */
12198 if (! nonimmediate_operand (operands[2], mode))
12199 operands[2] = force_reg (mode, operands[2]);
12200 if (! nonimmediate_operand (operands[3], mode))
12201 operands[3] = force_reg (mode, operands[3]);
12203 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
12205 rtx tmp = gen_reg_rtx (mode);
12206 emit_move_insn (tmp, operands[3]);
12209 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
12211 rtx tmp = gen_reg_rtx (mode);
12212 emit_move_insn (tmp, operands[2]);
12216 if (! register_operand (operands[2], VOIDmode)
12218 || ! register_operand (operands[3], VOIDmode)))
12219 operands[2] = force_reg (mode, operands[2]);
12222 && ! register_operand (operands[3], VOIDmode))
12223 operands[3] = force_reg (mode, operands[3]);
12225 emit_insn (compare_seq);
12226 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
12227 gen_rtx_IF_THEN_ELSE (mode,
12228 compare_op, operands[2],
12231 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
12232 gen_rtx_IF_THEN_ELSE (mode,
12234 copy_rtx (operands[3]),
12235 copy_rtx (operands[0]))));
12237 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
12238 gen_rtx_IF_THEN_ELSE (mode,
12240 copy_rtx (operands[2]),
12241 copy_rtx (operands[0]))));
12243 return 1; /* DONE */
12246 /* Swap, force into registers, or otherwise massage the two operands
12247 to an sse comparison with a mask result. Thus we differ a bit from
12248 ix86_prepare_fp_compare_args which expects to produce a flags result.
12250 The DEST operand exists to help determine whether to commute commutative
12251 operators. The POP0/POP1 operands are updated in place. The new
12252 comparison code is returned, or UNKNOWN if not implementable. */
12254 static enum rtx_code
12255 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
12256 rtx *pop0, rtx *pop1)
12264 /* We have no LTGT as an operator. We could implement it with
12265 NE & ORDERED, but this requires an extra temporary. It's
12266 not clear that it's worth it. */
12273 /* These are supported directly. */
12280 /* For commutative operators, try to canonicalize the destination
12281 operand to be first in the comparison - this helps reload to
12282 avoid extra moves. */
12283 if (!dest || !rtx_equal_p (dest, *pop1))
12291 /* These are not supported directly. Swap the comparison operands
12292 to transform into something that is supported. */
12296 code = swap_condition (code);
12300 gcc_unreachable ();
12306 /* Detect conditional moves that exactly match min/max operational
12307 semantics. Note that this is IEEE safe, as long as we don't
12308 interchange the operands.
12310 Returns FALSE if this conditional move doesn't match a MIN/MAX,
12311 and TRUE if the operation is successful and instructions are emitted. */
12314 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
12315 rtx cmp_op1, rtx if_true, rtx if_false)
12317 enum machine_mode mode;
12323 else if (code == UNGE)
12326 if_true = if_false;
12332 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
12334 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
12339 mode = GET_MODE (dest);
12341 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
12342 but MODE may be a vector mode and thus not appropriate. */
12343 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
12345 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
12348 if_true = force_reg (mode, if_true);
12349 v = gen_rtvec (2, if_true, if_false);
12350 tmp = gen_rtx_UNSPEC (mode, v, u);
12354 code = is_min ? SMIN : SMAX;
12355 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
12358 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
12362 /* Expand an sse vector comparison. Return the register with the result. */
12365 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
12366 rtx op_true, rtx op_false)
12368 enum machine_mode mode = GET_MODE (dest);
12371 cmp_op0 = force_reg (mode, cmp_op0);
12372 if (!nonimmediate_operand (cmp_op1, mode))
12373 cmp_op1 = force_reg (mode, cmp_op1);
12376 || reg_overlap_mentioned_p (dest, op_true)
12377 || reg_overlap_mentioned_p (dest, op_false))
12378 dest = gen_reg_rtx (mode);
12380 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
12381 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
12386 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
12387 operations. This is used for both scalar and vector conditional moves. */
12390 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
12392 enum machine_mode mode = GET_MODE (dest);
12395 if (op_false == CONST0_RTX (mode))
12397 op_true = force_reg (mode, op_true);
12398 x = gen_rtx_AND (mode, cmp, op_true);
12399 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
12401 else if (op_true == CONST0_RTX (mode))
12403 op_false = force_reg (mode, op_false);
12404 x = gen_rtx_NOT (mode, cmp);
12405 x = gen_rtx_AND (mode, x, op_false);
12406 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
12410 op_true = force_reg (mode, op_true);
12411 op_false = force_reg (mode, op_false);
12413 t2 = gen_reg_rtx (mode);
12415 t3 = gen_reg_rtx (mode);
12419 x = gen_rtx_AND (mode, op_true, cmp);
12420 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
12422 x = gen_rtx_NOT (mode, cmp);
12423 x = gen_rtx_AND (mode, x, op_false);
12424 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
12426 x = gen_rtx_IOR (mode, t3, t2);
12427 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
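/* A sketch of the general and/andnot/or sequence built above, for an
   SFmode dest = a < b ? t : f (hypothetical xmm assignment):

	cmpltss	%xmm1, %xmm0	; mask = all-ones where a < b
	andps	%xmm0, %xmm2	; t2 = mask & op_true
	andnps	%xmm3, %xmm0	; t3 = ~mask & op_false
	orps	%xmm2, %xmm0	; dest = t2 | t3

   The two special cases above drop the andps or the andnps when one
   arm is already zero.  */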
12431 /* Expand a floating-point conditional move. Return true if successful. */
12434 ix86_expand_fp_movcc (rtx operands[])
12436 enum machine_mode mode = GET_MODE (operands[0]);
12437 enum rtx_code code = GET_CODE (operands[1]);
12438 rtx tmp, compare_op, second_test, bypass_test;
12440 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
12442 enum machine_mode cmode;
12444 /* Since we've no cmove for sse registers, don't force bad register
12445 allocation just to gain access to it. Deny movcc when the
12446 comparison mode doesn't match the move mode. */
12447 cmode = GET_MODE (ix86_compare_op0);
12448 if (cmode == VOIDmode)
12449 cmode = GET_MODE (ix86_compare_op1);
12453 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
12455 &ix86_compare_op1);
12456 if (code == UNKNOWN)
12459 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
12460 ix86_compare_op1, operands[2],
12464 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
12465 ix86_compare_op1, operands[2], operands[3]);
12466 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
12470 /* The floating point conditional move instructions don't directly
12471 support conditions resulting from a signed integer comparison. */
12473 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
12478 if (!fcmov_comparison_operator (compare_op, VOIDmode))
12480 gcc_assert (!second_test && !bypass_test);
12481 tmp = gen_reg_rtx (QImode);
12482 ix86_expand_setcc (code, tmp);
12484 ix86_compare_op0 = tmp;
12485 ix86_compare_op1 = const0_rtx;
12486 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
12488 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
12490 tmp = gen_reg_rtx (mode);
12491 emit_move_insn (tmp, operands[3]);
12494 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
12496 tmp = gen_reg_rtx (mode);
12497 emit_move_insn (tmp, operands[2]);
12501 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
12502 gen_rtx_IF_THEN_ELSE (mode, compare_op,
12503 operands[2], operands[3])));
12505 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
12506 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
12507 operands[3], operands[0])));
12509 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
12510 gen_rtx_IF_THEN_ELSE (mode, second_test,
12511 operands[2], operands[0])));
12516 /* Expand a floating-point vector conditional move; a vcond operation
12517 rather than a movcc operation. */
12520 ix86_expand_fp_vcond (rtx operands[])
12522 enum rtx_code code = GET_CODE (operands[3]);
12525 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
12526 &operands[4], &operands[5]);
12527 if (code == UNKNOWN)
12530 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
12531 operands[5], operands[1], operands[2]))
12534 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
12535 operands[1], operands[2]);
12536 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
12540 /* Expand a signed integral vector conditional move. */
12543 ix86_expand_int_vcond (rtx operands[])
12545 enum machine_mode mode = GET_MODE (operands[0]);
12546 enum rtx_code code = GET_CODE (operands[3]);
12547 bool negate = false;
12550 cop0 = operands[4];
12551 cop1 = operands[5];
12553 /* Canonicalize the comparison to EQ, GT, GTU. */
12564 code = reverse_condition (code);
12570 code = reverse_condition (code);
12576 code = swap_condition (code);
12577 x = cop0, cop0 = cop1, cop1 = x;
12581 gcc_unreachable ();
12584 /* Unsigned parallel compare is not supported by the hardware. Play some
12585 tricks to turn this into a signed comparison against 0. */
12588 cop0 = force_reg (mode, cop0);
12596 /* Perform a parallel modulo subtraction. */
12597 t1 = gen_reg_rtx (mode);
12598 emit_insn (gen_subv4si3 (t1, cop0, cop1));
12600 /* Extract the original sign bit of op0. */
12601 mask = GEN_INT (-0x80000000);
12602 mask = gen_rtx_CONST_VECTOR (mode,
12603 gen_rtvec (4, mask, mask, mask, mask));
12604 mask = force_reg (mode, mask);
12605 t2 = gen_reg_rtx (mode);
12606 emit_insn (gen_andv4si3 (t2, cop0, mask));
12608 /* XOR it back into the result of the subtraction. This results
12609 in the sign bit set iff we saw unsigned underflow. */
12610 x = gen_reg_rtx (mode);
12611 emit_insn (gen_xorv4si3 (x, t1, t2));
12619 /* Perform a parallel unsigned saturating subtraction. */
12620 x = gen_reg_rtx (mode);
12621 emit_insn (gen_rtx_SET (VOIDmode, x,
12622 gen_rtx_US_MINUS (mode, cop0, cop1)));
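/* The identity used here: a saturating a - b is zero exactly when
   a <= b unsigned, so a GTU b is the negation of an EQ-against-zero
   test -- hence the compare against the zero vector below, with the
   result arms swapped via NEGATE.  E.g. for V16QImode, psubusb
   leaves zero precisely in the lanes where a <= b.  */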
12629 gcc_unreachable ();
12633 cop1 = CONST0_RTX (mode);
12636 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
12637 operands[1+negate], operands[2-negate]);
12639 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
12640 operands[2-negate]);
12644 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
12645 true if we should do zero extension, else sign extension. HIGH_P is
12646 true if we want the N/2 high elements, else the low elements. */
12649 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
12651 enum machine_mode imode = GET_MODE (operands[1]);
12652 rtx (*unpack)(rtx, rtx, rtx);
12659 unpack = gen_vec_interleave_highv16qi;
12661 unpack = gen_vec_interleave_lowv16qi;
12665 unpack = gen_vec_interleave_highv8hi;
12667 unpack = gen_vec_interleave_lowv8hi;
12671 unpack = gen_vec_interleave_highv4si;
12673 unpack = gen_vec_interleave_lowv4si;
12676 gcc_unreachable ();
12679 dest = gen_lowpart (imode, operands[0]);
12682 se = force_reg (imode, CONST0_RTX (imode));
12684 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
12685 operands[1], pc_rtx, pc_rtx);
12687 emit_insn (unpack (dest, operands[1], se));
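/* A sketch of the interleave trick for the low half of a v8hi -> v4si
   widening (hypothetical registers):

	pxor	%xmm1, %xmm1	; zero extension: extend with zeros
	; sign extension instead computes the mask:
	; pcmpgtw %xmm0, %xmm1	; xmm1 = (0 > op) ? all-ones : 0
	punpcklwd %xmm1, %xmm0	; pair each element with its extension

   On little-endian x86 the interleave puts each 16-bit element in the
   low half of a 32-bit element, which is exactly the widening.  */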
/* Expand conditional increment or decrement using adc/sbb instructions.
   The default case using setcc followed by the conditional move can be
   done by generic code.  */
12694 ix86_expand_int_addcc (rtx operands[])
12696 enum rtx_code code = GET_CODE (operands[1]);
12698 rtx val = const0_rtx;
12699 bool fpcmp = false;
12700 enum machine_mode mode = GET_MODE (operands[0]);
12702 if (operands[3] != const1_rtx
12703 && operands[3] != constm1_rtx)
12705 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
12706 ix86_compare_op1, &compare_op))
12708 code = GET_CODE (compare_op);
12710 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
12711 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
12714 code = ix86_fp_compare_code_to_integer (code);
12721 PUT_CODE (compare_op,
12722 reverse_condition_maybe_unordered
12723 (GET_CODE (compare_op)));
12725 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
12727 PUT_MODE (compare_op, mode);
12729 /* Construct either adc or sbb insn. */
12730 if ((code == LTU) == (operands[3] == constm1_rtx))
12732 switch (GET_MODE (operands[0]))
12735 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
12738 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
12741 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
12744 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12747 gcc_unreachable ();
12752 switch (GET_MODE (operands[0]))
12755 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
12758 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
12761 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
12764 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12767 gcc_unreachable ();
12770 return 1; /* DONE */
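/* E.g. x = x + (a < b ? 1 : 0) in SImode might come out as
   (hypothetical registers):

	cmpl	%ebx, %eax	; carry set iff a < b unsigned
	adcl	$0, %ecx	; x += carry

   while the decrement flavor uses sbbl $0, %ecx instead.  */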
/* Split operands 0 and 1 into SImode parts.  Similar to split_di, but
   works for floating point operands and non-offsettable memories.
   For pushes, it returns just stack offsets; the values will be saved
   in the right order.  At most three parts are generated.  */
12780 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
12787 size = (GET_MODE_SIZE (mode) + 4) / 8;
12789 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
12790 gcc_assert (size >= 2 && size <= 3);
/* Optimize constant pool references to immediates.  This is used by fp
   moves, which force all constants to memory to allow combining.  */
12794 if (MEM_P (operand) && MEM_READONLY_P (operand))
12796 rtx tmp = maybe_get_pool_constant (operand);
12801 if (MEM_P (operand) && !offsettable_memref_p (operand))
/* The only non-offsettable memories we handle are pushes.  */
12804 int ok = push_operand (operand, VOIDmode);
12808 operand = copy_rtx (operand);
12809 PUT_MODE (operand, Pmode);
12810 parts[0] = parts[1] = parts[2] = operand;
12814 if (GET_CODE (operand) == CONST_VECTOR)
12816 enum machine_mode imode = int_mode_for_mode (mode);
12817 /* Caution: if we looked through a constant pool memory above,
12818 the operand may actually have a different mode now. That's
12819 ok, since we want to pun this all the way back to an integer. */
12820 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
12821 gcc_assert (operand != NULL);
12827 if (mode == DImode)
12828 split_di (&operand, 1, &parts[0], &parts[1]);
12831 if (REG_P (operand))
12833 gcc_assert (reload_completed);
12834 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
12835 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
12837 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
12839 else if (offsettable_memref_p (operand))
12841 operand = adjust_address (operand, SImode, 0);
12842 parts[0] = operand;
12843 parts[1] = adjust_address (operand, SImode, 4);
12845 parts[2] = adjust_address (operand, SImode, 8);
12847 else if (GET_CODE (operand) == CONST_DOUBLE)
12852 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12856 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
12857 parts[2] = gen_int_mode (l[2], SImode);
12860 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
12863 gcc_unreachable ();
12865 parts[1] = gen_int_mode (l[1], SImode);
12866 parts[0] = gen_int_mode (l[0], SImode);
12869 gcc_unreachable ();
12874 if (mode == TImode)
12875 split_ti (&operand, 1, &parts[0], &parts[1]);
12876 if (mode == XFmode || mode == TFmode)
enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
12879 if (REG_P (operand))
12881 gcc_assert (reload_completed);
12882 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
12883 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
12885 else if (offsettable_memref_p (operand))
12887 operand = adjust_address (operand, DImode, 0);
12888 parts[0] = operand;
12889 parts[1] = adjust_address (operand, upper_mode, 8);
12891 else if (GET_CODE (operand) == CONST_DOUBLE)
12896 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12897 real_to_target (l, &r, mode);
/* Do not use shift by 32 to avoid a warning on 32-bit systems.  */
12900 if (HOST_BITS_PER_WIDE_INT >= 64)
12903 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
12904 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
12907 parts[0] = immed_double_const (l[0], l[1], DImode);
12909 if (upper_mode == SImode)
12910 parts[1] = gen_int_mode (l[2], SImode);
12911 else if (HOST_BITS_PER_WIDE_INT >= 64)
12914 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
12915 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
12918 parts[1] = immed_double_const (l[2], l[3], DImode);
12921 gcc_unreachable ();
12928 /* Emit insns to perform a move or push of DI, DF, and XF values.
12929 Return false when normal moves are needed; true when all required
insns have been emitted.  Operands 2-4 contain the input values
in the correct order; operands 5-7 contain the output values.  */
12934 ix86_split_long_move (rtx operands[])
12939 int collisions = 0;
12940 enum machine_mode mode = GET_MODE (operands[0]);
/* The DFmode expanders may ask us to move a double.
   For a 64-bit target this is a single move.  By hiding the fact
   here we simplify the i386.md splitters.  */
12945 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
/* Optimize constant pool references to immediates.  This is used by
   fp moves, which force all constants to memory to allow combining.  */
12950 if (MEM_P (operands[1])
12951 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12952 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12953 operands[1] = get_pool_constant (XEXP (operands[1], 0));
12954 if (push_operand (operands[0], VOIDmode))
12956 operands[0] = copy_rtx (operands[0]);
12957 PUT_MODE (operands[0], Pmode);
12960 operands[0] = gen_lowpart (DImode, operands[0]);
12961 operands[1] = gen_lowpart (DImode, operands[1]);
12962 emit_move_insn (operands[0], operands[1]);
/* The only non-offsettable memory we handle is a push.  */
12967 if (push_operand (operands[0], VOIDmode))
12970 gcc_assert (!MEM_P (operands[0])
12971 || offsettable_memref_p (operands[0]));
12973 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
12974 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
/* When emitting a push, take care of source operands on the stack.  */
12977 if (push && MEM_P (operands[1])
12978 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
12981 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
12982 XEXP (part[1][2], 0));
12983 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
12984 XEXP (part[1][1], 0));
/* We need to do the copy in the right order in case an address register
   of the source overlaps the destination.  */
12989 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
12991 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
12993 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12996 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
/* A collision in the middle part can be handled by reordering.  */
13000 if (collisions == 1 && nparts == 3
13001 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
13004 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
13005 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
/* If there are more collisions, we can't handle them by reordering.
   Do an lea to the last part and use only one colliding move.  */
13010 else if (collisions > 1)
13016 base = part[0][nparts - 1];
/* Handle the case when the last part isn't valid for lea.
   This happens in 64-bit mode when storing the 12-byte XFmode.  */
13020 if (GET_MODE (base) != Pmode)
13021 base = gen_rtx_REG (Pmode, REGNO (base));
13023 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
13024 part[1][0] = replace_equiv_address (part[1][0], base);
13025 part[1][1] = replace_equiv_address (part[1][1],
13026 plus_constant (base, UNITS_PER_WORD));
13028 part[1][2] = replace_equiv_address (part[1][2],
13029 plus_constant (base, 8));
13039 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
13040 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
13041 emit_move_insn (part[0][2], part[1][2]);
/* In 64-bit mode we don't have a 32-bit push available.  In case this is
   a register, it is OK - we will just use the larger counterpart.  We also
   retype memory - these changes come from an attempt to avoid the REX prefix
   on moving the second half of a TFmode value.  */
13050 if (GET_MODE (part[1][1]) == SImode)
13052 switch (GET_CODE (part[1][1]))
13055 part[1][1] = adjust_address (part[1][1], DImode, 0);
13059 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
13063 gcc_unreachable ();
13066 if (GET_MODE (part[1][0]) == SImode)
13067 part[1][0] = part[1][1];
13070 emit_move_insn (part[0][1], part[1][1]);
13071 emit_move_insn (part[0][0], part[1][0]);
/* Choose the correct order so as not to overwrite the source before it is copied.  */
13076 if ((REG_P (part[0][0])
13077 && REG_P (part[1][1])
13078 && (REGNO (part[0][0]) == REGNO (part[1][1])
13080 && REGNO (part[0][0]) == REGNO (part[1][2]))))
13082 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
13086 operands[2] = part[0][2];
13087 operands[3] = part[0][1];
13088 operands[4] = part[0][0];
13089 operands[5] = part[1][2];
13090 operands[6] = part[1][1];
13091 operands[7] = part[1][0];
13095 operands[2] = part[0][1];
13096 operands[3] = part[0][0];
13097 operands[5] = part[1][1];
13098 operands[6] = part[1][0];
13105 operands[2] = part[0][0];
13106 operands[3] = part[0][1];
13107 operands[4] = part[0][2];
13108 operands[5] = part[1][0];
13109 operands[6] = part[1][1];
13110 operands[7] = part[1][2];
13114 operands[2] = part[0][0];
13115 operands[3] = part[0][1];
13116 operands[5] = part[1][0];
13117 operands[6] = part[1][1];
13121 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
13124 if (CONST_INT_P (operands[5])
13125 && operands[5] != const0_rtx
13126 && REG_P (operands[2]))
13128 if (CONST_INT_P (operands[6])
13129 && INTVAL (operands[6]) == INTVAL (operands[5]))
13130 operands[6] = operands[2];
13133 && CONST_INT_P (operands[7])
13134 && INTVAL (operands[7]) == INTVAL (operands[5]))
13135 operands[7] = operands[2];
13139 && CONST_INT_P (operands[6])
13140 && operands[6] != const0_rtx
13141 && REG_P (operands[3])
13142 && CONST_INT_P (operands[7])
13143 && INTVAL (operands[7]) == INTVAL (operands[6]))
13144 operands[7] = operands[3];
13147 emit_move_insn (operands[2], operands[5]);
13148 emit_move_insn (operands[3], operands[6]);
13150 emit_move_insn (operands[4], operands[7]);
13155 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
13156 left shift by a constant, either using a single shift or
13157 a sequence of add instructions. */
13160 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
{
  if (count == 1)
    emit_insn ((mode == DImode
		? gen_addsi3
		: gen_adddi3) (operand, operand, operand));
  else if (!optimize_size
	   && count * ix86_cost->add <= ix86_cost->shift_const)
    {
      int i;
      for (i = 0; i < count; i++)
	emit_insn ((mode == DImode
		    ? gen_addsi3
		    : gen_adddi3) (operand, operand, operand));
    }
  else
    emit_insn ((mode == DImode
		? gen_ashlsi3
		: gen_ashldi3) (operand, operand, GEN_INT (count)));
}
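/* E.g. on a target where two adds are cheaper than a shift by 2,
   operand <<= 2 becomes (hypothetical register):

	addl	%eax, %eax
	addl	%eax, %eax

   since each self-add is a 1-bit left shift.  */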
13186 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
13188 rtx low[2], high[2];
13190 const int single_width = mode == DImode ? 32 : 64;
13192 if (CONST_INT_P (operands[2]))
13194 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
13195 count = INTVAL (operands[2]) & (single_width * 2 - 1);
13197 if (count >= single_width)
13199 emit_move_insn (high[0], low[1]);
13200 emit_move_insn (low[0], const0_rtx);
13202 if (count > single_width)
13203 ix86_expand_ashl_const (high[0], count - single_width, mode);
13207 if (!rtx_equal_p (operands[0], operands[1]))
13208 emit_move_insn (operands[0], operands[1]);
13209 emit_insn ((mode == DImode
13211 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
13212 ix86_expand_ashl_const (low[0], count, mode);
13217 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
13219 if (operands[1] == const1_rtx)
/* Assuming we've chosen QImode-capable registers, 1 << N
   can be done with two 32/64-bit shifts, no branches, no cmoves.  */
13223 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
13225 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
13227 ix86_expand_clear (low[0]);
13228 ix86_expand_clear (high[0]);
13229 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
13231 d = gen_lowpart (QImode, low[0]);
13232 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
13233 s = gen_rtx_EQ (QImode, flags, const0_rtx);
13234 emit_insn (gen_rtx_SET (VOIDmode, d, s));
13236 d = gen_lowpart (QImode, high[0]);
13237 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
13238 s = gen_rtx_NE (QImode, flags, const0_rtx);
13239 emit_insn (gen_rtx_SET (VOIDmode, d, s));
13242 /* Otherwise, we can get the same results by manually performing
13243 a bit extract operation on bit 5/6, and then performing the two
13244 shifts. The two methods of getting 0/1 into low/high are exactly
13245 the same size. Avoiding the shift in the bit extract case helps
13246 pentium4 a bit; no one else seems to care much either way. */
13251 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
13252 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
13254 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
13255 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
13257 emit_insn ((mode == DImode
13259 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
13260 emit_insn ((mode == DImode
13262 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
13263 emit_move_insn (low[0], high[0]);
13264 emit_insn ((mode == DImode
13266 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
13269 emit_insn ((mode == DImode
13271 : gen_ashldi3) (low[0], low[0], operands[2]));
13272 emit_insn ((mode == DImode
13274 : gen_ashldi3) (high[0], high[0], operands[2]));
13278 if (operands[1] == constm1_rtx)
13280 /* For -1 << N, we can avoid the shld instruction, because we
13281 know that we're shifting 0...31/63 ones into a -1. */
13282 emit_move_insn (low[0], constm1_rtx);
13284 emit_move_insn (high[0], low[0]);
13286 emit_move_insn (high[0], constm1_rtx);
13290 if (!rtx_equal_p (operands[0], operands[1]))
13291 emit_move_insn (operands[0], operands[1]);
13293 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
13294 emit_insn ((mode == DImode
13296 : gen_x86_64_shld) (high[0], low[0], operands[2]));
13299 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
13301 if (TARGET_CMOVE && scratch)
13303 ix86_expand_clear (scratch);
13304 emit_insn ((mode == DImode
13305 ? gen_x86_shift_adj_1
13306 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
13309 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
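/* A sketch of the whole variable-count DImode path this builds
   (value in %edx:%eax, count in %ecx, hypothetical):

	shldl	%cl, %eax, %edx	; high = high:low << (count mod 32)
	sall	%cl, %eax	; low <<= (count mod 32)
	testb	$32, %cl	; was the real count >= 32?
	je	1f
	movl	%eax, %edx	; yes: high = low, low = 0
	xorl	%eax, %eax
1:

   which is what the cmove and branch adjustment patterns perform.  */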
13313 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
13315 rtx low[2], high[2];
13317 const int single_width = mode == DImode ? 32 : 64;
13319 if (CONST_INT_P (operands[2]))
13321 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
13322 count = INTVAL (operands[2]) & (single_width * 2 - 1);
13324 if (count == single_width * 2 - 1)
13326 emit_move_insn (high[0], high[1]);
13327 emit_insn ((mode == DImode
13329 : gen_ashrdi3) (high[0], high[0],
13330 GEN_INT (single_width - 1)));
13331 emit_move_insn (low[0], high[0]);
13334 else if (count >= single_width)
13336 emit_move_insn (low[0], high[1]);
13337 emit_move_insn (high[0], low[0]);
13338 emit_insn ((mode == DImode
13340 : gen_ashrdi3) (high[0], high[0],
13341 GEN_INT (single_width - 1)));
13342 if (count > single_width)
13343 emit_insn ((mode == DImode
13345 : gen_ashrdi3) (low[0], low[0],
13346 GEN_INT (count - single_width)));
13350 if (!rtx_equal_p (operands[0], operands[1]))
13351 emit_move_insn (operands[0], operands[1]);
13352 emit_insn ((mode == DImode
13354 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
13355 emit_insn ((mode == DImode
13357 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
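/* E.g. a constant DImode arithmetic shift right by 3 (value in
   %edx:%eax, hypothetical) comes out as:

	shrdl	$3, %edx, %eax	; low receives bits shifted out of high
	sarl	$3, %edx	; high shifts in copies of the sign bit

   while counts of 32 and more move the halves first, as above.  */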
13362 if (!rtx_equal_p (operands[0], operands[1]))
13363 emit_move_insn (operands[0], operands[1]);
13365 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
13367 emit_insn ((mode == DImode
13369 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
13370 emit_insn ((mode == DImode
13372 : gen_ashrdi3) (high[0], high[0], operands[2]));
13374 if (TARGET_CMOVE && scratch)
13376 emit_move_insn (scratch, high[0]);
13377 emit_insn ((mode == DImode
13379 : gen_ashrdi3) (scratch, scratch,
13380 GEN_INT (single_width - 1)));
13381 emit_insn ((mode == DImode
13382 ? gen_x86_shift_adj_1
13383 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
13387 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
13392 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
13394 rtx low[2], high[2];
13396 const int single_width = mode == DImode ? 32 : 64;
13398 if (CONST_INT_P (operands[2]))
13400 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
13401 count = INTVAL (operands[2]) & (single_width * 2 - 1);
13403 if (count >= single_width)
13405 emit_move_insn (low[0], high[1]);
13406 ix86_expand_clear (high[0]);
13408 if (count > single_width)
13409 emit_insn ((mode == DImode
13411 : gen_lshrdi3) (low[0], low[0],
13412 GEN_INT (count - single_width)));
13416 if (!rtx_equal_p (operands[0], operands[1]))
13417 emit_move_insn (operands[0], operands[1]);
13418 emit_insn ((mode == DImode
13420 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
13421 emit_insn ((mode == DImode
13423 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
13428 if (!rtx_equal_p (operands[0], operands[1]))
13429 emit_move_insn (operands[0], operands[1]);
13431 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
13433 emit_insn ((mode == DImode
13435 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
13436 emit_insn ((mode == DImode
13438 : gen_lshrdi3) (high[0], high[0], operands[2]));
13440 /* Heh. By reversing the arguments, we can reuse this pattern. */
13441 if (TARGET_CMOVE && scratch)
13443 ix86_expand_clear (scratch);
13444 emit_insn ((mode == DImode
13445 ? gen_x86_shift_adj_1
13446 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
13450 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
/* Predict the just-emitted jump instruction to be taken with probability PROB.  */
13456 predict_jump (int prob)
13458 rtx insn = get_last_insn ();
13459 gcc_assert (JUMP_P (insn));
13461 = gen_rtx_EXPR_LIST (REG_BR_PROB,
/* Helper function for the string operations below.  Test VARIABLE whether
   it is aligned to VALUE bytes.  If true, jump to the label.  */
13469 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
13471 rtx label = gen_label_rtx ();
13472 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
13473 if (GET_MODE (variable) == DImode)
13474 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
13476 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
13477 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
13480 predict_jump (REG_BR_PROB_BASE * 50 / 100);
13482 predict_jump (REG_BR_PROB_BASE * 90 / 100);
/* Adjust COUNTREG by the VALUE. */
13488 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
13490 if (GET_MODE (countreg) == DImode)
13491 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
13493 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
13496 /* Zero extend possibly SImode EXP to Pmode register. */
13498 ix86_zero_extend_to_Pmode (rtx exp)
13501 if (GET_MODE (exp) == VOIDmode)
13502 return force_reg (Pmode, exp);
13503 if (GET_MODE (exp) == Pmode)
13504 return copy_to_mode_reg (Pmode, exp);
13505 r = gen_reg_rtx (Pmode);
13506 emit_insn (gen_zero_extendsidi2 (r, exp));
13510 /* Divide COUNTREG by SCALE. */
13512 scale_counter (rtx countreg, int scale)
13515 rtx piece_size_mask;
13519 if (CONST_INT_P (countreg))
13520 return GEN_INT (INTVAL (countreg) / scale);
13521 gcc_assert (REG_P (countreg));
13523 piece_size_mask = GEN_INT (scale - 1);
13524 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
13525 GEN_INT (exact_log2 (scale)),
13526 NULL, 1, OPTAB_DIRECT);
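/* For a power-of-two SCALE, the division above is just a logical right
   shift by log2 (SCALE). A minimal C sketch of the same transformation
   (an illustrative helper only, not part of the compiler proper; it
   assumes SCALE is a power of two, which is what exact_log2 relies on
   in the RTL path above):  */

static unsigned long
scale_count_sketch (unsigned long count, unsigned long scale)
{
  int shift = 0;

  /* Compute log2 (scale) by hand; exact_log2 does this above.  */
  while ((1UL << shift) < scale)
    shift++;

  return count >> shift;	/* Same value as count / scale.  */
}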
/* Return the mode for the memcpy/memset loop counter. Prefer SImode over
DImode for constant loop counts. */
13533 static enum machine_mode
13534 counter_mode (rtx count_exp)
13536 if (GET_MODE (count_exp) != VOIDmode)
13537 return GET_MODE (count_exp);
if (GET_CODE (count_exp) != CONST_INT)
return Pmode;
if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
return DImode;
return SImode;
/* When SRCPTR is non-NULL, output a simple loop to move memory
from SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
the equivalent loop to set memory by VALUE (supposed to be in MODE).

The size is rounded down to a whole number of chunks moved at once.
SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
13555 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
13556 rtx destptr, rtx srcptr, rtx value,
13557 rtx count, enum machine_mode mode, int unroll,
13560 rtx out_label, top_label, iter, tmp;
13561 enum machine_mode iter_mode = counter_mode (count);
13562 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
13563 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
13569 top_label = gen_label_rtx ();
13570 out_label = gen_label_rtx ();
13571 iter = gen_reg_rtx (iter_mode);
13573 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
13574 NULL, 1, OPTAB_DIRECT);
13575 /* Those two should combine. */
13576 if (piece_size == const1_rtx)
13578 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
13580 predict_jump (REG_BR_PROB_BASE * 10 / 100);
13582 emit_move_insn (iter, const0_rtx);
13584 emit_label (top_label);
13586 tmp = convert_modes (Pmode, iter_mode, iter, true);
13587 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
13588 destmem = change_address (destmem, mode, x_addr);
13592 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
13593 srcmem = change_address (srcmem, mode, y_addr);
/* When unrolling for chips that reorder memory reads and writes,
we can save registers by using a single temporary.
Also, using 4 temporaries is overkill in 32-bit mode. */
13598 if (!TARGET_64BIT && 0)
13600 for (i = 0; i < unroll; i++)
13605 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
13607 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
13609 emit_move_insn (destmem, srcmem);
13615 gcc_assert (unroll <= 4);
13616 for (i = 0; i < unroll; i++)
13618 tmpreg[i] = gen_reg_rtx (mode);
13622 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
13624 emit_move_insn (tmpreg[i], srcmem);
13626 for (i = 0; i < unroll; i++)
13631 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
13633 emit_move_insn (destmem, tmpreg[i]);
13638 for (i = 0; i < unroll; i++)
13642 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
13643 emit_move_insn (destmem, value);
13646 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
13647 true, OPTAB_LIB_WIDEN);
13649 emit_move_insn (iter, tmp);
13651 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
13653 if (expected_size != -1)
13655 expected_size /= GET_MODE_SIZE (mode) * unroll;
13656 if (expected_size == 0)
13658 else if (expected_size > REG_BR_PROB_BASE)
13659 predict_jump (REG_BR_PROB_BASE - 1);
13661 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
13664 predict_jump (REG_BR_PROB_BASE * 80 / 100);
13665 iter = ix86_zero_extend_to_Pmode (iter);
13666 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
13667 true, OPTAB_LIB_WIDEN);
13668 if (tmp != destptr)
13669 emit_move_insn (destptr, tmp);
13672 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
13673 true, OPTAB_LIB_WIDEN);
13675 emit_move_insn (srcptr, tmp);
13677 emit_label (out_label);
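/* The RTL emitted above corresponds roughly to this C shape (an
   illustrative sketch only; MODE is modeled as unsigned long and UNROLL
   as 2, which are assumptions, not the only configuration used):  */

static void
copy_loop_sketch (unsigned long *dst, const unsigned long *src,
		  unsigned long count_bytes)
{
  unsigned long chunk = 2 * sizeof (unsigned long);
  /* Round the byte count down to a whole number of unrolled chunks.  */
  unsigned long size = count_bytes & ~(chunk - 1);
  unsigned long iter = 0;

  while (iter < size)
    {
      /* All loads are issued before the stores, mirroring the tmpreg[]
	 pass above used for chips that reorder memory accesses.  */
      unsigned long t0 = src[0], t1 = src[1];
      dst[0] = t0;
      dst[1] = t1;
      dst += 2;
      src += 2;
      iter += chunk;
    }
  /* The count_bytes - size tail bytes are left for the epilogue.  */
}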
/* Output a "rep; mov" instruction.
Arguments have the same meaning as for the previous function. */
13683 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
13684 rtx destptr, rtx srcptr,
13686 enum machine_mode mode)
13692 /* If the size is known, it is shorter to use rep movs. */
13693 if (mode == QImode && CONST_INT_P (count)
&& !(INTVAL (count) & 3))
mode = SImode;
13697 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
13698 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
13699 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
13700 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
13701 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
13702 if (mode != QImode)
13704 destexp = gen_rtx_ASHIFT (Pmode, countreg,
13705 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
13706 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
13707 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
13708 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
13709 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
13713 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
13714 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
destexp, srcexp));
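/* Semantically, the "rep; movs" emitted above behaves like this C sketch
   (illustrative only; SCALE stands for GET_MODE_SIZE (mode), and the
   pointer updates model the post-increment of edi/esi by the insn):  */

static void
rep_movs_sketch (unsigned char *dst, const unsigned char *src,
		 unsigned long count_bytes, unsigned long scale)
{
  unsigned long n = count_bytes / scale;	/* the value in countreg */

  while (n--)
    {
      unsigned long i;

      for (i = 0; i < scale; i++)
	*dst++ = *src++;
    }
  /* Any count_bytes % scale tail bytes are left for the epilogue.  */
}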
/* Output a "rep; stos" instruction.
Arguments have the same meaning as for the previous function. */
13723 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
13725 enum machine_mode mode)
13730 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
13731 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
13732 value = force_reg (mode, gen_lowpart (mode, value));
13733 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
13734 if (mode != QImode)
13736 destexp = gen_rtx_ASHIFT (Pmode, countreg,
13737 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
13738 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
13741 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
13742 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
13746 emit_strmov (rtx destmem, rtx srcmem,
13747 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
13749 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
13750 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
13751 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13754 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
13756 expand_movmem_epilogue (rtx destmem, rtx srcmem,
13757 rtx destptr, rtx srcptr, rtx count, int max_size)
13760 if (CONST_INT_P (count))
13762 HOST_WIDE_INT countval = INTVAL (count);
13765 if ((countval & 0x10) && max_size > 16)
13769 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13770 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
13773 gcc_unreachable ();
13776 if ((countval & 0x08) && max_size > 8)
13779 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13782 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13783 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
13787 if ((countval & 0x04) && max_size > 4)
13789 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13792 if ((countval & 0x02) && max_size > 2)
13794 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
13797 if ((countval & 0x01) && max_size > 1)
13799 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
13806 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13807 count, 1, OPTAB_DIRECT);
13808 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
13809 count, QImode, 1, 4);
/* When there are stringops, we can cheaply increase dest and src pointers.
Otherwise we save code size by maintaining the offset (zero is readily
available from the preceding rep operation) and using x86 addressing modes. */
13817 if (TARGET_SINGLE_STRINGOP)
13821 rtx label = ix86_expand_aligntest (count, 4, true);
13822 src = change_address (srcmem, SImode, srcptr);
13823 dest = change_address (destmem, SImode, destptr);
13824 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13825 emit_label (label);
13826 LABEL_NUSES (label) = 1;
13830 rtx label = ix86_expand_aligntest (count, 2, true);
13831 src = change_address (srcmem, HImode, srcptr);
13832 dest = change_address (destmem, HImode, destptr);
13833 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13834 emit_label (label);
13835 LABEL_NUSES (label) = 1;
13839 rtx label = ix86_expand_aligntest (count, 1, true);
13840 src = change_address (srcmem, QImode, srcptr);
13841 dest = change_address (destmem, QImode, destptr);
13842 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13843 emit_label (label);
13844 LABEL_NUSES (label) = 1;
13849 rtx offset = force_reg (Pmode, const0_rtx);
13854 rtx label = ix86_expand_aligntest (count, 4, true);
13855 src = change_address (srcmem, SImode, srcptr);
13856 dest = change_address (destmem, SImode, destptr);
13857 emit_move_insn (dest, src);
13858 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
13859 true, OPTAB_LIB_WIDEN);
13861 emit_move_insn (offset, tmp);
13862 emit_label (label);
13863 LABEL_NUSES (label) = 1;
13867 rtx label = ix86_expand_aligntest (count, 2, true);
13868 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13869 src = change_address (srcmem, HImode, tmp);
13870 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13871 dest = change_address (destmem, HImode, tmp);
13872 emit_move_insn (dest, src);
13873 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
13874 true, OPTAB_LIB_WIDEN);
13876 emit_move_insn (offset, tmp);
13877 emit_label (label);
13878 LABEL_NUSES (label) = 1;
13882 rtx label = ix86_expand_aligntest (count, 1, true);
13883 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13884 src = change_address (srcmem, QImode, tmp);
13885 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13886 dest = change_address (destmem, QImode, tmp);
13887 emit_move_insn (dest, src);
13888 emit_label (label);
13889 LABEL_NUSES (label) = 1;
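/* For a variable count, the jump tree emitted above amounts to the
   following C shape (an illustrative sketch for max_size == 8; the bit
   tests correspond to the ix86_expand_aligntest calls):  */

static void
copy_tail_sketch (unsigned char *dst, const unsigned char *src,
		  unsigned long count)
{
  /* Each set bit of the residual count selects one power-of-two sized
     move, so at most three moves finish the block.  */
  if (count & 4)
    {
      dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3];
      dst += 4;
      src += 4;
    }
  if (count & 2)
    {
      dst[0] = src[0]; dst[1] = src[1];
      dst += 2;
      src += 2;
    }
  if (count & 1)
    *dst = *src;
}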
/* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13896 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
13897 rtx count, int max_size)
13900 expand_simple_binop (counter_mode (count), AND, count,
13901 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
13902 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
13903 gen_lowpart (QImode, value), count, QImode,
/* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13909 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
13913 if (CONST_INT_P (count))
13915 HOST_WIDE_INT countval = INTVAL (count);
13918 if ((countval & 0x10) && max_size > 16)
13922 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13923 emit_insn (gen_strset (destptr, dest, value));
13924 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
13925 emit_insn (gen_strset (destptr, dest, value));
13928 gcc_unreachable ();
13931 if ((countval & 0x08) && max_size > 8)
13935 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13936 emit_insn (gen_strset (destptr, dest, value));
13940 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13941 emit_insn (gen_strset (destptr, dest, value));
13942 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
13943 emit_insn (gen_strset (destptr, dest, value));
13947 if ((countval & 0x04) && max_size > 4)
13949 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13950 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13953 if ((countval & 0x02) && max_size > 2)
13955 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
13956 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13959 if ((countval & 0x01) && max_size > 1)
13961 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
13962 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13969 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
13974 rtx label = ix86_expand_aligntest (count, 16, true);
13977 dest = change_address (destmem, DImode, destptr);
13978 emit_insn (gen_strset (destptr, dest, value));
13979 emit_insn (gen_strset (destptr, dest, value));
13983 dest = change_address (destmem, SImode, destptr);
13984 emit_insn (gen_strset (destptr, dest, value));
13985 emit_insn (gen_strset (destptr, dest, value));
13986 emit_insn (gen_strset (destptr, dest, value));
13987 emit_insn (gen_strset (destptr, dest, value));
13989 emit_label (label);
13990 LABEL_NUSES (label) = 1;
13994 rtx label = ix86_expand_aligntest (count, 8, true);
13997 dest = change_address (destmem, DImode, destptr);
13998 emit_insn (gen_strset (destptr, dest, value));
14002 dest = change_address (destmem, SImode, destptr);
14003 emit_insn (gen_strset (destptr, dest, value));
14004 emit_insn (gen_strset (destptr, dest, value));
14006 emit_label (label);
14007 LABEL_NUSES (label) = 1;
14011 rtx label = ix86_expand_aligntest (count, 4, true);
14012 dest = change_address (destmem, SImode, destptr);
14013 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
14014 emit_label (label);
14015 LABEL_NUSES (label) = 1;
14019 rtx label = ix86_expand_aligntest (count, 2, true);
14020 dest = change_address (destmem, HImode, destptr);
14021 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
14022 emit_label (label);
14023 LABEL_NUSES (label) = 1;
14027 rtx label = ix86_expand_aligntest (count, 1, true);
14028 dest = change_address (destmem, QImode, destptr);
14029 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
14030 emit_label (label);
14031 LABEL_NUSES (label) = 1;
/* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
to DESIRED_ALIGNMENT. */
14038 expand_movmem_prologue (rtx destmem, rtx srcmem,
14039 rtx destptr, rtx srcptr, rtx count,
14040 int align, int desired_alignment)
14042 if (align <= 1 && desired_alignment > 1)
14044 rtx label = ix86_expand_aligntest (destptr, 1, false);
14045 srcmem = change_address (srcmem, QImode, srcptr);
14046 destmem = change_address (destmem, QImode, destptr);
14047 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
14048 ix86_adjust_counter (count, 1);
14049 emit_label (label);
14050 LABEL_NUSES (label) = 1;
14052 if (align <= 2 && desired_alignment > 2)
14054 rtx label = ix86_expand_aligntest (destptr, 2, false);
14055 srcmem = change_address (srcmem, HImode, srcptr);
14056 destmem = change_address (destmem, HImode, destptr);
14057 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
14058 ix86_adjust_counter (count, 2);
14059 emit_label (label);
14060 LABEL_NUSES (label) = 1;
14062 if (align <= 4 && desired_alignment > 4)
14064 rtx label = ix86_expand_aligntest (destptr, 4, false);
14065 srcmem = change_address (srcmem, SImode, srcptr);
14066 destmem = change_address (destmem, SImode, destptr);
14067 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
14068 ix86_adjust_counter (count, 4);
14069 emit_label (label);
14070 LABEL_NUSES (label) = 1;
14072 gcc_assert (desired_alignment <= 8);
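/* In C terms, the prologue above does roughly the following (an
   illustrative sketch; the pointer-to-pointer parameters model the fact
   that the expander advances the pointer registers in place):  */

static void
align_dest_sketch (unsigned char **dstp, const unsigned char **srcp,
		   unsigned long *countp, int desired_align)
{
  int i;

  /* Peel off one move per alignment bit until the destination is
     aligned; the prologue guard has ensured the count is large enough.  */
  for (i = 1; i < desired_align; i <<= 1)
    if ((unsigned long) *dstp & i)
      {
	int j;

	for (j = 0; j < i; j++)
	  *(*dstp)++ = *(*srcp)++;
	*countp -= i;
      }
}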
/* Set enough bytes of DEST to align DEST, known to be aligned by ALIGN,
to DESIRED_ALIGNMENT. */
14078 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
14079 int align, int desired_alignment)
14081 if (align <= 1 && desired_alignment > 1)
14083 rtx label = ix86_expand_aligntest (destptr, 1, false);
14084 destmem = change_address (destmem, QImode, destptr);
14085 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
14086 ix86_adjust_counter (count, 1);
14087 emit_label (label);
14088 LABEL_NUSES (label) = 1;
14090 if (align <= 2 && desired_alignment > 2)
14092 rtx label = ix86_expand_aligntest (destptr, 2, false);
14093 destmem = change_address (destmem, HImode, destptr);
14094 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
14095 ix86_adjust_counter (count, 2);
14096 emit_label (label);
14097 LABEL_NUSES (label) = 1;
14099 if (align <= 4 && desired_alignment > 4)
14101 rtx label = ix86_expand_aligntest (destptr, 4, false);
14102 destmem = change_address (destmem, SImode, destptr);
14103 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
14104 ix86_adjust_counter (count, 4);
14105 emit_label (label);
14106 LABEL_NUSES (label) = 1;
14108 gcc_assert (desired_alignment <= 8);
14111 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
14112 static enum stringop_alg
14113 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
14114 int *dynamic_check)
14116 const struct stringop_algs * algs;
14118 *dynamic_check = -1;
14120 algs = &ix86_cost->memset[TARGET_64BIT != 0];
14122 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
14123 if (stringop_alg != no_stringop)
14124 return stringop_alg;
14125 /* rep; movq or rep; movl is the smallest variant. */
14126 else if (optimize_size)
14128 if (!count || (count & 3))
14129 return rep_prefix_1_byte;
14131 return rep_prefix_4_byte;
/* Very tiny blocks are best handled via the loop; REP is expensive to set up.
14135 else if (expected_size != -1 && expected_size < 4)
14136 return loop_1_byte;
14137 else if (expected_size != -1)
14140 enum stringop_alg alg = libcall;
for (i = 0; i < MAX_STRINGOP_ALGS; i++)
14143 gcc_assert (algs->size[i].max);
14144 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
14146 if (algs->size[i].alg != libcall)
14147 alg = algs->size[i].alg;
14148 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
14149 last non-libcall inline algorithm. */
14150 if (TARGET_INLINE_ALL_STRINGOPS)
/* When the current size is best copied by a libcall
but we are still forced to inline, run the heuristic below
that will pick code for medium-sized blocks. */
14155 if (alg != libcall)
14160 return algs->size[i].alg;
14163 gcc_assert (TARGET_INLINE_ALL_STRINGOPS);
/* When asked to inline the call anyway, try to pick a meaningful choice.
We look for the maximal size of block that is faster to copy by hand, and
take blocks of at most that size, guessing that the average size will
be roughly half of the block.

If this turns out to be bad, we might simply specify the preferred
choice in ix86_costs. */
14172 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
14173 && algs->unknown_size == libcall)
14176 enum stringop_alg alg;
for (i = 0; i < MAX_STRINGOP_ALGS; i++)
14180 if (algs->size[i].alg != libcall && algs->size[i].alg)
14181 max = algs->size[i].max;
14184 alg = decide_alg (count, max / 2, memset, dynamic_check);
14185 gcc_assert (*dynamic_check == -1);
14186 gcc_assert (alg != libcall);
14187 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
14188 *dynamic_check = max;
14191 return algs->unknown_size;
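/* The table walk above can be pictured with this C sketch; the table
   contents here are hypothetical (the real ones live in the
   processor_costs stringop_algs tables):  */

static int
pick_alg_sketch (long expected_size)
{
  /* Hypothetical thresholds: byte loop for tiny blocks, word loop for
     small ones, rep prefix for everything else (-1 terminates).  */
  static const struct { long max; int alg; } table[] = {
    { 16, 1 /* loop_1_byte */ },
    { 128, 2 /* loop */ },
    { -1, 3 /* rep_prefix_4_byte */ },
  };
  int i;

  for (i = 0; i < 3; i++)
    if (table[i].max == -1 || table[i].max >= expected_size)
      return table[i].alg;
  return 0;			/* libcall */
}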
14194 /* Decide on alignment. We know that the operand is already aligned to ALIGN
14195 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
14197 decide_alignment (int align,
14198 enum stringop_alg alg,
14201 int desired_align = 0;
14205 gcc_unreachable ();
14207 case unrolled_loop:
14208 desired_align = GET_MODE_SIZE (Pmode);
14210 case rep_prefix_8_byte:
14213 case rep_prefix_4_byte:
/* PentiumPro has special logic triggering for 8-byte-aligned blocks,
copying a whole cache line at once. */
14216 if (TARGET_PENTIUMPRO)
14221 case rep_prefix_1_byte:
/* PentiumPro has special logic triggering for 8-byte-aligned blocks,
copying a whole cache line at once. */
14224 if (TARGET_PENTIUMPRO)
14238 if (desired_align < align)
14239 desired_align = align;
14240 if (expected_size != -1 && expected_size < 4)
14241 desired_align = align;
14242 return desired_align;
14245 /* Return the smallest power of 2 greater than VAL. */
14247 smallest_pow2_greater_than (int val)
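/* For illustration, a minimal C body matching the comment above might
   look like this (a sketch, returning a strictly greater power of two,
   e.g. 5 -> 8 and 8 -> 16):  */

static int
smallest_pow2_sketch (int val)
{
  int ret = 1;

  while (ret <= val)
    ret <<= 1;
  return ret;
}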
/* Expand string move (memcpy) operation. Use i386 string operations when
profitable. ix86_expand_setmem contains similar code. The code depends upon
architecture, block size and alignment, but always has the same
overall structure:

1) Prologue guard: Conditional that jumps up to epilogues for small
blocks that can be handled by the epilogue alone. This is faster but
also needed for correctness, since the prologue assumes the block is larger
than the desired alignment.

An optional dynamic check for size and a libcall for large
blocks are emitted here too, with -minline-stringops-dynamically.

2) Prologue: copy the first few bytes in order to get the destination aligned
to DESIRED_ALIGN. It is emitted only when ALIGN is less than
DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
We emit either a jump tree on power-of-two sized blocks, or a byte loop.

3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
with the specified algorithm.

4) Epilogue: code copying the tail of the block that is too small to be
handled by the main body (or up to the size guarded by the prologue guard). */
14280 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
14281 rtx expected_align_exp, rtx expected_size_exp)
14287 rtx jump_around_label = NULL;
14288 HOST_WIDE_INT align = 1;
14289 unsigned HOST_WIDE_INT count = 0;
14290 HOST_WIDE_INT expected_size = -1;
14291 int size_needed = 0, epilogue_size_needed;
14292 int desired_align = 0;
14293 enum stringop_alg alg;
14296 if (CONST_INT_P (align_exp))
14297 align = INTVAL (align_exp);
/* The i386 can do misaligned accesses at reasonably increased cost. */
14299 if (CONST_INT_P (expected_align_exp)
14300 && INTVAL (expected_align_exp) > align)
14301 align = INTVAL (expected_align_exp);
14302 if (CONST_INT_P (count_exp))
14303 count = expected_size = INTVAL (count_exp);
14304 if (CONST_INT_P (expected_size_exp) && count == 0)
14305 expected_size = INTVAL (expected_size_exp);
14307 /* Step 0: Decide on preferred algorithm, desired alignment and
14308 size of chunks to be copied by main loop. */
14310 alg = decide_alg (count, expected_size, false, &dynamic_check);
14311 desired_align = decide_alignment (align, alg, expected_size);
14313 if (!TARGET_ALIGN_STRINGOPS)
14314 align = desired_align;
14316 if (alg == libcall)
14318 gcc_assert (alg != no_stringop);
14320 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
14321 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
14322 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
14327 gcc_unreachable ();
14329 size_needed = GET_MODE_SIZE (Pmode);
14331 case unrolled_loop:
14332 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
14334 case rep_prefix_8_byte:
14337 case rep_prefix_4_byte:
14340 case rep_prefix_1_byte:
14346 epilogue_size_needed = size_needed;
14348 /* Step 1: Prologue guard. */
/* Alignment code needs the count to be in a register. */
14351 if (CONST_INT_P (count_exp) && desired_align > align)
14353 enum machine_mode mode = SImode;
if (TARGET_64BIT && (count & ~0xffffffff))
mode = DImode;
14356 count_exp = force_reg (mode, count_exp);
14358 gcc_assert (desired_align >= 1 && align >= 1);
14360 /* Ensure that alignment prologue won't copy past end of block. */
14361 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
14363 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
/* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
Make sure it is a power of 2. */
14366 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
14368 label = gen_label_rtx ();
14369 emit_cmp_and_jump_insns (count_exp,
14370 GEN_INT (epilogue_size_needed),
14371 LTU, 0, counter_mode (count_exp), 1, label);
14372 if (GET_CODE (count_exp) == CONST_INT)
14374 else if (expected_size == -1 || expected_size < epilogue_size_needed)
14375 predict_jump (REG_BR_PROB_BASE * 60 / 100);
14377 predict_jump (REG_BR_PROB_BASE * 20 / 100);
/* Emit code to decide at runtime whether a library call or inline code
should be used. */
14381 if (dynamic_check != -1)
14383 rtx hot_label = gen_label_rtx ();
14384 jump_around_label = gen_label_rtx ();
14385 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
14386 LEU, 0, GET_MODE (count_exp), 1, hot_label);
14387 predict_jump (REG_BR_PROB_BASE * 90 / 100);
14388 emit_block_move_via_libcall (dst, src, count_exp, false);
14389 emit_jump (jump_around_label);
14390 emit_label (hot_label);
14393 /* Step 2: Alignment prologue. */
14395 if (desired_align > align)
/* Except for the first move in the epilogue, we no longer know
the constant offset in aliasing info. It doesn't seem worth
the pain to maintain it for the first move, so throw away
the info early. */
14401 src = change_address (src, BLKmode, srcreg);
14402 dst = change_address (dst, BLKmode, destreg);
14403 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
14406 if (label && size_needed == 1)
14408 emit_label (label);
14409 LABEL_NUSES (label) = 1;
14413 /* Step 3: Main loop. */
14419 gcc_unreachable ();
14421 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
14422 count_exp, QImode, 1, expected_size);
14425 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
14426 count_exp, Pmode, 1, expected_size);
14428 case unrolled_loop:
/* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
registers for 4 temporaries anyway. */
14431 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
14432 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
14435 case rep_prefix_8_byte:
14436 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
14439 case rep_prefix_4_byte:
14440 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
14443 case rep_prefix_1_byte:
14444 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
/* Properly adjust the offsets of src and dest memory for aliasing. */
14449 if (CONST_INT_P (count_exp))
14451 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
14452 (count / size_needed) * size_needed);
14453 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
14454 (count / size_needed) * size_needed);
14458 src = change_address (src, BLKmode, srcreg);
14459 dst = change_address (dst, BLKmode, destreg);
14462 /* Step 4: Epilogue to copy the remaining bytes. */
/* When the main loop is done, COUNT_EXP might hold the original count,
while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
bytes. Compensate if needed. */
14471 if (size_needed < epilogue_size_needed)
14474 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
14475 GEN_INT (size_needed - 1), count_exp, 1,
14477 if (tmp != count_exp)
14478 emit_move_insn (count_exp, tmp);
14480 emit_label (label);
14481 LABEL_NUSES (label) = 1;
14484 if (count_exp != const0_rtx && epilogue_size_needed > 1)
14485 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
14486 epilogue_size_needed);
14487 if (jump_around_label)
14488 emit_label (jump_around_label);
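/* Putting the four steps together, the emitted code has roughly this C
   shape (an illustrative sketch only; SIZE_NEEDED is modeled as 8 and
   __builtin_memcpy stands in for one unrolled chunk move):  */

static void
movmem_shape_sketch (unsigned char *dst, const unsigned char *src,
		     unsigned long count)
{
  /* 1) Prologue guard: small blocks go straight to the epilogue.  */
  if (count >= 8)
    {
      /* 2) Alignment prologue: byte moves until dst is 8-byte aligned.  */
      while ((unsigned long) dst & 7)
	{
	  *dst++ = *src++;
	  count--;
	}

      /* 3) Main body: copy in SIZE_NEEDED (here 8-byte) chunks.  */
      for (; count >= 8; count -= 8, dst += 8, src += 8)
	__builtin_memcpy (dst, src, 8);
    }

  /* 4) Epilogue: the remaining count & 7 bytes.  */
  while (count--)
    *dst++ = *src++;
}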
/* Helper function for memset. For QImode value 0xXY produce
0xXYXYXYXY of the width specified by MODE. This is essentially
a * 0x01010101, but we can do slightly better than
synth_mult by unwinding the sequence by hand on CPUs with
slow multiply. */
14498 promote_duplicated_reg (enum machine_mode mode, rtx val)
14500 enum machine_mode valmode = GET_MODE (val);
14502 int nops = mode == DImode ? 3 : 2;
14504 gcc_assert (mode == SImode || mode == DImode);
14505 if (val == const0_rtx)
14506 return copy_to_mode_reg (mode, const0_rtx);
14507 if (CONST_INT_P (val))
14509 HOST_WIDE_INT v = INTVAL (val) & 255;
14513 if (mode == DImode)
14514 v |= (v << 16) << 16;
14515 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
14518 if (valmode == VOIDmode)
14520 if (valmode != QImode)
14521 val = gen_lowpart (QImode, val);
14522 if (mode == QImode)
14524 if (!TARGET_PARTIAL_REG_STALL)
14526 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
14527 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
14528 <= (ix86_cost->shift_const + ix86_cost->add) * nops
14529 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
14531 rtx reg = convert_modes (mode, QImode, val, true);
14532 tmp = promote_duplicated_reg (mode, const1_rtx);
14533 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
14538 rtx reg = convert_modes (mode, QImode, val, true);
14540 if (!TARGET_PARTIAL_REG_STALL)
14541 if (mode == SImode)
14542 emit_insn (gen_movsi_insv_1 (reg, reg));
14544 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
14547 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
14548 NULL, 1, OPTAB_DIRECT);
14550 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
14552 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
14553 NULL, 1, OPTAB_DIRECT);
14554 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
if (mode == SImode)
return reg;
14557 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
14558 NULL, 1, OPTAB_DIRECT);
14559 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
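/* The shift-and-or unwinding above computes, in C terms (an illustrative
   sketch of the SImode case; the DImode case adds one more step with a
   shift by 32):  */

static unsigned int
dup_byte_sketch (unsigned char b)
{
  unsigned int v = b;

  v |= v << 8;			/* 0x000000XY -> 0x0000XYXY */
  v |= v << 16;			/* 0x0000XYXY -> 0xXYXYXYXY */
  return v;			/* same as b * 0x01010101 */
}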
14564 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
14565 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
14566 alignment from ALIGN to DESIRED_ALIGN. */
14568 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
14573 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
14574 promoted_val = promote_duplicated_reg (DImode, val);
14575 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
14576 promoted_val = promote_duplicated_reg (SImode, val);
14577 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
14578 promoted_val = promote_duplicated_reg (HImode, val);
14580 promoted_val = val;
14582 return promoted_val;
14585 /* Expand string clear operation (bzero). Use i386 string operations when
14586 profitable. See expand_movmem comment for explanation of individual
14587 steps performed. */
14589 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
14590 rtx expected_align_exp, rtx expected_size_exp)
14595 rtx jump_around_label = NULL;
14596 HOST_WIDE_INT align = 1;
14597 unsigned HOST_WIDE_INT count = 0;
14598 HOST_WIDE_INT expected_size = -1;
14599 int size_needed = 0, epilogue_size_needed;
14600 int desired_align = 0;
14601 enum stringop_alg alg;
14602 rtx promoted_val = NULL;
14603 bool force_loopy_epilogue = false;
14606 if (CONST_INT_P (align_exp))
14607 align = INTVAL (align_exp);
/* The i386 can do misaligned accesses at reasonably increased cost. */
14609 if (CONST_INT_P (expected_align_exp)
14610 && INTVAL (expected_align_exp) > align)
14611 align = INTVAL (expected_align_exp);
14612 if (CONST_INT_P (count_exp))
14613 count = expected_size = INTVAL (count_exp);
14614 if (CONST_INT_P (expected_size_exp) && count == 0)
14615 expected_size = INTVAL (expected_size_exp);
14617 /* Step 0: Decide on preferred algorithm, desired alignment and
14618 size of chunks to be copied by main loop. */
14620 alg = decide_alg (count, expected_size, true, &dynamic_check);
14621 desired_align = decide_alignment (align, alg, expected_size);
14623 if (!TARGET_ALIGN_STRINGOPS)
14624 align = desired_align;
14626 if (alg == libcall)
14628 gcc_assert (alg != no_stringop);
14630 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
14631 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
14636 gcc_unreachable ();
14638 size_needed = GET_MODE_SIZE (Pmode);
14640 case unrolled_loop:
14641 size_needed = GET_MODE_SIZE (Pmode) * 4;
14643 case rep_prefix_8_byte:
14646 case rep_prefix_4_byte:
14649 case rep_prefix_1_byte:
14654 epilogue_size_needed = size_needed;
14656 /* Step 1: Prologue guard. */
/* Alignment code needs the count to be in a register. */
14659 if (CONST_INT_P (count_exp) && desired_align > align)
14661 enum machine_mode mode = SImode;
if (TARGET_64BIT && (count & ~0xffffffff))
mode = DImode;
14664 count_exp = force_reg (mode, count_exp);
/* Do the cheap promotion to allow better CSE across the
main loop and epilogue (i.e. one load of the big constant in
front of all code). */
14669 if (CONST_INT_P (val_exp))
14670 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
14671 desired_align, align);
14672 /* Ensure that alignment prologue won't copy past end of block. */
14673 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
14675 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
/* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
Make sure it is a power of 2. */
14678 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
/* To improve performance of small blocks, we jump around the VAL
promoting code. This means that if the promoted VAL is not constant,
we might not use it in the epilogue and have to use a byte
loop instead. */
14684 if (epilogue_size_needed > 2 && !promoted_val)
14685 force_loopy_epilogue = true;
14686 label = gen_label_rtx ();
14687 emit_cmp_and_jump_insns (count_exp,
14688 GEN_INT (epilogue_size_needed),
14689 LTU, 0, counter_mode (count_exp), 1, label);
14690 if (GET_CODE (count_exp) == CONST_INT)
14692 else if (expected_size == -1 || expected_size <= epilogue_size_needed)
14693 predict_jump (REG_BR_PROB_BASE * 60 / 100);
14695 predict_jump (REG_BR_PROB_BASE * 20 / 100);
14697 if (dynamic_check != -1)
14699 rtx hot_label = gen_label_rtx ();
14700 jump_around_label = gen_label_rtx ();
14701 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
14702 LEU, 0, counter_mode (count_exp), 1, hot_label);
14703 predict_jump (REG_BR_PROB_BASE * 90 / 100);
14704 set_storage_via_libcall (dst, count_exp, val_exp, false);
14705 emit_jump (jump_around_label);
14706 emit_label (hot_label);
14709 /* Step 2: Alignment prologue. */
/* Do the expensive promotion once we have branched off the small blocks. */
14713 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
14714 desired_align, align);
14715 gcc_assert (desired_align >= 1 && align >= 1);
14717 if (desired_align > align)
/* Except for the first move in the epilogue, we no longer know
the constant offset in aliasing info. It doesn't seem worth
the pain to maintain it for the first move, so throw away
the info early. */
14723 dst = change_address (dst, BLKmode, destreg);
14724 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
14727 if (label && size_needed == 1)
14729 emit_label (label);
14730 LABEL_NUSES (label) = 1;
14734 /* Step 3: Main loop. */
14740 gcc_unreachable ();
14742 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14743 count_exp, QImode, 1, expected_size);
14746 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14747 count_exp, Pmode, 1, expected_size);
14749 case unrolled_loop:
14750 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14751 count_exp, Pmode, 4, expected_size);
14753 case rep_prefix_8_byte:
14754 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14757 case rep_prefix_4_byte:
14758 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14761 case rep_prefix_1_byte:
14762 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
/* Properly adjust the offsets of src and dest memory for aliasing. */
14767 if (CONST_INT_P (count_exp))
14768 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
14769 (count / size_needed) * size_needed);
14771 dst = change_address (dst, BLKmode, destreg);
14773 /* Step 4: Epilogue to copy the remaining bytes. */
/* When the main loop is done, COUNT_EXP might hold the original count,
while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
bytes. Compensate if needed. */
14782 if (size_needed < desired_align - align)
14785 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
14786 GEN_INT (size_needed - 1), count_exp, 1,
14788 size_needed = desired_align - align + 1;
14789 if (tmp != count_exp)
14790 emit_move_insn (count_exp, tmp);
14792 emit_label (label);
14793 LABEL_NUSES (label) = 1;
14795 if (count_exp != const0_rtx && epilogue_size_needed > 1)
14797 if (force_loopy_epilogue)
14798 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
14801 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
14804 if (jump_around_label)
14805 emit_label (jump_around_label);
/* Expand the appropriate insns for doing strlen if not just doing
repnz; scasb

out = result, initialized with the start address
align_rtx = alignment of the address.
scratch = scratch register, initialized with the start address when
not aligned, otherwise undefined

This is just the body. It needs the initializations mentioned above and
some address computing at the end. These things are done in i386.md. */
14821 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
14825 rtx align_2_label = NULL_RTX;
14826 rtx align_3_label = NULL_RTX;
14827 rtx align_4_label = gen_label_rtx ();
14828 rtx end_0_label = gen_label_rtx ();
14830 rtx tmpreg = gen_reg_rtx (SImode);
14831 rtx scratch = gen_reg_rtx (SImode);
14835 if (CONST_INT_P (align_rtx))
14836 align = INTVAL (align_rtx);
14838 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
14840 /* Is there a known alignment and is it less than 4? */
14843 rtx scratch1 = gen_reg_rtx (Pmode);
14844 emit_move_insn (scratch1, out);
14845 /* Is there a known alignment and is it not 2? */
14848 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
14849 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
14851 /* Leave just the 3 lower bits. */
14852 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
14853 NULL_RTX, 0, OPTAB_WIDEN);
14855 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14856 Pmode, 1, align_4_label);
14857 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
14858 Pmode, 1, align_2_label);
14859 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
14860 Pmode, 1, align_3_label);
/* Since the alignment is 2, we have to check 2 or 0 bytes;
check if it is aligned to 4 bytes. */
14867 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
14868 NULL_RTX, 0, OPTAB_WIDEN);
14870 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14871 Pmode, 1, align_4_label);
14874 mem = change_address (src, QImode, out);
14876 /* Now compare the bytes. */
/* Compare the first n unaligned bytes on a byte-per-byte basis. */
14879 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
14880 QImode, 1, end_0_label);
14882 /* Increment the address. */
14884 emit_insn (gen_adddi3 (out, out, const1_rtx));
14886 emit_insn (gen_addsi3 (out, out, const1_rtx));
14888 /* Not needed with an alignment of 2 */
14891 emit_label (align_2_label);
14893 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14897 emit_insn (gen_adddi3 (out, out, const1_rtx));
14899 emit_insn (gen_addsi3 (out, out, const1_rtx));
14901 emit_label (align_3_label);
14904 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14908 emit_insn (gen_adddi3 (out, out, const1_rtx));
14910 emit_insn (gen_addsi3 (out, out, const1_rtx));
/* Generate a loop to check 4 bytes at a time. It is not a good idea to
align this loop; it gives only huge programs and does not help
performance. */
14916 emit_label (align_4_label);
14918 mem = change_address (src, SImode, out);
14919 emit_move_insn (scratch, mem);
14921 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
14923 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
14925 /* This formula yields a nonzero result iff one of the bytes is zero.
This saves three branches inside the loop and many cycles. */
14928 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
14929 emit_insn (gen_one_cmplsi2 (scratch, scratch));
14930 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
14931 emit_insn (gen_andsi3 (tmpreg, tmpreg,
14932 gen_int_mode (0x80808080, SImode)));
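/* In C terms, the four insns above compute

     tmpreg = (x - 0x01010101) & ~x & 0x80808080;

   (a sketch in unsigned 32-bit arithmetic). The result is nonzero
   exactly when some byte of x is zero: the subtraction sets bit 7 of a
   byte when that byte was zero (or borrows past it), and the ~x term
   masks off the bytes that already had bit 7 set.  */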
14933 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
14938 rtx reg = gen_reg_rtx (SImode);
14939 rtx reg2 = gen_reg_rtx (Pmode);
14940 emit_move_insn (reg, tmpreg);
14941 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
14943 /* If zero is not in the first two bytes, move two bytes forward. */
14944 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14945 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14946 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14947 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
14948 gen_rtx_IF_THEN_ELSE (SImode, tmp,
14951 /* Emit lea manually to avoid clobbering of flags. */
14952 emit_insn (gen_rtx_SET (SImode, reg2,
14953 gen_rtx_PLUS (Pmode, out, const2_rtx)));
14955 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14956 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14957 emit_insn (gen_rtx_SET (VOIDmode, out,
14958 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
14965 rtx end_2_label = gen_label_rtx ();
14966 /* Is zero in the first two bytes? */
14968 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14969 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14970 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
14971 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14972 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
14974 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14975 JUMP_LABEL (tmp) = end_2_label;
14977 /* Not in the first two. Move two bytes forward. */
14978 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
14980 emit_insn (gen_adddi3 (out, out, const2_rtx));
14982 emit_insn (gen_addsi3 (out, out, const2_rtx));
14984 emit_label (end_2_label);
14988 /* Avoid branch in fixing the byte. */
14989 tmpreg = gen_lowpart (QImode, tmpreg);
14990 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
14991 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
14993 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
14995 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
14997 emit_label (end_0_label);
15000 /* Expand strlen. */
15003 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
15005 rtx addr, scratch1, scratch2, scratch3, scratch4;
/* The generic case of the strlen expander is long. Avoid expanding it
unless TARGET_INLINE_ALL_STRINGOPS. */
15010 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
15011 && !TARGET_INLINE_ALL_STRINGOPS
15013 && (!CONST_INT_P (align) || INTVAL (align) < 4))
15016 addr = force_reg (Pmode, XEXP (src, 0));
15017 scratch1 = gen_reg_rtx (Pmode);
15019 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
/* Well, it seems that some optimizers do not combine a call like
foo (strlen (bar), strlen (bar));
when the move and the subtraction are done here. They do calculate
the length just once when these instructions are done inside of
output_strlen_unroll(). But I think that since &bar[strlen (bar)] is
often used, and I use one fewer register for the lifetime of
output_strlen_unroll(), this is better. */
15030 emit_move_insn (out, addr);
15032 ix86_expand_strlensi_unroll_1 (out, src, align);
15034 /* strlensi_unroll_1 returns the address of the zero at the end of
15035 the string, like memchr(), so compute the length by subtracting
15036 the start address. */
15038 emit_insn (gen_subdi3 (out, out, addr));
15040 emit_insn (gen_subsi3 (out, out, addr));
15045 scratch2 = gen_reg_rtx (Pmode);
15046 scratch3 = gen_reg_rtx (Pmode);
15047 scratch4 = force_reg (Pmode, constm1_rtx);
15049 emit_move_insn (scratch3, addr);
15050 eoschar = force_reg (QImode, eoschar);
15052 src = replace_equiv_address_nv (src, scratch3);
15054 /* If .md starts supporting :P, this can be done in .md. */
15055 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
15056 scratch4), UNSPEC_SCAS);
15057 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
15060 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
15061 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
15065 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
15066 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
/* For a given symbol (function), construct code to compute the address of its
PLT entry in the large x86-64 PIC model. */
15075 construct_plt_address (rtx symbol)
15077 rtx tmp = gen_reg_rtx (Pmode);
15078 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
15080 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
15081 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
15083 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
15084 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
15089 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
15090 rtx callarg2 ATTRIBUTE_UNUSED,
15091 rtx pop, int sibcall)
15093 rtx use = NULL, call;
15095 if (pop == const0_rtx)
15097 gcc_assert (!TARGET_64BIT || !pop);
15099 if (TARGET_MACHO && !TARGET_64BIT)
15102 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
15103 fnaddr = machopic_indirect_call_target (fnaddr);
15108 /* Static functions and indirect calls don't need the pic register. */
15109 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
15110 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
15111 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
15112 use_reg (&use, pic_offset_table_rtx);
15115 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
15117 rtx al = gen_rtx_REG (QImode, 0);
15118 emit_move_insn (al, callarg2);
15119 use_reg (&use, al);
15122 if (ix86_cmodel == CM_LARGE_PIC
15123 && GET_CODE (fnaddr) == MEM
15124 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
15125 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
15126 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
15127 else if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
15129 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
15130 fnaddr = gen_rtx_MEM (QImode, fnaddr);
15132 if (sibcall && TARGET_64BIT
15133 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
15136 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
15137 fnaddr = gen_rtx_REG (Pmode, R11_REG);
15138 emit_move_insn (fnaddr, addr);
15139 fnaddr = gen_rtx_MEM (QImode, fnaddr);
15142 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
15144 call = gen_rtx_SET (VOIDmode, retval, call);
15147 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
15148 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
15149 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
15152 call = emit_call_insn (call);
15154 CALL_INSN_FUNCTION_USAGE (call) = use;
/* Clear stack slot assignments remembered from previous functions.
This is called from INIT_EXPANDERS once before RTL is emitted for each
function. */
15162 static struct machine_function *
15163 ix86_init_machine_status (void)
15165 struct machine_function *f;
15167 f = ggc_alloc_cleared (sizeof (struct machine_function));
15168 f->use_fast_prologue_epilogue_nregs = -1;
15169 f->tls_descriptor_call_expanded_p = 0;
15174 /* Return a MEM corresponding to a stack slot with mode MODE.
15175 Allocate a new slot if necessary.
15177 The RTL for a function can have several slots available: N is
15178 which slot to use. */
15181 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
15183 struct stack_local_entry *s;
15185 gcc_assert (n < MAX_386_STACK_LOCALS);
15187 for (s = ix86_stack_locals; s; s = s->next)
15188 if (s->mode == mode && s->n == n)
15189 return copy_rtx (s->rtl);
15191 s = (struct stack_local_entry *)
15192 ggc_alloc (sizeof (struct stack_local_entry));
15195 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
15197 s->next = ix86_stack_locals;
15198 ix86_stack_locals = s;
15202 /* Construct the SYMBOL_REF for the tls_get_addr function. */
15204 static GTY(()) rtx ix86_tls_symbol;
15206 ix86_tls_get_addr (void)
15209 if (!ix86_tls_symbol)
15211 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
15212 (TARGET_ANY_GNU_TLS
15214 ? "___tls_get_addr"
15215 : "__tls_get_addr");
15218 return ix86_tls_symbol;
15221 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
15223 static GTY(()) rtx ix86_tls_module_base_symbol;
15225 ix86_tls_module_base (void)
15228 if (!ix86_tls_module_base_symbol)
15230 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
15231 "_TLS_MODULE_BASE_");
15232 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
15233 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
15236 return ix86_tls_module_base_symbol;
15239 /* Calculate the length of the memory address in the instruction
15240 encoding. Does not include the one-byte modrm, opcode, or prefix. */
15243 memory_address_length (rtx addr)
15245 struct ix86_address parts;
15246 rtx base, index, disp;
15250 if (GET_CODE (addr) == PRE_DEC
15251 || GET_CODE (addr) == POST_INC
15252 || GET_CODE (addr) == PRE_MODIFY
15253 || GET_CODE (addr) == POST_MODIFY)
15256 ok = ix86_decompose_address (addr, &parts);
15259 if (parts.base && GET_CODE (parts.base) == SUBREG)
15260 parts.base = SUBREG_REG (parts.base);
15261 if (parts.index && GET_CODE (parts.index) == SUBREG)
15262 parts.index = SUBREG_REG (parts.index);
15265 index = parts.index;
15270 - esp as the base always wants an index,
15271 - ebp as the base always wants a displacement. */
15273 /* Register Indirect. */
15274 if (base && !index && !disp)
15276 /* esp (for its index) and ebp (for its displacement) need
15277 the two-byte modrm form. */
15278 if (addr == stack_pointer_rtx
15279 || addr == arg_pointer_rtx
15280 || addr == frame_pointer_rtx
15281 || addr == hard_frame_pointer_rtx)
15285 /* Direct Addressing. */
15286 else if (disp && !base && !index)
15291 /* Find the length of the displacement constant. */
15294 if (base && satisfies_constraint_K (disp))
15299 /* ebp always wants a displacement. */
15300 else if (base == hard_frame_pointer_rtx)
15303 /* An index requires the two-byte modrm form.... */
15305 /* ...like esp, which always wants an index. */
15306 || base == stack_pointer_rtx
15307 || base == arg_pointer_rtx
15308 || base == frame_pointer_rtx)
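/* Some worked examples of the extra bytes counted here, beyond the
   always-present modrm byte (standard ia32 encodings, AT&T syntax;
   given for illustration, not derived from this function's code):

     (%eax)         -> +0  plain register indirect
     (%esp)         -> +1  SIB byte, since esp as a base requires one
     (%ebp)         -> +1  disp8 of zero, since ebp requires a displacement
     16(%eax)       -> +1  disp8
     1024(%eax)     -> +4  disp32
     (%eax,%ecx,4)  -> +1  SIB byte
     symbol         -> +4  disp32, direct addressing  */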
/* Compute the default value for the "length_immediate" attribute. When
SHORTFORM is set, expect that the insn has an 8-bit immediate alternative. */
15318 ix86_attr_length_immediate_default (rtx insn, int shortform)
15322 extract_insn_cached (insn);
15323 for (i = recog_data.n_operands - 1; i >= 0; --i)
15324 if (CONSTANT_P (recog_data.operand[i]))
15327 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
15331 switch (get_attr_mode (insn))
/* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
15347 fatal_insn ("unknown insn mode", insn);
15353 /* Compute default value for "length_address" attribute. */
15355 ix86_attr_length_address_default (rtx insn)
15359 if (get_attr_type (insn) == TYPE_LEA)
15361 rtx set = PATTERN (insn);
15363 if (GET_CODE (set) == PARALLEL)
15364 set = XVECEXP (set, 0, 0);
15366 gcc_assert (GET_CODE (set) == SET);
15368 return memory_address_length (SET_SRC (set));
15371 extract_insn_cached (insn);
15372 for (i = recog_data.n_operands - 1; i >= 0; --i)
15373 if (MEM_P (recog_data.operand[i]))
15375 return memory_address_length (XEXP (recog_data.operand[i], 0));
/* Return the maximum number of instructions a CPU can issue. */
15384 ix86_issue_rate (void)
15388 case PROCESSOR_PENTIUM:
15392 case PROCESSOR_PENTIUMPRO:
15393 case PROCESSOR_PENTIUM4:
15394 case PROCESSOR_ATHLON:
15396 case PROCESSOR_AMDFAM10:
15397 case PROCESSOR_NOCONA:
15398 case PROCESSOR_GENERIC32:
15399 case PROCESSOR_GENERIC64:
15402 case PROCESSOR_CORE2:
/* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
by DEP_INSN and nothing else set by DEP_INSN. */
15414 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
15418 /* Simplify the test for uninteresting insns. */
15419 if (insn_type != TYPE_SETCC
15420 && insn_type != TYPE_ICMOV
15421 && insn_type != TYPE_FCMOV
15422 && insn_type != TYPE_IBR)
15425 if ((set = single_set (dep_insn)) != 0)
15427 set = SET_DEST (set);
15430 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
15431 && XVECLEN (PATTERN (dep_insn), 0) == 2
15432 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
15433 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
15435 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
15441 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
15444 /* This test is true if the dependent insn reads the flags but
15445 not any other potentially set register. */
15446 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
15449 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
15455 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
15456 address with operands set by DEP_INSN. */
15459 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
15463 if (insn_type == TYPE_LEA
15466 addr = PATTERN (insn);
15468 if (GET_CODE (addr) == PARALLEL)
15469 addr = XVECEXP (addr, 0, 0);
15471 gcc_assert (GET_CODE (addr) == SET);
15473 addr = SET_SRC (addr);
15478 extract_insn_cached (insn);
15479 for (i = recog_data.n_operands - 1; i >= 0; --i)
15480 if (MEM_P (recog_data.operand[i]))
15482 addr = XEXP (recog_data.operand[i], 0);
15489 return modified_in_p (addr, dep_insn);
15493 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
15495 enum attr_type insn_type, dep_insn_type;
15496 enum attr_memory memory;
15498 int dep_insn_code_number;
15500 /* Anti and output dependencies have zero cost on all CPUs. */
15501 if (REG_NOTE_KIND (link) != 0)
15504 dep_insn_code_number = recog_memoized (dep_insn);
15506 /* If we can't recognize the insns, we can't really do anything. */
15507 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
15510 insn_type = get_attr_type (insn);
15511 dep_insn_type = get_attr_type (dep_insn);
15515 case PROCESSOR_PENTIUM:
15516 /* Address Generation Interlock adds a cycle of latency. */
15517 if (ix86_agi_dependent (insn, dep_insn, insn_type))
15520 /* ??? Compares pair with jump/setcc. */
15521 if (ix86_flags_dependent (insn, dep_insn, insn_type))
15524 /* Floating point stores require value to be ready one cycle earlier. */
15525 if (insn_type == TYPE_FMOV
15526 && get_attr_memory (insn) == MEMORY_STORE
15527 && !ix86_agi_dependent (insn, dep_insn, insn_type))
15531 case PROCESSOR_PENTIUMPRO:
15532 memory = get_attr_memory (insn);
15534 /* INT->FP conversion is expensive. */
15535 if (get_attr_fp_int_src (dep_insn))
15538 /* There is one cycle extra latency between an FP op and a store. */
15539 if (insn_type == TYPE_FMOV
15540 && (set = single_set (dep_insn)) != NULL_RTX
15541 && (set2 = single_set (insn)) != NULL_RTX
15542 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
15543 && MEM_P (SET_DEST (set2)))
/* Show the ability of the reorder buffer to hide the latency of a load by
executing it in parallel with the previous instruction when the
previous instruction is not needed to compute the address. */
15549 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
15550 && !ix86_agi_dependent (insn, dep_insn, insn_type))
/* Claim moves to take one cycle, as the core can issue one load
at a time and the next load can start a cycle later. */
15554 if (dep_insn_type == TYPE_IMOV
15555 || dep_insn_type == TYPE_FMOV)
15563 memory = get_attr_memory (insn);
/* The esp dependency is resolved before the instruction is really
finished. */
15567 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
15568 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
15571 /* INT->FP conversion is expensive. */
15572 if (get_attr_fp_int_src (dep_insn))
15575 /* Show the ability of the reorder buffer to hide the latency of a load
15576 by executing it in parallel with the previous instruction when the
15577 previous instruction is not needed to compute the address. */
15578 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
15579 && !ix86_agi_dependent (insn, dep_insn, insn_type))
15581 /* Claim moves to take one cycle, as the core can issue one load
15582 at a time and the next load can start a cycle later. */
15583 if (dep_insn_type == TYPE_IMOV
15584 || dep_insn_type == TYPE_FMOV)
15593 case PROCESSOR_ATHLON:
15594 case PROCESSOR_K8:
15595 case PROCESSOR_AMDFAM10:
15596 case PROCESSOR_GENERIC32:
15597 case PROCESSOR_GENERIC64:
15598 memory = get_attr_memory (insn);
15600 /* Show the ability of the reorder buffer to hide the latency of a load
15601 by executing it in parallel with the previous instruction when the
15602 previous instruction is not needed to compute the address. */
15603 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
15604 && !ix86_agi_dependent (insn, dep_insn, insn_type))
15606 enum attr_unit unit = get_attr_unit (insn);
15609 /* Because of the difference between the length of integer and
15610 floating-point unit pipeline preparation stages, the memory operands
15611 for floating point are cheaper.
15613 ??? For Athlon the difference is most probably 2. */
15614 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
15615 loadcost = 3;
15616 else
15617 loadcost = TARGET_ATHLON ? 2 : 0;
15619 if (cost >= loadcost)
15620 cost -= loadcost;
15621 else
15622 cost = 0;
15632 /* How many alternative schedules to try. This should be as wide as the
15633 scheduling freedom in the DFA, but no wider. Making this value too
15634 large results in extra work for the scheduler. */
15637 ia32_multipass_dfa_lookahead (void)
15639 if (ix86_tune == PROCESSOR_PENTIUM)
15640 return 2;
15642 if (ix86_tune == PROCESSOR_PENTIUMPRO
15643 || ix86_tune == PROCESSOR_K6)
15644 return 1;
15646 else
15647 return 0;
15651 /* Compute the alignment given to a constant that is being placed in memory.
15652 EXP is the constant and ALIGN is the alignment that the object would
15653 ordinarily have.
15654 The value of this function is used instead of that alignment to align
15655 the object. */
15658 ix86_constant_alignment (tree exp, int align)
15660 if (TREE_CODE (exp) == REAL_CST)
15662 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
15663 return 64;
15664 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
15665 return 128;
15667 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
15668 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
15669 return BITS_PER_WORD;
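/* Editorial example (not in the original source): a DFmode REAL_CST such
   as a double literal placed in memory gets at least 64-bit alignment, an
   XFmode/TFmode constant gets 128-bit alignment, and a STRING_CST of
   length >= 31 is (when not optimizing for size) aligned to BITS_PER_WORD
   so it can be copied with word-sized accesses; anything else keeps the
   incoming ALIGN.  */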
15674 /* Compute the alignment for a static variable.
15675 TYPE is the data type, and ALIGN is the alignment that
15676 the object would ordinarily have. The value of this function is used
15677 instead of that alignment to align the object. */
15680 ix86_data_alignment (tree type, int align)
15682 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
15684 if (AGGREGATE_TYPE_P (type)
15685 && TYPE_SIZE (type)
15686 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
15687 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
15688 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
15689 && align < max_align)
15692 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
15693 to a 16-byte boundary. */
15696 if (AGGREGATE_TYPE_P (type)
15697 && TYPE_SIZE (type)
15698 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
15699 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
15700 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
15704 if (TREE_CODE (type) == ARRAY_TYPE)
15706 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
15708 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
15711 else if (TREE_CODE (type) == COMPLEX_TYPE)
15714 if (TYPE_MODE (type) == DCmode && align < 64)
15716 if (TYPE_MODE (type) == XCmode && align < 128)
15719 else if ((TREE_CODE (type) == RECORD_TYPE
15720 || TREE_CODE (type) == UNION_TYPE
15721 || TREE_CODE (type) == QUAL_UNION_TYPE)
15722 && TYPE_FIELDS (type))
15724 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
15726 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
15729 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
15730 || TREE_CODE (type) == INTEGER_TYPE)
15732 if (TYPE_MODE (type) == DFmode && align < 64)
15734 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
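/* Worked example (editorial; assumes a 64-bit target where
   MAX_OFILE_ALIGNMENT >= 256 and optimize_size is 0):
       static double a[4];   TYPE_SIZE = 256 bits >= max_align
                             -> aligned to 256 bits (32 bytes);
       static char buf[20];  160 bits >= 128 -> 128-bit (16-byte)
                             alignment per the x86-64 ABI clause above.  */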
15741 /* Compute the alignment for a local variable.
15742 TYPE is the data type, and ALIGN is the alignment that
15743 the object would ordinarily have. The value of this function is used
15744 instead of that alignment to align the object. */
15747 ix86_local_alignment (tree type, int align)
15749 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
15750 to a 16-byte boundary. */
15753 if (AGGREGATE_TYPE_P (type)
15754 && TYPE_SIZE (type)
15755 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
15756 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
15757 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
15760 if (TREE_CODE (type) == ARRAY_TYPE)
15762 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
15764 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
15767 else if (TREE_CODE (type) == COMPLEX_TYPE)
15769 if (TYPE_MODE (type) == DCmode && align < 64)
15771 if (TYPE_MODE (type) == XCmode && align < 128)
15774 else if ((TREE_CODE (type) == RECORD_TYPE
15775 || TREE_CODE (type) == UNION_TYPE
15776 || TREE_CODE (type) == QUAL_UNION_TYPE)
15777 && TYPE_FIELDS (type))
15779 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
15781 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
15784 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
15785 || TREE_CODE (type) == INTEGER_TYPE)
15788 if (TYPE_MODE (type) == DFmode && align < 64)
15790 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
15796 /* Emit RTL insns to initialize the variable parts of a trampoline.
15797 FNADDR is an RTX for the address of the function's pure code.
15798 CXT is an RTX for the static chain value for the function. */
15800 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
15804 /* Compute offset from the end of the jmp to the target function. */
15805 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
15806 plus_constant (tramp, 10),
15807 NULL_RTX, 1, OPTAB_DIRECT);
15808 emit_move_insn (gen_rtx_MEM (QImode, tramp),
15809 gen_int_mode (0xb9, QImode));
15810 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
15811 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
15812 gen_int_mode (0xe9, QImode));
15813 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
15818 /* Try to load the address using the shorter movl instead of movabs.
15819 We may want to support movq for kernel mode, but the kernel does not
15820 use trampolines at the moment. */
15821 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
15823 fnaddr = copy_to_mode_reg (DImode, fnaddr);
15824 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15825 gen_int_mode (0xbb41, HImode));
15826 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
15827 gen_lowpart (SImode, fnaddr));
15832 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15833 gen_int_mode (0xbb49, HImode));
15834 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15838 /* Load static chain using movabs to r10. */
15839 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15840 gen_int_mode (0xba49, HImode));
15841 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15844 /* Jump to r11. */
15845 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15846 gen_int_mode (0xff49, HImode));
15847 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
15848 gen_int_mode (0xe3, QImode));
15850 gcc_assert (offset <= TRAMPOLINE_SIZE);
15853 #ifdef ENABLE_EXECUTE_STACK
15854 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
15855 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
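/* Editorial sketch (derived from the constants above, not part of the
   original source): the bytes emitted into the trampoline decode as
   follows.  32-bit path:
       b9 <cxt32>        movl   $cxt, %ecx
       e9 <disp32>       jmp    fnaddr        (disp relative to tramp+10)
   64-bit path:
       41 bb <imm32>     movl   $fnaddr, %r11d  (when fnaddr zero-extends)
    or 49 bb <imm64>     movabs $fnaddr, %r11
       49 ba <imm64>     movabs $cxt, %r10
       49 ff e3          jmp    *%r11
   The constants 0xbb41, 0xbb49, 0xba49 and 0xff49 appear byte-swapped
   relative to this listing because they are stored little-endian as
   HImode integers.  */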
15859 /* Codes for all the SSE/MMX builtins. */
15862 IX86_BUILTIN_ADDPS,
15863 IX86_BUILTIN_ADDSS,
15864 IX86_BUILTIN_DIVPS,
15865 IX86_BUILTIN_DIVSS,
15866 IX86_BUILTIN_MULPS,
15867 IX86_BUILTIN_MULSS,
15868 IX86_BUILTIN_SUBPS,
15869 IX86_BUILTIN_SUBSS,
15871 IX86_BUILTIN_CMPEQPS,
15872 IX86_BUILTIN_CMPLTPS,
15873 IX86_BUILTIN_CMPLEPS,
15874 IX86_BUILTIN_CMPGTPS,
15875 IX86_BUILTIN_CMPGEPS,
15876 IX86_BUILTIN_CMPNEQPS,
15877 IX86_BUILTIN_CMPNLTPS,
15878 IX86_BUILTIN_CMPNLEPS,
15879 IX86_BUILTIN_CMPNGTPS,
15880 IX86_BUILTIN_CMPNGEPS,
15881 IX86_BUILTIN_CMPORDPS,
15882 IX86_BUILTIN_CMPUNORDPS,
15883 IX86_BUILTIN_CMPEQSS,
15884 IX86_BUILTIN_CMPLTSS,
15885 IX86_BUILTIN_CMPLESS,
15886 IX86_BUILTIN_CMPNEQSS,
15887 IX86_BUILTIN_CMPNLTSS,
15888 IX86_BUILTIN_CMPNLESS,
15889 IX86_BUILTIN_CMPNGTSS,
15890 IX86_BUILTIN_CMPNGESS,
15891 IX86_BUILTIN_CMPORDSS,
15892 IX86_BUILTIN_CMPUNORDSS,
15894 IX86_BUILTIN_COMIEQSS,
15895 IX86_BUILTIN_COMILTSS,
15896 IX86_BUILTIN_COMILESS,
15897 IX86_BUILTIN_COMIGTSS,
15898 IX86_BUILTIN_COMIGESS,
15899 IX86_BUILTIN_COMINEQSS,
15900 IX86_BUILTIN_UCOMIEQSS,
15901 IX86_BUILTIN_UCOMILTSS,
15902 IX86_BUILTIN_UCOMILESS,
15903 IX86_BUILTIN_UCOMIGTSS,
15904 IX86_BUILTIN_UCOMIGESS,
15905 IX86_BUILTIN_UCOMINEQSS,
15907 IX86_BUILTIN_CVTPI2PS,
15908 IX86_BUILTIN_CVTPS2PI,
15909 IX86_BUILTIN_CVTSI2SS,
15910 IX86_BUILTIN_CVTSI642SS,
15911 IX86_BUILTIN_CVTSS2SI,
15912 IX86_BUILTIN_CVTSS2SI64,
15913 IX86_BUILTIN_CVTTPS2PI,
15914 IX86_BUILTIN_CVTTSS2SI,
15915 IX86_BUILTIN_CVTTSS2SI64,
15917 IX86_BUILTIN_MAXPS,
15918 IX86_BUILTIN_MAXSS,
15919 IX86_BUILTIN_MINPS,
15920 IX86_BUILTIN_MINSS,
15922 IX86_BUILTIN_LOADUPS,
15923 IX86_BUILTIN_STOREUPS,
15924 IX86_BUILTIN_MOVSS,
15926 IX86_BUILTIN_MOVHLPS,
15927 IX86_BUILTIN_MOVLHPS,
15928 IX86_BUILTIN_LOADHPS,
15929 IX86_BUILTIN_LOADLPS,
15930 IX86_BUILTIN_STOREHPS,
15931 IX86_BUILTIN_STORELPS,
15933 IX86_BUILTIN_MASKMOVQ,
15934 IX86_BUILTIN_MOVMSKPS,
15935 IX86_BUILTIN_PMOVMSKB,
15937 IX86_BUILTIN_MOVNTPS,
15938 IX86_BUILTIN_MOVNTQ,
15940 IX86_BUILTIN_LOADDQU,
15941 IX86_BUILTIN_STOREDQU,
15943 IX86_BUILTIN_PACKSSWB,
15944 IX86_BUILTIN_PACKSSDW,
15945 IX86_BUILTIN_PACKUSWB,
15947 IX86_BUILTIN_PADDB,
15948 IX86_BUILTIN_PADDW,
15949 IX86_BUILTIN_PADDD,
15950 IX86_BUILTIN_PADDQ,
15951 IX86_BUILTIN_PADDSB,
15952 IX86_BUILTIN_PADDSW,
15953 IX86_BUILTIN_PADDUSB,
15954 IX86_BUILTIN_PADDUSW,
15955 IX86_BUILTIN_PSUBB,
15956 IX86_BUILTIN_PSUBW,
15957 IX86_BUILTIN_PSUBD,
15958 IX86_BUILTIN_PSUBQ,
15959 IX86_BUILTIN_PSUBSB,
15960 IX86_BUILTIN_PSUBSW,
15961 IX86_BUILTIN_PSUBUSB,
15962 IX86_BUILTIN_PSUBUSW,
15965 IX86_BUILTIN_PANDN,
15969 IX86_BUILTIN_PAVGB,
15970 IX86_BUILTIN_PAVGW,
15972 IX86_BUILTIN_PCMPEQB,
15973 IX86_BUILTIN_PCMPEQW,
15974 IX86_BUILTIN_PCMPEQD,
15975 IX86_BUILTIN_PCMPGTB,
15976 IX86_BUILTIN_PCMPGTW,
15977 IX86_BUILTIN_PCMPGTD,
15979 IX86_BUILTIN_PMADDWD,
15981 IX86_BUILTIN_PMAXSW,
15982 IX86_BUILTIN_PMAXUB,
15983 IX86_BUILTIN_PMINSW,
15984 IX86_BUILTIN_PMINUB,
15986 IX86_BUILTIN_PMULHUW,
15987 IX86_BUILTIN_PMULHW,
15988 IX86_BUILTIN_PMULLW,
15990 IX86_BUILTIN_PSADBW,
15991 IX86_BUILTIN_PSHUFW,
15993 IX86_BUILTIN_PSLLW,
15994 IX86_BUILTIN_PSLLD,
15995 IX86_BUILTIN_PSLLQ,
15996 IX86_BUILTIN_PSRAW,
15997 IX86_BUILTIN_PSRAD,
15998 IX86_BUILTIN_PSRLW,
15999 IX86_BUILTIN_PSRLD,
16000 IX86_BUILTIN_PSRLQ,
16001 IX86_BUILTIN_PSLLWI,
16002 IX86_BUILTIN_PSLLDI,
16003 IX86_BUILTIN_PSLLQI,
16004 IX86_BUILTIN_PSRAWI,
16005 IX86_BUILTIN_PSRADI,
16006 IX86_BUILTIN_PSRLWI,
16007 IX86_BUILTIN_PSRLDI,
16008 IX86_BUILTIN_PSRLQI,
16010 IX86_BUILTIN_PUNPCKHBW,
16011 IX86_BUILTIN_PUNPCKHWD,
16012 IX86_BUILTIN_PUNPCKHDQ,
16013 IX86_BUILTIN_PUNPCKLBW,
16014 IX86_BUILTIN_PUNPCKLWD,
16015 IX86_BUILTIN_PUNPCKLDQ,
16017 IX86_BUILTIN_SHUFPS,
16019 IX86_BUILTIN_RCPPS,
16020 IX86_BUILTIN_RCPSS,
16021 IX86_BUILTIN_RSQRTPS,
16022 IX86_BUILTIN_RSQRTSS,
16023 IX86_BUILTIN_SQRTPS,
16024 IX86_BUILTIN_SQRTSS,
16026 IX86_BUILTIN_UNPCKHPS,
16027 IX86_BUILTIN_UNPCKLPS,
16029 IX86_BUILTIN_ANDPS,
16030 IX86_BUILTIN_ANDNPS,
16032 IX86_BUILTIN_XORPS,
16035 IX86_BUILTIN_LDMXCSR,
16036 IX86_BUILTIN_STMXCSR,
16037 IX86_BUILTIN_SFENCE,
16039 /* 3DNow! Original */
16040 IX86_BUILTIN_FEMMS,
16041 IX86_BUILTIN_PAVGUSB,
16042 IX86_BUILTIN_PF2ID,
16043 IX86_BUILTIN_PFACC,
16044 IX86_BUILTIN_PFADD,
16045 IX86_BUILTIN_PFCMPEQ,
16046 IX86_BUILTIN_PFCMPGE,
16047 IX86_BUILTIN_PFCMPGT,
16048 IX86_BUILTIN_PFMAX,
16049 IX86_BUILTIN_PFMIN,
16050 IX86_BUILTIN_PFMUL,
16051 IX86_BUILTIN_PFRCP,
16052 IX86_BUILTIN_PFRCPIT1,
16053 IX86_BUILTIN_PFRCPIT2,
16054 IX86_BUILTIN_PFRSQIT1,
16055 IX86_BUILTIN_PFRSQRT,
16056 IX86_BUILTIN_PFSUB,
16057 IX86_BUILTIN_PFSUBR,
16058 IX86_BUILTIN_PI2FD,
16059 IX86_BUILTIN_PMULHRW,
16061 /* 3DNow! Athlon Extensions */
16062 IX86_BUILTIN_PF2IW,
16063 IX86_BUILTIN_PFNACC,
16064 IX86_BUILTIN_PFPNACC,
16065 IX86_BUILTIN_PI2FW,
16066 IX86_BUILTIN_PSWAPDSI,
16067 IX86_BUILTIN_PSWAPDSF,
16070 IX86_BUILTIN_ADDPD,
16071 IX86_BUILTIN_ADDSD,
16072 IX86_BUILTIN_DIVPD,
16073 IX86_BUILTIN_DIVSD,
16074 IX86_BUILTIN_MULPD,
16075 IX86_BUILTIN_MULSD,
16076 IX86_BUILTIN_SUBPD,
16077 IX86_BUILTIN_SUBSD,
16079 IX86_BUILTIN_CMPEQPD,
16080 IX86_BUILTIN_CMPLTPD,
16081 IX86_BUILTIN_CMPLEPD,
16082 IX86_BUILTIN_CMPGTPD,
16083 IX86_BUILTIN_CMPGEPD,
16084 IX86_BUILTIN_CMPNEQPD,
16085 IX86_BUILTIN_CMPNLTPD,
16086 IX86_BUILTIN_CMPNLEPD,
16087 IX86_BUILTIN_CMPNGTPD,
16088 IX86_BUILTIN_CMPNGEPD,
16089 IX86_BUILTIN_CMPORDPD,
16090 IX86_BUILTIN_CMPUNORDPD,
16091 IX86_BUILTIN_CMPEQSD,
16092 IX86_BUILTIN_CMPLTSD,
16093 IX86_BUILTIN_CMPLESD,
16094 IX86_BUILTIN_CMPNEQSD,
16095 IX86_BUILTIN_CMPNLTSD,
16096 IX86_BUILTIN_CMPNLESD,
16097 IX86_BUILTIN_CMPORDSD,
16098 IX86_BUILTIN_CMPUNORDSD,
16100 IX86_BUILTIN_COMIEQSD,
16101 IX86_BUILTIN_COMILTSD,
16102 IX86_BUILTIN_COMILESD,
16103 IX86_BUILTIN_COMIGTSD,
16104 IX86_BUILTIN_COMIGESD,
16105 IX86_BUILTIN_COMINEQSD,
16106 IX86_BUILTIN_UCOMIEQSD,
16107 IX86_BUILTIN_UCOMILTSD,
16108 IX86_BUILTIN_UCOMILESD,
16109 IX86_BUILTIN_UCOMIGTSD,
16110 IX86_BUILTIN_UCOMIGESD,
16111 IX86_BUILTIN_UCOMINEQSD,
16113 IX86_BUILTIN_MAXPD,
16114 IX86_BUILTIN_MAXSD,
16115 IX86_BUILTIN_MINPD,
16116 IX86_BUILTIN_MINSD,
16118 IX86_BUILTIN_ANDPD,
16119 IX86_BUILTIN_ANDNPD,
16121 IX86_BUILTIN_XORPD,
16123 IX86_BUILTIN_SQRTPD,
16124 IX86_BUILTIN_SQRTSD,
16126 IX86_BUILTIN_UNPCKHPD,
16127 IX86_BUILTIN_UNPCKLPD,
16129 IX86_BUILTIN_SHUFPD,
16131 IX86_BUILTIN_LOADUPD,
16132 IX86_BUILTIN_STOREUPD,
16133 IX86_BUILTIN_MOVSD,
16135 IX86_BUILTIN_LOADHPD,
16136 IX86_BUILTIN_LOADLPD,
16138 IX86_BUILTIN_CVTDQ2PD,
16139 IX86_BUILTIN_CVTDQ2PS,
16141 IX86_BUILTIN_CVTPD2DQ,
16142 IX86_BUILTIN_CVTPD2PI,
16143 IX86_BUILTIN_CVTPD2PS,
16144 IX86_BUILTIN_CVTTPD2DQ,
16145 IX86_BUILTIN_CVTTPD2PI,
16147 IX86_BUILTIN_CVTPI2PD,
16148 IX86_BUILTIN_CVTSI2SD,
16149 IX86_BUILTIN_CVTSI642SD,
16151 IX86_BUILTIN_CVTSD2SI,
16152 IX86_BUILTIN_CVTSD2SI64,
16153 IX86_BUILTIN_CVTSD2SS,
16154 IX86_BUILTIN_CVTSS2SD,
16155 IX86_BUILTIN_CVTTSD2SI,
16156 IX86_BUILTIN_CVTTSD2SI64,
16158 IX86_BUILTIN_CVTPS2DQ,
16159 IX86_BUILTIN_CVTPS2PD,
16160 IX86_BUILTIN_CVTTPS2DQ,
16162 IX86_BUILTIN_MOVNTI,
16163 IX86_BUILTIN_MOVNTPD,
16164 IX86_BUILTIN_MOVNTDQ,
16167 IX86_BUILTIN_MASKMOVDQU,
16168 IX86_BUILTIN_MOVMSKPD,
16169 IX86_BUILTIN_PMOVMSKB128,
16171 IX86_BUILTIN_PACKSSWB128,
16172 IX86_BUILTIN_PACKSSDW128,
16173 IX86_BUILTIN_PACKUSWB128,
16175 IX86_BUILTIN_PADDB128,
16176 IX86_BUILTIN_PADDW128,
16177 IX86_BUILTIN_PADDD128,
16178 IX86_BUILTIN_PADDQ128,
16179 IX86_BUILTIN_PADDSB128,
16180 IX86_BUILTIN_PADDSW128,
16181 IX86_BUILTIN_PADDUSB128,
16182 IX86_BUILTIN_PADDUSW128,
16183 IX86_BUILTIN_PSUBB128,
16184 IX86_BUILTIN_PSUBW128,
16185 IX86_BUILTIN_PSUBD128,
16186 IX86_BUILTIN_PSUBQ128,
16187 IX86_BUILTIN_PSUBSB128,
16188 IX86_BUILTIN_PSUBSW128,
16189 IX86_BUILTIN_PSUBUSB128,
16190 IX86_BUILTIN_PSUBUSW128,
16192 IX86_BUILTIN_PAND128,
16193 IX86_BUILTIN_PANDN128,
16194 IX86_BUILTIN_POR128,
16195 IX86_BUILTIN_PXOR128,
16197 IX86_BUILTIN_PAVGB128,
16198 IX86_BUILTIN_PAVGW128,
16200 IX86_BUILTIN_PCMPEQB128,
16201 IX86_BUILTIN_PCMPEQW128,
16202 IX86_BUILTIN_PCMPEQD128,
16203 IX86_BUILTIN_PCMPGTB128,
16204 IX86_BUILTIN_PCMPGTW128,
16205 IX86_BUILTIN_PCMPGTD128,
16207 IX86_BUILTIN_PMADDWD128,
16209 IX86_BUILTIN_PMAXSW128,
16210 IX86_BUILTIN_PMAXUB128,
16211 IX86_BUILTIN_PMINSW128,
16212 IX86_BUILTIN_PMINUB128,
16214 IX86_BUILTIN_PMULUDQ,
16215 IX86_BUILTIN_PMULUDQ128,
16216 IX86_BUILTIN_PMULHUW128,
16217 IX86_BUILTIN_PMULHW128,
16218 IX86_BUILTIN_PMULLW128,
16220 IX86_BUILTIN_PSADBW128,
16221 IX86_BUILTIN_PSHUFHW,
16222 IX86_BUILTIN_PSHUFLW,
16223 IX86_BUILTIN_PSHUFD,
16225 IX86_BUILTIN_PSLLDQI128,
16226 IX86_BUILTIN_PSLLWI128,
16227 IX86_BUILTIN_PSLLDI128,
16228 IX86_BUILTIN_PSLLQI128,
16229 IX86_BUILTIN_PSRAWI128,
16230 IX86_BUILTIN_PSRADI128,
16231 IX86_BUILTIN_PSRLDQI128,
16232 IX86_BUILTIN_PSRLWI128,
16233 IX86_BUILTIN_PSRLDI128,
16234 IX86_BUILTIN_PSRLQI128,
16236 IX86_BUILTIN_PSLLDQ128,
16237 IX86_BUILTIN_PSLLW128,
16238 IX86_BUILTIN_PSLLD128,
16239 IX86_BUILTIN_PSLLQ128,
16240 IX86_BUILTIN_PSRAW128,
16241 IX86_BUILTIN_PSRAD128,
16242 IX86_BUILTIN_PSRLW128,
16243 IX86_BUILTIN_PSRLD128,
16244 IX86_BUILTIN_PSRLQ128,
16246 IX86_BUILTIN_PUNPCKHBW128,
16247 IX86_BUILTIN_PUNPCKHWD128,
16248 IX86_BUILTIN_PUNPCKHDQ128,
16249 IX86_BUILTIN_PUNPCKHQDQ128,
16250 IX86_BUILTIN_PUNPCKLBW128,
16251 IX86_BUILTIN_PUNPCKLWD128,
16252 IX86_BUILTIN_PUNPCKLDQ128,
16253 IX86_BUILTIN_PUNPCKLQDQ128,
16255 IX86_BUILTIN_CLFLUSH,
16256 IX86_BUILTIN_MFENCE,
16257 IX86_BUILTIN_LFENCE,
16259 /* Prescott New Instructions. */
16260 IX86_BUILTIN_ADDSUBPS,
16261 IX86_BUILTIN_HADDPS,
16262 IX86_BUILTIN_HSUBPS,
16263 IX86_BUILTIN_MOVSHDUP,
16264 IX86_BUILTIN_MOVSLDUP,
16265 IX86_BUILTIN_ADDSUBPD,
16266 IX86_BUILTIN_HADDPD,
16267 IX86_BUILTIN_HSUBPD,
16268 IX86_BUILTIN_LDDQU,
16270 IX86_BUILTIN_MONITOR,
16271 IX86_BUILTIN_MWAIT,
16274 IX86_BUILTIN_PHADDW,
16275 IX86_BUILTIN_PHADDD,
16276 IX86_BUILTIN_PHADDSW,
16277 IX86_BUILTIN_PHSUBW,
16278 IX86_BUILTIN_PHSUBD,
16279 IX86_BUILTIN_PHSUBSW,
16280 IX86_BUILTIN_PMADDUBSW,
16281 IX86_BUILTIN_PMULHRSW,
16282 IX86_BUILTIN_PSHUFB,
16283 IX86_BUILTIN_PSIGNB,
16284 IX86_BUILTIN_PSIGNW,
16285 IX86_BUILTIN_PSIGND,
16286 IX86_BUILTIN_PALIGNR,
16287 IX86_BUILTIN_PABSB,
16288 IX86_BUILTIN_PABSW,
16289 IX86_BUILTIN_PABSD,
16291 IX86_BUILTIN_PHADDW128,
16292 IX86_BUILTIN_PHADDD128,
16293 IX86_BUILTIN_PHADDSW128,
16294 IX86_BUILTIN_PHSUBW128,
16295 IX86_BUILTIN_PHSUBD128,
16296 IX86_BUILTIN_PHSUBSW128,
16297 IX86_BUILTIN_PMADDUBSW128,
16298 IX86_BUILTIN_PMULHRSW128,
16299 IX86_BUILTIN_PSHUFB128,
16300 IX86_BUILTIN_PSIGNB128,
16301 IX86_BUILTIN_PSIGNW128,
16302 IX86_BUILTIN_PSIGND128,
16303 IX86_BUILTIN_PALIGNR128,
16304 IX86_BUILTIN_PABSB128,
16305 IX86_BUILTIN_PABSW128,
16306 IX86_BUILTIN_PABSD128,
16308 /* AMDFAM10 - SSE4A New Instructions. */
16309 IX86_BUILTIN_MOVNTSD,
16310 IX86_BUILTIN_MOVNTSS,
16311 IX86_BUILTIN_EXTRQI,
16312 IX86_BUILTIN_EXTRQ,
16313 IX86_BUILTIN_INSERTQI,
16314 IX86_BUILTIN_INSERTQ,
16316 IX86_BUILTIN_VEC_INIT_V2SI,
16317 IX86_BUILTIN_VEC_INIT_V4HI,
16318 IX86_BUILTIN_VEC_INIT_V8QI,
16319 IX86_BUILTIN_VEC_EXT_V2DF,
16320 IX86_BUILTIN_VEC_EXT_V2DI,
16321 IX86_BUILTIN_VEC_EXT_V4SF,
16322 IX86_BUILTIN_VEC_EXT_V4SI,
16323 IX86_BUILTIN_VEC_EXT_V8HI,
16324 IX86_BUILTIN_VEC_EXT_V2SI,
16325 IX86_BUILTIN_VEC_EXT_V4HI,
16326 IX86_BUILTIN_VEC_SET_V8HI,
16327 IX86_BUILTIN_VEC_SET_V4HI,
16332 /* Table for the ix86 builtin decls. */
16333 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
16335 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Do so
16336 * only if the target_flags include one of MASK. Stores the function decl
16337 * in the ix86_builtins array.
16338 * Returns the function decl, or NULL_TREE if the builtin was not added. */
16341 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
16343 tree decl = NULL_TREE;
16345 if (mask & target_flags
16346 && (!(mask & MASK_64BIT) || TARGET_64BIT))
16348 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
16350 ix86_builtins[(int) code] = decl;
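/* Usage sketch (editorial; calls of this shape appear later in
   ix86_init_mmx_sse_builtins):
       def_builtin (MASK_SSE, "__builtin_ia32_loadups",
                    v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
   The builtin is registered only when MASK_SSE is in target_flags, and a
   MASK_64BIT builtin is additionally suppressed on 32-bit targets.  */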
16356 /* Like def_builtin, but also marks the function decl "const". */
16359 def_builtin_const (int mask, const char *name, tree type,
16360 enum ix86_builtins code)
16362 tree decl = def_builtin (mask, name, type, code);
16363 if (decl)
16364 TREE_READONLY (decl) = 1;
16368 /* Bits for builtin_description.flag. */
16370 /* Set when we don't support the comparison natively, and should
16371 swap the comparison operands in order to support it. */
16372 #define BUILTIN_DESC_SWAP_OPERANDS 1
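/* Example (editorial note): SSE has no native "compare greater"
   instruction, so the bdesc_2arg entry below for __builtin_ia32_cmpgtps
   pairs rtx code LT with BUILTIN_DESC_SWAP_OPERANDS; a > b is then
   emitted as cmpltps with the operands reversed (b < a).  */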
16374 struct builtin_description
16376 const unsigned int mask;
16377 const enum insn_code icode;
16378 const char *const name;
16379 const enum ix86_builtins code;
16380 const enum rtx_code comparison;
16381 const unsigned int flag;
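/* How to read one entry of the tables below (editorial):
       { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps",
         IX86_BUILTIN_ADDPS, 0, 0 }
   MASK_SSE gates registration on target_flags, CODE_FOR_addv4sf3 is the
   insn pattern the builtin expands to, and IX86_BUILTIN_ADDPS indexes
   ix86_builtins.  A null name means the builtin is registered by hand
   (with a different prototype) but still expanded through the table.  */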
16384 static const struct builtin_description bdesc_comi[] =
16386 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
16387 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
16388 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
16389 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
16390 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
16391 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
16392 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
16393 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
16394 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
16395 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
16396 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
16397 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
16398 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
16399 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
16400 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
16401 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
16402 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
16403 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
16404 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
16405 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
16406 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
16407 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
16408 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
16409 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
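/* Usage sketch (editorial): <xmmintrin.h>'s _mm_comieq_ss wraps the
   first entry above, roughly
       int eq = __builtin_ia32_comieq (a, b);
   which expands to comiss followed by a test of the resulting flags;
   the rtx_code column (UNEQ, UNLT, ...) tells the expander which flags
   condition to materialize.  */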
16412 static const struct builtin_description bdesc_2arg[] =
16415 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
16416 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
16417 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
16418 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
16419 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
16420 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
16421 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
16422 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
16424 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
16425 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
16426 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
16427 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
16428 BUILTIN_DESC_SWAP_OPERANDS },
16429 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
16430 BUILTIN_DESC_SWAP_OPERANDS },
16431 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
16432 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
16433 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
16434 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
16435 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
16436 BUILTIN_DESC_SWAP_OPERANDS },
16437 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
16438 BUILTIN_DESC_SWAP_OPERANDS },
16439 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
16440 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
16441 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
16442 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
16443 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
16444 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
16445 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
16446 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
16447 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
16448 BUILTIN_DESC_SWAP_OPERANDS },
16449 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
16450 BUILTIN_DESC_SWAP_OPERANDS },
16451 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
16453 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
16454 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
16455 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
16456 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
16458 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
16459 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
16460 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
16461 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
16463 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
16464 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
16465 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
16466 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
16467 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
16470 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
16471 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
16472 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
16473 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
16474 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
16475 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
16476 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
16477 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
16479 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
16480 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
16481 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
16482 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
16483 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
16484 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
16485 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
16486 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
16488 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
16489 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
16490 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
16492 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
16493 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
16494 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
16495 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
16497 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
16498 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
16500 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
16501 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
16502 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
16503 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
16504 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
16505 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
16507 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
16508 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
16509 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
16510 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
16512 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
16513 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
16514 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
16515 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
16516 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
16517 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
16520 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
16521 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
16522 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
16524 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
16525 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
16526 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
16528 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
16529 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
16530 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
16531 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
16532 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
16533 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
16535 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
16536 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
16537 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
16538 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
16539 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
16540 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
16542 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
16543 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
16544 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
16545 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
16547 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
16548 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
16551 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
16552 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
16553 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
16554 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
16555 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
16556 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
16557 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
16558 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
16560 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
16561 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
16562 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
16563 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
16564 BUILTIN_DESC_SWAP_OPERANDS },
16565 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
16566 BUILTIN_DESC_SWAP_OPERANDS },
16567 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
16568 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
16569 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
16570 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
16571 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
16572 BUILTIN_DESC_SWAP_OPERANDS },
16573 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
16574 BUILTIN_DESC_SWAP_OPERANDS },
16575 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
16576 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
16577 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
16578 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
16579 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
16580 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
16581 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
16582 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
16583 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
16585 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
16586 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
16587 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
16588 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
16590 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
16591 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
16592 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
16593 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
16595 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
16596 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
16597 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
16600 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
16601 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
16602 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
16603 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
16604 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
16605 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
16606 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
16607 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
16609 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
16610 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
16611 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
16612 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
16613 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
16614 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
16615 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
16616 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
16618 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
16619 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
16621 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
16622 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
16623 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
16624 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
16626 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
16627 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
16629 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
16630 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
16631 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
16632 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
16633 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
16634 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
16636 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
16637 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
16638 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
16639 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
16641 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
16642 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
16643 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
16644 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
16645 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
16646 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
16647 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
16648 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
16650 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
16651 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
16652 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
16654 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
16655 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
16657 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
16658 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
16660 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
16661 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
16662 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
16664 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
16665 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
16666 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
16668 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
16669 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
16671 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
16673 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
16674 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
16675 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
16676 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
16679 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
16680 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
16681 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
16682 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
16683 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
16684 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
16687 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
16688 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
16689 { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
16690 { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
16691 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
16692 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
16693 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
16694 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
16695 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
16696 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
16697 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
16698 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
16699 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
16700 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
16701 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
16702 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
16703 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
16704 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
16705 { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
16706 { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
16707 { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
16708 { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
16709 { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
16710 { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
16713 static const struct builtin_description bdesc_1arg[] =
16715 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
16716 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
16718 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
16719 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
16720 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
16722 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
16723 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
16724 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
16725 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
16726 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
16727 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
16729 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
16730 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
16732 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
16734 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
16735 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
16737 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
16738 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
16739 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
16740 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
16741 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
16743 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
16745 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
16746 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
16747 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
16748 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
16750 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
16751 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
16752 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
16755 { MASK_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, 0, 0 },
16756 { MASK_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, 0, 0 },
16759 { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
16760 { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
16761 { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
16762 { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
16763 { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
16764 { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
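/* A minimal sketch (editorial) of how these tables are consumed; the
   real loop lives further down in ix86_init_mmx_sse_builtins and derives
   each function type from insn_data instead of hard-coding it:

       const struct builtin_description *d;
       size_t i;
       for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
         if (d->name)
           def_builtin (d->mask, d->name, v4sf_ftype_v4sf_v4sf, d->code);
*/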
16767 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
16768 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
16769 portion of the builtins. */
16771 ix86_init_mmx_sse_builtins (void)
16773 const struct builtin_description * d;
16776 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
16777 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
16778 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
16779 tree V2DI_type_node
16780 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
16781 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
16782 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
16783 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
16784 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
16785 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
16786 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
16788 tree pchar_type_node = build_pointer_type (char_type_node);
16789 tree pcchar_type_node = build_pointer_type (
16790 build_type_variant (char_type_node, 1, 0));
16791 tree pfloat_type_node = build_pointer_type (float_type_node);
16792 tree pcfloat_type_node = build_pointer_type (
16793 build_type_variant (float_type_node, 1, 0));
16794 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
16795 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
16796 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
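/* Note (editorial): build_type_variant (T, 1, 0) produces the
   const-qualified variant of T, so pcchar_type_node above is the C type
   "const char *" and pcfloat_type_node is "const float *".  */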
16799 tree int_ftype_v4sf_v4sf
16800 = build_function_type_list (integer_type_node,
16801 V4SF_type_node, V4SF_type_node, NULL_TREE);
16802 tree v4si_ftype_v4sf_v4sf
16803 = build_function_type_list (V4SI_type_node,
16804 V4SF_type_node, V4SF_type_node, NULL_TREE);
16805 /* MMX/SSE/integer conversions. */
16806 tree int_ftype_v4sf
16807 = build_function_type_list (integer_type_node,
16808 V4SF_type_node, NULL_TREE);
16809 tree int64_ftype_v4sf
16810 = build_function_type_list (long_long_integer_type_node,
16811 V4SF_type_node, NULL_TREE);
16812 tree int_ftype_v8qi
16813 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
16814 tree v4sf_ftype_v4sf_int
16815 = build_function_type_list (V4SF_type_node,
16816 V4SF_type_node, integer_type_node, NULL_TREE);
16817 tree v4sf_ftype_v4sf_int64
16818 = build_function_type_list (V4SF_type_node,
16819 V4SF_type_node, long_long_integer_type_node,
16821 tree v4sf_ftype_v4sf_v2si
16822 = build_function_type_list (V4SF_type_node,
16823 V4SF_type_node, V2SI_type_node, NULL_TREE);
16825 /* Miscellaneous. */
16826 tree v8qi_ftype_v4hi_v4hi
16827 = build_function_type_list (V8QI_type_node,
16828 V4HI_type_node, V4HI_type_node, NULL_TREE);
16829 tree v4hi_ftype_v2si_v2si
16830 = build_function_type_list (V4HI_type_node,
16831 V2SI_type_node, V2SI_type_node, NULL_TREE);
16832 tree v4sf_ftype_v4sf_v4sf_int
16833 = build_function_type_list (V4SF_type_node,
16834 V4SF_type_node, V4SF_type_node,
16835 integer_type_node, NULL_TREE);
16836 tree v2si_ftype_v4hi_v4hi
16837 = build_function_type_list (V2SI_type_node,
16838 V4HI_type_node, V4HI_type_node, NULL_TREE);
16839 tree v4hi_ftype_v4hi_int
16840 = build_function_type_list (V4HI_type_node,
16841 V4HI_type_node, integer_type_node, NULL_TREE);
16842 tree v4hi_ftype_v4hi_di
16843 = build_function_type_list (V4HI_type_node,
16844 V4HI_type_node, long_long_unsigned_type_node,
16846 tree v2si_ftype_v2si_di
16847 = build_function_type_list (V2SI_type_node,
16848 V2SI_type_node, long_long_unsigned_type_node,
16850 tree void_ftype_void
16851 = build_function_type (void_type_node, void_list_node);
16852 tree void_ftype_unsigned
16853 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
16854 tree void_ftype_unsigned_unsigned
16855 = build_function_type_list (void_type_node, unsigned_type_node,
16856 unsigned_type_node, NULL_TREE);
16857 tree void_ftype_pcvoid_unsigned_unsigned
16858 = build_function_type_list (void_type_node, const_ptr_type_node,
16859 unsigned_type_node, unsigned_type_node,
16861 tree unsigned_ftype_void
16862 = build_function_type (unsigned_type_node, void_list_node);
16863 tree v2si_ftype_v4sf
16864 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
16865 /* Loads/stores. */
16866 tree void_ftype_v8qi_v8qi_pchar
16867 = build_function_type_list (void_type_node,
16868 V8QI_type_node, V8QI_type_node,
16869 pchar_type_node, NULL_TREE);
16870 tree v4sf_ftype_pcfloat
16871 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
16872 /* @@@ the type is bogus */
16873 tree v4sf_ftype_v4sf_pv2si
16874 = build_function_type_list (V4SF_type_node,
16875 V4SF_type_node, pv2si_type_node, NULL_TREE);
16876 tree void_ftype_pv2si_v4sf
16877 = build_function_type_list (void_type_node,
16878 pv2si_type_node, V4SF_type_node, NULL_TREE);
16879 tree void_ftype_pfloat_v4sf
16880 = build_function_type_list (void_type_node,
16881 pfloat_type_node, V4SF_type_node, NULL_TREE);
16882 tree void_ftype_pdi_di
16883 = build_function_type_list (void_type_node,
16884 pdi_type_node, long_long_unsigned_type_node,
16886 tree void_ftype_pv2di_v2di
16887 = build_function_type_list (void_type_node,
16888 pv2di_type_node, V2DI_type_node, NULL_TREE);
16889 /* Normal vector unops. */
16890 tree v4sf_ftype_v4sf
16891 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16892 tree v16qi_ftype_v16qi
16893 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16894 tree v8hi_ftype_v8hi
16895 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16896 tree v4si_ftype_v4si
16897 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16898 tree v8qi_ftype_v8qi
16899 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
16900 tree v4hi_ftype_v4hi
16901 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
16903 /* Normal vector binops. */
16904 tree v4sf_ftype_v4sf_v4sf
16905 = build_function_type_list (V4SF_type_node,
16906 V4SF_type_node, V4SF_type_node, NULL_TREE);
16907 tree v8qi_ftype_v8qi_v8qi
16908 = build_function_type_list (V8QI_type_node,
16909 V8QI_type_node, V8QI_type_node, NULL_TREE);
16910 tree v4hi_ftype_v4hi_v4hi
16911 = build_function_type_list (V4HI_type_node,
16912 V4HI_type_node, V4HI_type_node, NULL_TREE);
16913 tree v2si_ftype_v2si_v2si
16914 = build_function_type_list (V2SI_type_node,
16915 V2SI_type_node, V2SI_type_node, NULL_TREE);
16916 tree di_ftype_di_di
16917 = build_function_type_list (long_long_unsigned_type_node,
16918 long_long_unsigned_type_node,
16919 long_long_unsigned_type_node, NULL_TREE);
16921 tree di_ftype_di_di_int
16922 = build_function_type_list (long_long_unsigned_type_node,
16923 long_long_unsigned_type_node,
16924 long_long_unsigned_type_node,
16925 integer_type_node, NULL_TREE);
16927 tree v2si_ftype_v2sf
16928 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
16929 tree v2sf_ftype_v2si
16930 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
16931 tree v2si_ftype_v2si
16932 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
16933 tree v2sf_ftype_v2sf
16934 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
16935 tree v2sf_ftype_v2sf_v2sf
16936 = build_function_type_list (V2SF_type_node,
16937 V2SF_type_node, V2SF_type_node, NULL_TREE);
16938 tree v2si_ftype_v2sf_v2sf
16939 = build_function_type_list (V2SI_type_node,
16940 V2SF_type_node, V2SF_type_node, NULL_TREE);
16941 tree pint_type_node = build_pointer_type (integer_type_node);
16942 tree pdouble_type_node = build_pointer_type (double_type_node);
16943 tree pcdouble_type_node = build_pointer_type (
16944 build_type_variant (double_type_node, 1, 0));
16945 tree int_ftype_v2df_v2df
16946 = build_function_type_list (integer_type_node,
16947 V2DF_type_node, V2DF_type_node, NULL_TREE);
16949 tree void_ftype_pcvoid
16950 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
16951 tree v4sf_ftype_v4si
16952 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
16953 tree v4si_ftype_v4sf
16954 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
16955 tree v2df_ftype_v4si
16956 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
16957 tree v4si_ftype_v2df
16958 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
16959 tree v2si_ftype_v2df
16960 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
16961 tree v4sf_ftype_v2df
16962 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
16963 tree v2df_ftype_v2si
16964 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
16965 tree v2df_ftype_v4sf
16966 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
16967 tree int_ftype_v2df
16968 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
16969 tree int64_ftype_v2df
16970 = build_function_type_list (long_long_integer_type_node,
16971 V2DF_type_node, NULL_TREE);
16972 tree v2df_ftype_v2df_int
16973 = build_function_type_list (V2DF_type_node,
16974 V2DF_type_node, integer_type_node, NULL_TREE);
16975 tree v2df_ftype_v2df_int64
16976 = build_function_type_list (V2DF_type_node,
16977 V2DF_type_node, long_long_integer_type_node,
16978 NULL_TREE);
16979 tree v4sf_ftype_v4sf_v2df
16980 = build_function_type_list (V4SF_type_node,
16981 V4SF_type_node, V2DF_type_node, NULL_TREE);
16982 tree v2df_ftype_v2df_v4sf
16983 = build_function_type_list (V2DF_type_node,
16984 V2DF_type_node, V4SF_type_node, NULL_TREE);
16985 tree v2df_ftype_v2df_v2df_int
16986 = build_function_type_list (V2DF_type_node,
16987 V2DF_type_node, V2DF_type_node,
16988 integer_type_node,
16989 NULL_TREE);
16990 tree v2df_ftype_v2df_pcdouble
16991 = build_function_type_list (V2DF_type_node,
16992 V2DF_type_node, pcdouble_type_node, NULL_TREE);
16993 tree void_ftype_pdouble_v2df
16994 = build_function_type_list (void_type_node,
16995 pdouble_type_node, V2DF_type_node, NULL_TREE);
16996 tree void_ftype_pint_int
16997 = build_function_type_list (void_type_node,
16998 pint_type_node, integer_type_node, NULL_TREE);
16999 tree void_ftype_v16qi_v16qi_pchar
17000 = build_function_type_list (void_type_node,
17001 V16QI_type_node, V16QI_type_node,
17002 pchar_type_node, NULL_TREE);
17003 tree v2df_ftype_pcdouble
17004 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
17005 tree v2df_ftype_v2df_v2df
17006 = build_function_type_list (V2DF_type_node,
17007 V2DF_type_node, V2DF_type_node, NULL_TREE);
17008 tree v16qi_ftype_v16qi_v16qi
17009 = build_function_type_list (V16QI_type_node,
17010 V16QI_type_node, V16QI_type_node, NULL_TREE);
17011 tree v8hi_ftype_v8hi_v8hi
17012 = build_function_type_list (V8HI_type_node,
17013 V8HI_type_node, V8HI_type_node, NULL_TREE);
17014 tree v4si_ftype_v4si_v4si
17015 = build_function_type_list (V4SI_type_node,
17016 V4SI_type_node, V4SI_type_node, NULL_TREE);
17017 tree v2di_ftype_v2di_v2di
17018 = build_function_type_list (V2DI_type_node,
17019 V2DI_type_node, V2DI_type_node, NULL_TREE);
17020 tree v2di_ftype_v2df_v2df
17021 = build_function_type_list (V2DI_type_node,
17022 V2DF_type_node, V2DF_type_node, NULL_TREE);
17023 tree v2df_ftype_v2df
17024 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17025 tree v2di_ftype_v2di_int
17026 = build_function_type_list (V2DI_type_node,
17027 V2DI_type_node, integer_type_node, NULL_TREE);
17028 tree v2di_ftype_v2di_v2di_int
17029 = build_function_type_list (V2DI_type_node, V2DI_type_node,
17030 V2DI_type_node, integer_type_node, NULL_TREE);
17031 tree v4si_ftype_v4si_int
17032 = build_function_type_list (V4SI_type_node,
17033 V4SI_type_node, integer_type_node, NULL_TREE);
17034 tree v8hi_ftype_v8hi_int
17035 = build_function_type_list (V8HI_type_node,
17036 V8HI_type_node, integer_type_node, NULL_TREE);
17037 tree v4si_ftype_v8hi_v8hi
17038 = build_function_type_list (V4SI_type_node,
17039 V8HI_type_node, V8HI_type_node, NULL_TREE);
17040 tree di_ftype_v8qi_v8qi
17041 = build_function_type_list (long_long_unsigned_type_node,
17042 V8QI_type_node, V8QI_type_node, NULL_TREE);
17043 tree di_ftype_v2si_v2si
17044 = build_function_type_list (long_long_unsigned_type_node,
17045 V2SI_type_node, V2SI_type_node, NULL_TREE);
17046 tree v2di_ftype_v16qi_v16qi
17047 = build_function_type_list (V2DI_type_node,
17048 V16QI_type_node, V16QI_type_node, NULL_TREE);
17049 tree v2di_ftype_v4si_v4si
17050 = build_function_type_list (V2DI_type_node,
17051 V4SI_type_node, V4SI_type_node, NULL_TREE);
17052 tree int_ftype_v16qi
17053 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
17054 tree v16qi_ftype_pcchar
17055 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
17056 tree void_ftype_pchar_v16qi
17057 = build_function_type_list (void_type_node,
17058 pchar_type_node, V16QI_type_node, NULL_TREE);
17060 tree v2di_ftype_v2di_unsigned_unsigned
17061 = build_function_type_list (V2DI_type_node, V2DI_type_node,
17062 unsigned_type_node, unsigned_type_node,
17063 NULL_TREE);
17064 tree v2di_ftype_v2di_v2di_unsigned_unsigned
17065 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
17066 unsigned_type_node, unsigned_type_node,
17067 NULL_TREE);
17068 tree v2di_ftype_v2di_v16qi
17069 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
17070 NULL_TREE);
17072 tree float80_type;
17073 tree float128_type;
17076 /* The __float80 type. */
17077 if (TYPE_MODE (long_double_type_node) == XFmode)
17078 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
17079 "__float80");
17080 else
17081 {
17082 /* The __float80 type. */
17083 float80_type = make_node (REAL_TYPE);
17084 TYPE_PRECISION (float80_type) = 80;
17085 layout_type (float80_type);
17086 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
17087 }
17089 if (TARGET_64BIT)
17090 {
17091 float128_type = make_node (REAL_TYPE);
17092 TYPE_PRECISION (float128_type) = 128;
17093 layout_type (float128_type);
17094 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
17095 }
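/* Illustrative effect of the registrations above (added note): once
   register_builtin_type has run, user code can name the types directly,
   e.g.

       __float80  x = 1.0w;     -- XFmode, 80-bit extended precision
       __float128 y = 1.0q;     -- TFmode, registered only for 64-bit

   the w/q constant suffixes being the GCC extensions for these modes.  */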
17097 /* Add all builtins that are more or less simple operations on two
17098 operands.  */
17099 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17100 {
17101 /* Use one of the operands; the target can have a different mode for
17102 mask-generating compares. */
17103 enum machine_mode mode;
17104 tree type;
17106 if (d->name == 0)
17107 continue;
17108 mode = insn_data[d->icode].operand[1].mode;
17110 switch (mode)
17111 {
17112 case V16QImode:
17113 type = v16qi_ftype_v16qi_v16qi;
17114 break;
17115 case V8HImode:
17116 type = v8hi_ftype_v8hi_v8hi;
17117 break;
17118 case V4SImode:
17119 type = v4si_ftype_v4si_v4si;
17120 break;
17121 case V2DImode:
17122 type = v2di_ftype_v2di_v2di;
17123 break;
17124 case V2DFmode:
17125 type = v2df_ftype_v2df_v2df;
17126 break;
17127 case V4SFmode:
17128 type = v4sf_ftype_v4sf_v4sf;
17129 break;
17130 case V8QImode:
17131 type = v8qi_ftype_v8qi_v8qi;
17132 break;
17133 case V4HImode:
17134 type = v4hi_ftype_v4hi_v4hi;
17135 break;
17136 case V2SImode:
17137 type = v2si_ftype_v2si_v2si;
17138 break;
17139 case DImode:
17140 type = di_ftype_di_di;
17141 break;
17143 default:
17144 gcc_unreachable ();
17145 }
17147 /* Override for comparisons. */
17148 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
17149 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
17150 type = v4si_ftype_v4sf_v4sf;
17152 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
17153 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
17154 type = v2di_ftype_v2df_v2df;
17156 def_builtin (d->mask, d->name, type, d->code);
17157 }
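/* (Added note) The loop above is table-driven: each bdesc_2arg entry,
   defined earlier in this file, pairs an insn pattern with a builtin
   name, roughly of the shape

       { MASK_MMX, CODE_FOR_mmx_addv8qi3,
         "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, 0 },

   so adding a new two-operand builtin normally means adding a table
   entry rather than touching this loop.  */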
17159 /* Add all builtins that are more or less simple operations on 1 operand. */
17160 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17161 {
17162 enum machine_mode mode;
17163 tree type;
17165 if (d->name == 0)
17166 continue;
17167 mode = insn_data[d->icode].operand[1].mode;
17169 switch (mode)
17170 {
17171 case V16QImode:
17172 type = v16qi_ftype_v16qi;
17173 break;
17174 case V8HImode:
17175 type = v8hi_ftype_v8hi;
17176 break;
17177 case V4SImode:
17178 type = v4si_ftype_v4si;
17179 break;
17180 case V2DFmode:
17181 type = v2df_ftype_v2df;
17182 break;
17183 case V4SFmode:
17184 type = v4sf_ftype_v4sf;
17185 break;
17186 case V8QImode:
17187 type = v8qi_ftype_v8qi;
17188 break;
17189 case V4HImode:
17190 type = v4hi_ftype_v4hi;
17191 break;
17192 case V2SImode:
17193 type = v2si_ftype_v2si;
17194 break;
17196 default:
17197 gcc_unreachable ();
17198 }
17200 def_builtin (d->mask, d->name, type, d->code);
17201 }
17203 /* Add the remaining MMX insns with somewhat more complicated types. */
17204 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
17205 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
17206 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
17207 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
17209 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
17210 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
17211 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
17213 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
17214 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
17216 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
17217 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
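/* (Added note) These builtins are not meant to be called directly; the
   <mmintrin.h> wrappers expand to them, e.g. (roughly)

       _mm_madd_pi16 (__m64 a, __m64 b)
         => (__m64) __builtin_ia32_pmaddwd ((__v4hi) a, (__v4hi) b);
*/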
17219 /* comi/ucomi insns. */
17220 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
17221 if (d->mask == MASK_SSE2)
17222 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
17223 else
17224 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
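/* (Added note) Each comi/ucomi builtin returns an int; e.g. (roughly)
   _mm_comieq_ss (a, b) in <xmmintrin.h> expands to
   __builtin_ia32_comieq (a, b), which ix86_expand_sse_comi below turns
   into a comiss plus a setcc of the flags into the result's low byte.  */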
17226 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
17227 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
17228 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
17230 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
17231 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
17232 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
17233 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
17234 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
17235 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
17236 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
17237 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
17238 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
17239 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
17240 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
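/* (Added note) Roughly speaking, def_builtin (defined earlier in this
   file) registers a function only when the required ISA mask bits are
   enabled, so e.g. the MASK_SSE | MASK_64BIT entries above exist only
   in 64-bit compilations; cvtss2si64 has no 32-bit counterpart.  */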
17242 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
17244 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
17245 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
17247 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
17248 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
17249 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
17250 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
17252 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
17253 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
17254 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
17255 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
17257 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
17259 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
17261 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
17262 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
17263 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
17264 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
17265 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
17266 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
17268 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
17270 /* Original 3DNow! */
17271 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
17272 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
17273 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
17274 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
17275 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
17276 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
17277 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
17278 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
17279 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
17280 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
17281 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
17282 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
17283 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
17284 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
17285 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
17286 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
17287 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
17288 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
17289 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
17290 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
17292 /* 3DNow! extension as used in the Athlon CPU. */
17293 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
17294 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
17295 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
17296 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
17297 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
17298 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
17300 /* SSE2 */
17301 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
17303 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
17304 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
17306 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
17307 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
17309 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
17310 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
17311 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
17312 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
17313 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
17315 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
17316 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
17317 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
17318 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
17320 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
17321 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
17323 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
17325 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
17326 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
17328 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
17329 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
17330 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
17331 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
17332 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
17334 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
17336 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
17337 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
17338 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
17339 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
17341 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
17342 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
17343 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
17345 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
17346 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
17347 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
17348 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
17350 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
17351 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
17352 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
17354 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
17355 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
17357 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
17358 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
17360 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
17361 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
17362 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
17363 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
17364 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSLLW128);
17365 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSLLD128);
17366 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
17368 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
17369 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
17370 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
17371 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
17372 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSRLW128);
17373 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSRLD128);
17374 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
17376 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
17377 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
17378 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSRAW128);
17379 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSRAD128);
17381 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
17383 /* Prescott New Instructions. */
17384 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
17385 void_ftype_pcvoid_unsigned_unsigned,
17386 IX86_BUILTIN_MONITOR);
17387 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
17388 void_ftype_unsigned_unsigned,
17389 IX86_BUILTIN_MWAIT);
17390 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
17391 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
17393 /* SSSE3.  */
17394 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
17395 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
17396 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
17397 IX86_BUILTIN_PALIGNR);
17399 /* AMDFAM10 SSE4A new built-ins.  */
17400 def_builtin (MASK_SSE4A, "__builtin_ia32_movntsd",
17401 void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTSD);
17402 def_builtin (MASK_SSE4A, "__builtin_ia32_movntss",
17403 void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTSS);
17404 def_builtin (MASK_SSE4A, "__builtin_ia32_extrqi",
17405 v2di_ftype_v2di_unsigned_unsigned, IX86_BUILTIN_EXTRQI);
17406 def_builtin (MASK_SSE4A, "__builtin_ia32_extrq",
17407 v2di_ftype_v2di_v16qi, IX86_BUILTIN_EXTRQ);
17408 def_builtin (MASK_SSE4A, "__builtin_ia32_insertqi",
17409 v2di_ftype_v2di_v2di_unsigned_unsigned, IX86_BUILTIN_INSERTQI);
17410 def_builtin (MASK_SSE4A, "__builtin_ia32_insertq",
17411 v2di_ftype_v2di_v2di, IX86_BUILTIN_INSERTQ);
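/* (Added note) Rough semantics of the SSE4A builtins registered above:
   extrq/extrqi extract a bit field from the low quadword of the first
   operand, insertq/insertqi insert the low bits of the second operand
   into the first; the *qi forms take the field's length and index as
   immediates, the others carry them inside the second vector operand
   (see the AMD SSE4A documentation for the exact encoding).  */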
17413 /* Access to the vec_init patterns. */
17414 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
17415 integer_type_node, NULL_TREE);
17416 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
17417 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
17419 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
17420 short_integer_type_node,
17421 short_integer_type_node,
17422 short_integer_type_node, NULL_TREE);
17423 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
17424 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
17426 ftype = build_function_type_list (V8QI_type_node, char_type_node,
17427 char_type_node, char_type_node,
17428 char_type_node, char_type_node,
17429 char_type_node, char_type_node,
17430 char_type_node, NULL_TREE);
17431 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
17432 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
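/* Illustrative expansion (added note): a call such as
       __builtin_ia32_vec_init_v2si (1, 2)
   is expanded by ix86_expand_vec_init_builtin below into a V2SImode
   vector holding {1, 2}, letting <mmintrin.h> build __m64 values
   without exposing MMX vec_init patterns at the RTL level.  */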
17434 /* Access to the vec_extract patterns. */
17435 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17436 integer_type_node, NULL_TREE);
17437 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
17438 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
17440 ftype = build_function_type_list (long_long_integer_type_node,
17441 V2DI_type_node, integer_type_node,
17442 NULL_TREE);
17443 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
17444 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
17446 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17447 integer_type_node, NULL_TREE);
17448 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
17449 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
17451 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17452 integer_type_node, NULL_TREE);
17453 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
17454 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
17456 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17457 integer_type_node, NULL_TREE);
17458 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
17459 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
17461 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
17462 integer_type_node, NULL_TREE);
17463 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
17464 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
17466 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
17467 integer_type_node, NULL_TREE);
17468 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
17469 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
17471 /* Access to the vec_set patterns. */
17472 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17473 intHI_type_node,
17474 integer_type_node, NULL_TREE);
17475 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
17476 ftype, IX86_BUILTIN_VEC_SET_V8HI);
17478 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
17479 intHI_type_node,
17480 integer_type_node, NULL_TREE);
17481 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
17482 ftype, IX86_BUILTIN_VEC_SET_V4HI);
17483 }
17485 static void
17486 ix86_init_builtins (void)
17487 {
17488 if (TARGET_MMX)
17489 ix86_init_mmx_sse_builtins ();
17490 }
17492 /* Errors in the source file can cause expand_expr to return const0_rtx
17493 where we expect a vector. To avoid crashing, use one of the vector
17494 clear instructions. */
17495 static rtx
17496 safe_vector_operand (rtx x, enum machine_mode mode)
17497 {
17498 if (x == const0_rtx)
17499 x = CONST0_RTX (mode);
17500 return x;
17501 }
17503 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
17505 static rtx
17506 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
17507 {
17508 rtx pat, xops[3];
17509 tree arg0 = CALL_EXPR_ARG (exp, 0);
17510 tree arg1 = CALL_EXPR_ARG (exp, 1);
17511 rtx op0 = expand_normal (arg0);
17512 rtx op1 = expand_normal (arg1);
17513 enum machine_mode tmode = insn_data[icode].operand[0].mode;
17514 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
17515 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
17517 if (VECTOR_MODE_P (mode0))
17518 op0 = safe_vector_operand (op0, mode0);
17519 if (VECTOR_MODE_P (mode1))
17520 op1 = safe_vector_operand (op1, mode1);
17522 if (optimize || !target
17523 || GET_MODE (target) != tmode
17524 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17525 target = gen_reg_rtx (tmode);
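/* (Added note) The 128-bit shift builtins declare their count operand
   in TImode while the user passes a plain int; the block below widens
   such a SImode count by loading it into a V4SI register (sse2_loadd
   zero-extends) and then taking the TImode lowpart.  */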
17527 if (GET_MODE (op1) == SImode && mode1 == TImode)
17528 {
17529 rtx x = gen_reg_rtx (V4SImode);
17530 emit_insn (gen_sse2_loadd (x, op1));
17531 op1 = gen_lowpart (TImode, x);
17532 }
17534 /* The insn must want input operands in the same modes as the
17535 result.  */
17536 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
17537 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
17539 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
17540 op0 = copy_to_mode_reg (mode0, op0);
17541 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
17542 op1 = copy_to_mode_reg (mode1, op1);
17544 /* ??? Using ix86_fixup_binary_operands is problematic when
17545 we've got mismatched modes.  Fake it.  */
17547 xops[0] = target;
17548 xops[1] = op0;
17549 xops[2] = op1;
17551 if (tmode == mode0 && tmode == mode1)
17552 {
17553 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
17554 op0 = xops[1];
17555 op1 = xops[2];
17556 }
17557 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
17558 {
17559 op0 = force_reg (mode0, op0);
17560 op1 = force_reg (mode1, op1);
17561 target = gen_reg_rtx (tmode);
17562 }
17564 pat = GEN_FCN (icode) (target, op0, op1);
17565 if (! pat)
17566 return 0;
17567 emit_insn (pat);
17568 return target;
17569 }
17571 /* Subroutine of ix86_expand_builtin to take care of stores. */
17573 static rtx
17574 ix86_expand_store_builtin (enum insn_code icode, tree exp)
17575 {
17576 rtx pat;
17577 tree arg0 = CALL_EXPR_ARG (exp, 0);
17578 tree arg1 = CALL_EXPR_ARG (exp, 1);
17579 rtx op0 = expand_normal (arg0);
17580 rtx op1 = expand_normal (arg1);
17581 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
17582 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
17584 if (VECTOR_MODE_P (mode1))
17585 op1 = safe_vector_operand (op1, mode1);
17587 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17588 op1 = copy_to_mode_reg (mode1, op1);
17590 pat = GEN_FCN (icode) (op0, op1);
17591 if (pat)
17592 emit_insn (pat);
17593 return 0;
17594 }
17596 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
17598 static rtx
17599 ix86_expand_unop_builtin (enum insn_code icode, tree exp,
17600 rtx target, int do_load)
17601 {
17602 rtx pat;
17603 tree arg0 = CALL_EXPR_ARG (exp, 0);
17604 rtx op0 = expand_normal (arg0);
17605 enum machine_mode tmode = insn_data[icode].operand[0].mode;
17606 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
17608 if (optimize || !target
17609 || GET_MODE (target) != tmode
17610 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17611 target = gen_reg_rtx (tmode);
17612 if (do_load)
17613 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17614 else
17615 {
17616 if (VECTOR_MODE_P (mode0))
17617 op0 = safe_vector_operand (op0, mode0);
17619 if ((optimize && !register_operand (op0, mode0))
17620 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17621 op0 = copy_to_mode_reg (mode0, op0);
17622 }
17624 pat = GEN_FCN (icode) (target, op0);
17625 if (! pat)
17626 return 0;
17627 emit_insn (pat);
17628 return target;
17629 }
17631 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
17632 sqrtss, rsqrtss, rcpss. */
17634 static rtx
17635 ix86_expand_unop1_builtin (enum insn_code icode, tree exp, rtx target)
17636 {
17637 rtx pat;
17638 tree arg0 = CALL_EXPR_ARG (exp, 0);
17639 rtx op1, op0 = expand_normal (arg0);
17640 enum machine_mode tmode = insn_data[icode].operand[0].mode;
17641 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
17643 if (optimize || !target
17644 || GET_MODE (target) != tmode
17645 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17646 target = gen_reg_rtx (tmode);
17648 if (VECTOR_MODE_P (mode0))
17649 op0 = safe_vector_operand (op0, mode0);
17651 if ((optimize && !register_operand (op0, mode0))
17652 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17653 op0 = copy_to_mode_reg (mode0, op0);
17655 op1 = op0;
17656 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
17657 op1 = copy_to_mode_reg (mode0, op1);
17659 pat = GEN_FCN (icode) (target, op0, op1);
17660 if (! pat)
17661 return 0;
17662 emit_insn (pat);
17663 return target;
17664 }
17666 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
17668 static rtx
17669 ix86_expand_sse_compare (const struct builtin_description *d, tree exp,
17670 rtx target)
17671 {
17672 rtx pat;
17673 tree arg0 = CALL_EXPR_ARG (exp, 0);
17674 tree arg1 = CALL_EXPR_ARG (exp, 1);
17675 rtx op0 = expand_normal (arg0);
17676 rtx op1 = expand_normal (arg1);
17677 rtx op2;
17678 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
17679 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
17680 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
17681 enum rtx_code comparison = d->comparison;
17683 if (VECTOR_MODE_P (mode0))
17684 op0 = safe_vector_operand (op0, mode0);
17685 if (VECTOR_MODE_P (mode1))
17686 op1 = safe_vector_operand (op1, mode1);
17688 /* Swap operands if we have a comparison that isn't available in
17689 hardware.  */
17690 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
17691 {
17692 rtx tmp = gen_reg_rtx (mode1);
17693 emit_move_insn (tmp, op1);
17694 op1 = op0;
17695 op0 = tmp;
17696 }
17698 if (optimize || !target
17699 || GET_MODE (target) != tmode
17700 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
17701 target = gen_reg_rtx (tmode);
17703 if ((optimize && !register_operand (op0, mode0))
17704 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
17705 op0 = copy_to_mode_reg (mode0, op0);
17706 if ((optimize && !register_operand (op1, mode1))
17707 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
17708 op1 = copy_to_mode_reg (mode1, op1);
17710 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
17711 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
17712 if (! pat)
17713 return 0;
17714 emit_insn (pat);
17715 return target;
17716 }
17718 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
17720 static rtx
17721 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
17722 rtx target)
17723 {
17724 rtx pat;
17725 tree arg0 = CALL_EXPR_ARG (exp, 0);
17726 tree arg1 = CALL_EXPR_ARG (exp, 1);
17727 rtx op0 = expand_normal (arg0);
17728 rtx op1 = expand_normal (arg1);
17729 rtx op2;
17730 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
17731 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
17732 enum rtx_code comparison = d->comparison;
17734 if (VECTOR_MODE_P (mode0))
17735 op0 = safe_vector_operand (op0, mode0);
17736 if (VECTOR_MODE_P (mode1))
17737 op1 = safe_vector_operand (op1, mode1);
17739 /* Swap operands if we have a comparison that isn't available in
17740 hardware.  */
17741 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
17742 {
17743 rtx tmp = op1;
17744 op1 = op0;
17745 op0 = tmp;
17746 }
17748 target = gen_reg_rtx (SImode);
17749 emit_move_insn (target, const0_rtx);
17750 target = gen_rtx_SUBREG (QImode, target, 0);
17752 if ((optimize && !register_operand (op0, mode0))
17753 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
17754 op0 = copy_to_mode_reg (mode0, op0);
17755 if ((optimize && !register_operand (op1, mode1))
17756 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
17757 op1 = copy_to_mode_reg (mode1, op1);
17759 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
17760 pat = GEN_FCN (d->icode) (op0, op1);
17761 if (! pat)
17762 return 0;
17763 emit_insn (pat);
17764 emit_insn (gen_rtx_SET (VOIDmode,
17765 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
17766 gen_rtx_fmt_ee (comparison, QImode,
17767 SET_DEST (pat),
17768 const0_rtx)));
17770 return SUBREG_REG (target);
17771 }
17773 /* Return the integer constant in ARG. Constrain it to be in the range
17774 of the subparts of VEC_TYPE; issue an error if not. */
17776 static unsigned HOST_WIDE_INT
17777 get_element_number (tree vec_type, tree arg)
17778 {
17779 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
17781 if (!host_integerp (arg, 1)
17782 || (elt = tree_low_cst (arg, 1), elt > max))
17783 {
17784 error ("selector must be an integer constant in the range 0..%wi", max);
17785 return 0;
17786 }
17788 return elt;
17789 }
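/* Example of the range check above (added note): a call such as
   __builtin_ia32_vec_ext_v4sf (x, 7) is rejected at compile time,
   since a V4SF vector only has subparts 0..3, and the expander then
   sees the safe value 0.  */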
17791 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17792 ix86_expand_vector_init. We DO have language-level syntax for this, in
17793 the form of (type){ init-list }. Except that since we can't place emms
17794 instructions from inside the compiler, we can't allow the use of MMX
17795 registers unless the user explicitly asks for it. So we do *not* define
17796 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
17797 we have builtins invoked by mmintrin.h that give us license to emit
17798 these sorts of instructions. */
17800 static rtx
17801 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
17802 {
17803 enum machine_mode tmode = TYPE_MODE (type);
17804 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
17805 int i, n_elt = GET_MODE_NUNITS (tmode);
17806 rtvec v = rtvec_alloc (n_elt);
17808 gcc_assert (VECTOR_MODE_P (tmode));
17809 gcc_assert (call_expr_nargs (exp) == n_elt);
17811 for (i = 0; i < n_elt; ++i)
17812 {
17813 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
17814 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
17815 }
17817 if (!target || !register_operand (target, tmode))
17818 target = gen_reg_rtx (tmode);
17820 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
17821 return target;
17822 }
17824 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17825 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
17826 had a language-level syntax for referencing vector elements. */
17828 static rtx
17829 ix86_expand_vec_ext_builtin (tree exp, rtx target)
17830 {
17831 enum machine_mode tmode, mode0;
17832 tree arg0, arg1;
17833 int elt;
17834 rtx op0;
17836 arg0 = CALL_EXPR_ARG (exp, 0);
17837 arg1 = CALL_EXPR_ARG (exp, 1);
17839 op0 = expand_normal (arg0);
17840 elt = get_element_number (TREE_TYPE (arg0), arg1);
17842 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17843 mode0 = TYPE_MODE (TREE_TYPE (arg0));
17844 gcc_assert (VECTOR_MODE_P (mode0));
17846 op0 = force_reg (mode0, op0);
17848 if (optimize || !target || !register_operand (target, tmode))
17849 target = gen_reg_rtx (tmode);
17851 ix86_expand_vector_extract (true, target, op0, elt);
17853 return target;
17854 }
17856 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17857 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
17858 a language-level syntax for referencing vector elements. */
17860 static rtx
17861 ix86_expand_vec_set_builtin (tree exp)
17862 {
17863 enum machine_mode tmode, mode1;
17864 tree arg0, arg1, arg2;
17865 int elt;
17866 rtx op0, op1;
17868 arg0 = CALL_EXPR_ARG (exp, 0);
17869 arg1 = CALL_EXPR_ARG (exp, 1);
17870 arg2 = CALL_EXPR_ARG (exp, 2);
17872 tmode = TYPE_MODE (TREE_TYPE (arg0));
17873 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17874 gcc_assert (VECTOR_MODE_P (tmode));
17876 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
17877 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
17878 elt = get_element_number (TREE_TYPE (arg0), arg2);
17880 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
17881 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
17883 op0 = force_reg (tmode, op0);
17884 op1 = force_reg (mode1, op1);
17886 ix86_expand_vector_set (true, op0, op1, elt);
17888 return op0;
17889 }
17891 /* Expand an expression EXP that calls a built-in function,
17892 with result going to TARGET if that's convenient
17893 (and in mode MODE if that's convenient).
17894 SUBTARGET may be used as the target for computing one of EXP's operands.
17895 IGNORE is nonzero if the value is to be ignored. */
17897 static rtx
17898 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17899 enum machine_mode mode ATTRIBUTE_UNUSED,
17900 int ignore ATTRIBUTE_UNUSED)
17901 {
17902 const struct builtin_description *d;
17903 size_t i;
17904 enum insn_code icode;
17905 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
17906 tree arg0, arg1, arg2, arg3;
17907 rtx op0, op1, op2, op3, pat;
17908 enum machine_mode tmode, mode0, mode1, mode2, mode3, mode4;
17909 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
17911 switch (fcode)
17912 {
17913 case IX86_BUILTIN_EMMS:
17914 emit_insn (gen_mmx_emms ());
17915 return 0;
17917 case IX86_BUILTIN_SFENCE:
17918 emit_insn (gen_sse_sfence ());
17919 return 0;
17921 case IX86_BUILTIN_MASKMOVQ:
17922 case IX86_BUILTIN_MASKMOVDQU:
17923 icode = (fcode == IX86_BUILTIN_MASKMOVQ
17924 ? CODE_FOR_mmx_maskmovq
17925 : CODE_FOR_sse2_maskmovdqu);
17926 /* Note the arg order is different from the operand order. */
17927 arg1 = CALL_EXPR_ARG (exp, 0);
17928 arg2 = CALL_EXPR_ARG (exp, 1);
17929 arg0 = CALL_EXPR_ARG (exp, 2);
17930 op0 = expand_normal (arg0);
17931 op1 = expand_normal (arg1);
17932 op2 = expand_normal (arg2);
17933 mode0 = insn_data[icode].operand[0].mode;
17934 mode1 = insn_data[icode].operand[1].mode;
17935 mode2 = insn_data[icode].operand[2].mode;
17937 op0 = force_reg (Pmode, op0);
17938 op0 = gen_rtx_MEM (mode1, op0);
17940 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
17941 op0 = copy_to_mode_reg (mode0, op0);
17942 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17943 op1 = copy_to_mode_reg (mode1, op1);
17944 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
17945 op2 = copy_to_mode_reg (mode2, op2);
17946 pat = GEN_FCN (icode) (op0, op1, op2);
17947 if (! pat)
17948 return 0;
17949 emit_insn (pat);
17950 return 0;
17952 case IX86_BUILTIN_SQRTSS:
17953 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, exp, target);
17954 case IX86_BUILTIN_RSQRTSS:
17955 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, exp, target);
17956 case IX86_BUILTIN_RCPSS:
17957 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, exp, target);
17959 case IX86_BUILTIN_LOADUPS:
17960 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, exp, target, 1);
17962 case IX86_BUILTIN_STOREUPS:
17963 return ix86_expand_store_builtin (CODE_FOR_sse_movups, exp);
17965 case IX86_BUILTIN_LOADHPS:
17966 case IX86_BUILTIN_LOADLPS:
17967 case IX86_BUILTIN_LOADHPD:
17968 case IX86_BUILTIN_LOADLPD:
17969 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
17970 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
17971 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
17972 : CODE_FOR_sse2_loadlpd);
17973 arg0 = CALL_EXPR_ARG (exp, 0);
17974 arg1 = CALL_EXPR_ARG (exp, 1);
17975 op0 = expand_normal (arg0);
17976 op1 = expand_normal (arg1);
17977 tmode = insn_data[icode].operand[0].mode;
17978 mode0 = insn_data[icode].operand[1].mode;
17979 mode1 = insn_data[icode].operand[2].mode;
17981 op0 = force_reg (mode0, op0);
17982 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
17983 if (optimize || target == 0
17984 || GET_MODE (target) != tmode
17985 || !register_operand (target, tmode))
17986 target = gen_reg_rtx (tmode);
17987 pat = GEN_FCN (icode) (target, op0, op1);
17988 if (! pat)
17989 return 0;
17990 emit_insn (pat);
17991 return target;
17993 case IX86_BUILTIN_STOREHPS:
17994 case IX86_BUILTIN_STORELPS:
17995 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
17996 : CODE_FOR_sse_storelps);
17997 arg0 = CALL_EXPR_ARG (exp, 0);
17998 arg1 = CALL_EXPR_ARG (exp, 1);
17999 op0 = expand_normal (arg0);
18000 op1 = expand_normal (arg1);
18001 mode0 = insn_data[icode].operand[0].mode;
18002 mode1 = insn_data[icode].operand[1].mode;
18004 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
18005 op1 = force_reg (mode1, op1);
18007 pat = GEN_FCN (icode) (op0, op1);
18008 if (! pat)
18009 return const0_rtx;
18010 emit_insn (pat);
18011 return const0_rtx;
18013 case IX86_BUILTIN_MOVNTPS:
18014 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, exp);
18015 case IX86_BUILTIN_MOVNTQ:
18016 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, exp);
18018 case IX86_BUILTIN_LDMXCSR:
18019 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
18020 target = assign_386_stack_local (SImode, SLOT_TEMP);
18021 emit_move_insn (target, op0);
18022 emit_insn (gen_sse_ldmxcsr (target));
18023 return 0;
18025 case IX86_BUILTIN_STMXCSR:
18026 target = assign_386_stack_local (SImode, SLOT_TEMP);
18027 emit_insn (gen_sse_stmxcsr (target));
18028 return copy_to_mode_reg (SImode, target);
18030 case IX86_BUILTIN_SHUFPS:
18031 case IX86_BUILTIN_SHUFPD:
18032 icode = (fcode == IX86_BUILTIN_SHUFPS
18033 ? CODE_FOR_sse_shufps
18034 : CODE_FOR_sse2_shufpd);
18035 arg0 = CALL_EXPR_ARG (exp, 0);
18036 arg1 = CALL_EXPR_ARG (exp, 1);
18037 arg2 = CALL_EXPR_ARG (exp, 2);
18038 op0 = expand_normal (arg0);
18039 op1 = expand_normal (arg1);
18040 op2 = expand_normal (arg2);
18041 tmode = insn_data[icode].operand[0].mode;
18042 mode0 = insn_data[icode].operand[1].mode;
18043 mode1 = insn_data[icode].operand[2].mode;
18044 mode2 = insn_data[icode].operand[3].mode;
18046 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
18047 op0 = copy_to_mode_reg (mode0, op0);
18048 if ((optimize && !register_operand (op1, mode1))
18049 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
18050 op1 = copy_to_mode_reg (mode1, op1);
18051 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
18052 {
18053 /* @@@ better error message */
18054 error ("mask must be an immediate");
18055 return gen_reg_rtx (tmode);
18056 }
18057 if (optimize || target == 0
18058 || GET_MODE (target) != tmode
18059 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
18060 target = gen_reg_rtx (tmode);
18061 pat = GEN_FCN (icode) (target, op0, op1, op2);
18062 if (! pat)
18063 return 0;
18064 emit_insn (pat);
18065 return target;
18067 case IX86_BUILTIN_PSHUFW:
18068 case IX86_BUILTIN_PSHUFD:
18069 case IX86_BUILTIN_PSHUFHW:
18070 case IX86_BUILTIN_PSHUFLW:
18071 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
18072 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
18073 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
18074 : CODE_FOR_mmx_pshufw);
18075 arg0 = CALL_EXPR_ARG (exp, 0);
18076 arg1 = CALL_EXPR_ARG (exp, 1);
18077 op0 = expand_normal (arg0);
18078 op1 = expand_normal (arg1);
18079 tmode = insn_data[icode].operand[0].mode;
18080 mode1 = insn_data[icode].operand[1].mode;
18081 mode2 = insn_data[icode].operand[2].mode;
18083 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
18084 op0 = copy_to_mode_reg (mode1, op0);
18085 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
18086 {
18087 /* @@@ better error message */
18088 error ("mask must be an immediate");
18089 return const0_rtx;
18090 }
18091 if (target == 0
18092 || GET_MODE (target) != tmode
18093 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
18094 target = gen_reg_rtx (tmode);
18095 pat = GEN_FCN (icode) (target, op0, op1);
18096 if (! pat)
18097 return 0;
18098 emit_insn (pat);
18099 return target;
18101 case IX86_BUILTIN_PSLLWI128:
18102 icode = CODE_FOR_ashlv8hi3;
18103 goto do_pshifti;
18104 case IX86_BUILTIN_PSLLDI128:
18105 icode = CODE_FOR_ashlv4si3;
18106 goto do_pshifti;
18107 case IX86_BUILTIN_PSLLQI128:
18108 icode = CODE_FOR_ashlv2di3;
18109 goto do_pshifti;
18110 case IX86_BUILTIN_PSRAWI128:
18111 icode = CODE_FOR_ashrv8hi3;
18112 goto do_pshifti;
18113 case IX86_BUILTIN_PSRADI128:
18114 icode = CODE_FOR_ashrv4si3;
18115 goto do_pshifti;
18116 case IX86_BUILTIN_PSRLWI128:
18117 icode = CODE_FOR_lshrv8hi3;
18118 goto do_pshifti;
18119 case IX86_BUILTIN_PSRLDI128:
18120 icode = CODE_FOR_lshrv4si3;
18121 goto do_pshifti;
18122 case IX86_BUILTIN_PSRLQI128:
18123 icode = CODE_FOR_lshrv2di3;
18124 goto do_pshifti;
18125 do_pshifti:
18126 arg0 = CALL_EXPR_ARG (exp, 0);
18127 arg1 = CALL_EXPR_ARG (exp, 1);
18128 op0 = expand_normal (arg0);
18129 op1 = expand_normal (arg1);
18131 if (!CONST_INT_P (op1))
18132 {
18133 error ("shift must be an immediate");
18134 return const0_rtx;
18135 }
18136 if (INTVAL (op1) < 0 || INTVAL (op1) > 255)
18137 op1 = GEN_INT (255);
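/* (Added note) Clamping to 255 is safe: any count at or above the
   element width already shifts every bit out, so the generated shift
   patterns produce the same all-zero (or, for the arithmetic shifts,
   all-sign-bit) result the hardware gives for any oversized count.  */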
18139 tmode = insn_data[icode].operand[0].mode;
18140 mode1 = insn_data[icode].operand[1].mode;
18141 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
18142 op0 = copy_to_reg (op0);
18144 target = gen_reg_rtx (tmode);
18145 pat = GEN_FCN (icode) (target, op0, op1);
18146 if (! pat)
18147 return 0;
18148 emit_insn (pat);
18149 return target;
18151 case IX86_BUILTIN_PSLLW128:
18152 icode = CODE_FOR_ashlv8hi3;
18153 goto do_pshift;
18154 case IX86_BUILTIN_PSLLD128:
18155 icode = CODE_FOR_ashlv4si3;
18156 goto do_pshift;
18157 case IX86_BUILTIN_PSLLQ128:
18158 icode = CODE_FOR_ashlv2di3;
18159 goto do_pshift;
18160 case IX86_BUILTIN_PSRAW128:
18161 icode = CODE_FOR_ashrv8hi3;
18162 goto do_pshift;
18163 case IX86_BUILTIN_PSRAD128:
18164 icode = CODE_FOR_ashrv4si3;
18165 goto do_pshift;
18166 case IX86_BUILTIN_PSRLW128:
18167 icode = CODE_FOR_lshrv8hi3;
18168 goto do_pshift;
18169 case IX86_BUILTIN_PSRLD128:
18170 icode = CODE_FOR_lshrv4si3;
18171 goto do_pshift;
18172 case IX86_BUILTIN_PSRLQ128:
18173 icode = CODE_FOR_lshrv2di3;
18174 goto do_pshift;
18175 do_pshift:
18176 arg0 = CALL_EXPR_ARG (exp, 0);
18177 arg1 = CALL_EXPR_ARG (exp, 1);
18178 op0 = expand_normal (arg0);
18179 op1 = expand_normal (arg1);
18181 tmode = insn_data[icode].operand[0].mode;
18182 mode1 = insn_data[icode].operand[1].mode;
18184 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
18185 op0 = copy_to_reg (op0);
18187 op1 = simplify_gen_subreg (TImode, op1, GET_MODE (op1), 0);
18188 if (! (*insn_data[icode].operand[2].predicate) (op1, TImode))
18189 op1 = copy_to_reg (op1);
18191 target = gen_reg_rtx (tmode);
18192 pat = GEN_FCN (icode) (target, op0, op1);
18193 if (! pat)
18194 return 0;
18195 emit_insn (pat);
18196 return target;
18198 case IX86_BUILTIN_PSLLDQI128:
18199 case IX86_BUILTIN_PSRLDQI128:
18200 icode = (fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
18201 : CODE_FOR_sse2_lshrti3);
18202 arg0 = CALL_EXPR_ARG (exp, 0);
18203 arg1 = CALL_EXPR_ARG (exp, 1);
18204 op0 = expand_normal (arg0);
18205 op1 = expand_normal (arg1);
18206 tmode = insn_data[icode].operand[0].mode;
18207 mode1 = insn_data[icode].operand[1].mode;
18208 mode2 = insn_data[icode].operand[2].mode;
18210 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
18211 {
18212 op0 = copy_to_reg (op0);
18213 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
18214 }
18215 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
18216 {
18217 error ("shift must be an immediate");
18218 return const0_rtx;
18219 }
18220 target = gen_reg_rtx (V2DImode);
18221 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0),
18222 op0, op1);
18223 if (! pat)
18224 return 0;
18225 emit_insn (pat);
18226 return target;
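/* (Added note) The shift amount for these two builtins is measured in
   bits, not bytes: sse2_ashlti3/sse2_lshrti3 operate on the whole
   TImode value, and the <emmintrin.h> wrappers scale, roughly

       #define _mm_slli_si128(A, N) \
         ((__m128i) __builtin_ia32_pslldqi128 ((__m128i)(A), (N) * 8))
*/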
18228 case IX86_BUILTIN_FEMMS:
18229 emit_insn (gen_mmx_femms ());
18230 return 0;
18232 case IX86_BUILTIN_PAVGUSB:
18233 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, exp, target);
18235 case IX86_BUILTIN_PF2ID:
18236 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, exp, target, 0);
18238 case IX86_BUILTIN_PFACC:
18239 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, exp, target);
18241 case IX86_BUILTIN_PFADD:
18242 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, exp, target);
18244 case IX86_BUILTIN_PFCMPEQ:
18245 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, exp, target);
18247 case IX86_BUILTIN_PFCMPGE:
18248 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, exp, target);
18250 case IX86_BUILTIN_PFCMPGT:
18251 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, exp, target);
18253 case IX86_BUILTIN_PFMAX:
18254 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, exp, target);
18256 case IX86_BUILTIN_PFMIN:
18257 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, exp, target);
18259 case IX86_BUILTIN_PFMUL:
18260 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, exp, target);
18262 case IX86_BUILTIN_PFRCP:
18263 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, exp, target, 0);
18265 case IX86_BUILTIN_PFRCPIT1:
18266 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, exp, target);
18268 case IX86_BUILTIN_PFRCPIT2:
18269 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, exp, target);
18271 case IX86_BUILTIN_PFRSQIT1:
18272 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, exp, target);
18274 case IX86_BUILTIN_PFRSQRT:
18275 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, exp, target, 0);
18277 case IX86_BUILTIN_PFSUB:
18278 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, exp, target);
18280 case IX86_BUILTIN_PFSUBR:
18281 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, exp, target);
18283 case IX86_BUILTIN_PI2FD:
18284 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, exp, target, 0);
18286 case IX86_BUILTIN_PMULHRW:
18287 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, exp, target);
18289 case IX86_BUILTIN_PF2IW:
18290 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, exp, target, 0);
18292 case IX86_BUILTIN_PFNACC:
18293 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, exp, target);
18295 case IX86_BUILTIN_PFPNACC:
18296 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, exp, target);
18298 case IX86_BUILTIN_PI2FW:
18299 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, exp, target, 0);
18301 case IX86_BUILTIN_PSWAPDSI:
18302 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, exp, target, 0);
18304 case IX86_BUILTIN_PSWAPDSF:
18305 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, exp, target, 0);
18307 case IX86_BUILTIN_SQRTSD:
18308 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, exp, target);
18309 case IX86_BUILTIN_LOADUPD:
18310 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, exp, target, 1);
18311 case IX86_BUILTIN_STOREUPD:
18312 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, exp);
18314 case IX86_BUILTIN_MFENCE:
18315 emit_insn (gen_sse2_mfence ());
18316 return 0;
18317 case IX86_BUILTIN_LFENCE:
18318 emit_insn (gen_sse2_lfence ());
18319 return 0;
18321 case IX86_BUILTIN_CLFLUSH:
18322 arg0 = CALL_EXPR_ARG (exp, 0);
18323 op0 = expand_normal (arg0);
18324 icode = CODE_FOR_sse2_clflush;
18325 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
18326 op0 = copy_to_mode_reg (Pmode, op0);
18328 emit_insn (gen_sse2_clflush (op0));
18329 return 0;
18331 case IX86_BUILTIN_MOVNTPD:
18332 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, exp);
18333 case IX86_BUILTIN_MOVNTDQ:
18334 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, exp);
18335 case IX86_BUILTIN_MOVNTI:
18336 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, exp);
18338 case IX86_BUILTIN_LOADDQU:
18339 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, exp, target, 1);
18340 case IX86_BUILTIN_STOREDQU:
18341 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, exp);
18343 case IX86_BUILTIN_MONITOR:
18344 arg0 = CALL_EXPR_ARG (exp, 0);
18345 arg1 = CALL_EXPR_ARG (exp, 1);
18346 arg2 = CALL_EXPR_ARG (exp, 2);
18347 op0 = expand_normal (arg0);
18348 op1 = expand_normal (arg1);
18349 op2 = expand_normal (arg2);
18350 if (!REG_P (op0))
18351 op0 = copy_to_mode_reg (Pmode, op0);
18352 if (!REG_P (op1))
18353 op1 = copy_to_mode_reg (SImode, op1);
18354 if (!REG_P (op2))
18355 op2 = copy_to_mode_reg (SImode, op2);
18356 if (!TARGET_64BIT)
18357 emit_insn (gen_sse3_monitor (op0, op1, op2));
18358 else
18359 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
18360 return 0;
18362 case IX86_BUILTIN_MWAIT:
18363 arg0 = CALL_EXPR_ARG (exp, 0);
18364 arg1 = CALL_EXPR_ARG (exp, 1);
18365 op0 = expand_normal (arg0);
18366 op1 = expand_normal (arg1);
18367 if (!REG_P (op0))
18368 op0 = copy_to_mode_reg (SImode, op0);
18369 if (!REG_P (op1))
18370 op1 = copy_to_mode_reg (SImode, op1);
18371 emit_insn (gen_sse3_mwait (op0, op1));
18372 return 0;
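/* (Added note) These two expand the SSE3 monitor/mwait pair; user code
   reaches them through <pmmintrin.h>, e.g.

       _mm_monitor (addr, 0, 0);
       _mm_mwait (0, 0);

   which arms an address range and then waits for a store to it (or an
   interrupt), hence the bare emit_insn and "return 0" above: both
   builtins are void.  */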
18374 case IX86_BUILTIN_LDDQU:
18375 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, exp,
18376 target, 1);
18378 case IX86_BUILTIN_PALIGNR:
18379 case IX86_BUILTIN_PALIGNR128:
18380 if (fcode == IX86_BUILTIN_PALIGNR)
18381 {
18382 icode = CODE_FOR_ssse3_palignrdi;
18383 mode = DImode;
18384 }
18385 else
18386 {
18387 icode = CODE_FOR_ssse3_palignrti;
18388 mode = V2DImode;
18389 }
18390 arg0 = CALL_EXPR_ARG (exp, 0);
18391 arg1 = CALL_EXPR_ARG (exp, 1);
18392 arg2 = CALL_EXPR_ARG (exp, 2);
18393 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
18394 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
18395 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
18396 tmode = insn_data[icode].operand[0].mode;
18397 mode1 = insn_data[icode].operand[1].mode;
18398 mode2 = insn_data[icode].operand[2].mode;
18399 mode3 = insn_data[icode].operand[3].mode;
18401 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
18402 {
18403 op0 = copy_to_reg (op0);
18404 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
18405 }
18406 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
18407 {
18408 op1 = copy_to_reg (op1);
18409 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
18410 }
18411 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
18412 {
18413 error ("shift must be an immediate");
18414 return const0_rtx;
18415 }
18416 target = gen_reg_rtx (mode);
18417 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
18418 op0, op1, op2);
18419 if (! pat)
18420 return 0;
18421 emit_insn (pat);
18422 return target;
18424 case IX86_BUILTIN_MOVNTSD:
18425 return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv2df, exp);
18427 case IX86_BUILTIN_MOVNTSS:
18428 return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv4sf, exp);
18430 case IX86_BUILTIN_INSERTQ:
18431 case IX86_BUILTIN_EXTRQ:
18432 icode = (fcode == IX86_BUILTIN_EXTRQ
18433 ? CODE_FOR_sse4a_extrq
18434 : CODE_FOR_sse4a_insertq);
18435 arg0 = CALL_EXPR_ARG (exp, 0);
18436 arg1 = CALL_EXPR_ARG (exp, 1);
18437 op0 = expand_normal (arg0);
18438 op1 = expand_normal (arg1);
18439 tmode = insn_data[icode].operand[0].mode;
18440 mode1 = insn_data[icode].operand[1].mode;
18441 mode2 = insn_data[icode].operand[2].mode;
18442 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
18443 op0 = copy_to_mode_reg (mode1, op0);
18444 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
18445 op1 = copy_to_mode_reg (mode2, op1);
18446 if (optimize || target == 0
18447 || GET_MODE (target) != tmode
18448 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
18449 target = gen_reg_rtx (tmode);
18450 pat = GEN_FCN (icode) (target, op0, op1);
18451 if (! pat)
18452 return 0;
18453 emit_insn (pat);
18454 return target;
18456 case IX86_BUILTIN_EXTRQI:
18457 icode = CODE_FOR_sse4a_extrqi;
18458 arg0 = CALL_EXPR_ARG (exp, 0);
18459 arg1 = CALL_EXPR_ARG (exp, 1);
18460 arg2 = CALL_EXPR_ARG (exp, 2);
18461 op0 = expand_normal (arg0);
18462 op1 = expand_normal (arg1);
18463 op2 = expand_normal (arg2);
18464 tmode = insn_data[icode].operand[0].mode;
18465 mode1 = insn_data[icode].operand[1].mode;
18466 mode2 = insn_data[icode].operand[2].mode;
18467 mode3 = insn_data[icode].operand[3].mode;
18468 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
18469 op0 = copy_to_mode_reg (mode1, op0);
18470 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
18471 {
18472 error ("index mask must be an immediate");
18473 return gen_reg_rtx (tmode);
18474 }
18475 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
18476 {
18477 error ("length mask must be an immediate");
18478 return gen_reg_rtx (tmode);
18479 }
18480 if (optimize || target == 0
18481 || GET_MODE (target) != tmode
18482 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
18483 target = gen_reg_rtx (tmode);
18484 pat = GEN_FCN (icode) (target, op0, op1, op2);
18485 if (! pat)
18486 return 0;
18487 emit_insn (pat);
18488 return target;
    case IX86_BUILTIN_INSERTQI:
      icode = CODE_FOR_sse4a_insertqi;
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      arg2 = CALL_EXPR_ARG (exp, 2);
      arg3 = CALL_EXPR_ARG (exp, 3);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      op3 = expand_normal (arg3);
      tmode = insn_data[icode].operand[0].mode;
      mode1 = insn_data[icode].operand[1].mode;
      mode2 = insn_data[icode].operand[2].mode;
      mode3 = insn_data[icode].operand[3].mode;
      mode4 = insn_data[icode].operand[4].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
        op0 = copy_to_mode_reg (mode1, op0);

      if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
        op1 = copy_to_mode_reg (mode2, op1);

      if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
        {
          error ("index mask must be an immediate");
          return gen_reg_rtx (tmode);
        }
      if (! (*insn_data[icode].operand[4].predicate) (op3, mode4))
        {
          error ("length mask must be an immediate");
          return gen_reg_rtx (tmode);
        }
      if (optimize || target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1, op2, op3);
      if (! pat)
        return NULL_RTX;
      emit_insn (pat);
      return target;
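      /* Background for the SSE4a cases above: extrq extracts a bit
         field from the low quadword of an XMM register and insertq
         inserts one; the *QI forms take the field length and index as
         immediates, which is why non-immediate operands are rejected
         with an error.  These instructions exist only on AMD parts.  */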
    case IX86_BUILTIN_VEC_INIT_V2SI:
    case IX86_BUILTIN_VEC_INIT_V4HI:
    case IX86_BUILTIN_VEC_INIT_V8QI:
      return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case IX86_BUILTIN_VEC_EXT_V2DF:
    case IX86_BUILTIN_VEC_EXT_V2DI:
    case IX86_BUILTIN_VEC_EXT_V4SF:
    case IX86_BUILTIN_VEC_EXT_V4SI:
    case IX86_BUILTIN_VEC_EXT_V8HI:
    case IX86_BUILTIN_VEC_EXT_V2SI:
    case IX86_BUILTIN_VEC_EXT_V4HI:
      return ix86_expand_vec_ext_builtin (exp, target);

    case IX86_BUILTIN_VEC_SET_V8HI:
    case IX86_BUILTIN_VEC_SET_V4HI:
      return ix86_expand_vec_set_builtin (exp);

    default:
      break;
    }

  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      {
        /* Compares are treated specially.  */
        if (d->icode == CODE_FOR_sse_maskcmpv4sf3
            || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
            || d->icode == CODE_FOR_sse2_maskcmpv2df3
            || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
          return ix86_expand_sse_compare (d, exp, target);

        return ix86_expand_binop_builtin (d->icode, exp, target);
      }

  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return ix86_expand_unop_builtin (d->icode, exp, target, 0);

  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_comi (d, exp, target);

  gcc_unreachable ();
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE_OUT, or
   NULL_TREE if it is not available.  */

static tree
ix86_builtin_vectorized_function (enum built_in_function fn, tree type_out,
                                  tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  switch (fn)
    {
    case BUILT_IN_SQRT:
      if (out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return ix86_builtins[IX86_BUILTIN_SQRTPD];
      break;

    case BUILT_IN_SQRTF:
      if (out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return ix86_builtins[IX86_BUILTIN_SQRTPS];
      break;

    case BUILT_IN_LRINTF:
      if (out_mode == SImode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
      break;

    default:
      break;
    }

  return NULL_TREE;
}
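/* For example, "for (i = 0; i < n; i++) b[i] = sqrt (a[i]);" over
   doubles is vectorized by querying the hook above with
   fn == BUILT_IN_SQRT and V2DF for both vector types; the returned
   IX86_BUILTIN_SQRTPD decl lets the vectorizer emit one sqrtpd per
   two elements.  */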
/* Returns a decl of a function that implements conversion of the
   input vector of type TYPE, or NULL_TREE if it is not available.  */

static tree
ix86_builtin_conversion (enum tree_code code, tree type)
{
  if (TREE_CODE (type) != VECTOR_TYPE)
    return NULL_TREE;

  switch (code)
    {
    case FLOAT_EXPR:
      switch (TYPE_MODE (type))
        {
        case V4SImode:
          return ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
        default:
          return NULL_TREE;
        }

    case FIX_TRUNC_EXPR:
      switch (TYPE_MODE (type))
        {
        case V4SFmode:
          return ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
        default:
          return NULL_TREE;
        }

    default:
      return NULL_TREE;
    }
}
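/* For example, vectorizing "(float) a[i]" over int input asks for
   FLOAT_EXPR on a V4SImode vector and receives the decl of
   IX86_BUILTIN_CVTDQ2PS, i.e. a single cvtdq2ps converting four
   elements at once.  */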
/* Store OPERAND to the memory after reload is completed.  This means
   that we can't easily use assign_stack_local.  */
rtx
ix86_force_to_memory (enum machine_mode mode, rtx operand)
{
  rtx result;

  gcc_assert (reload_completed);
  if (TARGET_RED_ZONE)
    {
      result = gen_rtx_MEM (mode,
                            gen_rtx_PLUS (Pmode,
                                          stack_pointer_rtx,
                                          GEN_INT (-RED_ZONE_SIZE)));
      emit_move_insn (result, operand);
    }
  else if (!TARGET_RED_ZONE && TARGET_64BIT)
    {
      switch (mode)
        {
        case HImode:
        case SImode:
          operand = gen_lowpart (DImode, operand);
          /* FALLTHRU */
        case DImode:
          emit_insn (gen_rtx_SET (VOIDmode,
                                  gen_rtx_MEM (DImode,
                                               gen_rtx_PRE_DEC (DImode,
                                                                stack_pointer_rtx)),
                                  operand));
          break;
        default:
          gcc_unreachable ();
        }
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  else
    {
      switch (mode)
        {
        case DImode:
          {
            rtx operands[2];
            split_di (&operand, 1, operands, operands + 1);
            emit_insn (gen_rtx_SET (VOIDmode,
                                    gen_rtx_MEM (SImode,
                                                 gen_rtx_PRE_DEC (Pmode,
                                                                  stack_pointer_rtx)),
                                    operands[1]));
            emit_insn (gen_rtx_SET (VOIDmode,
                                    gen_rtx_MEM (SImode,
                                                 gen_rtx_PRE_DEC (Pmode,
                                                                  stack_pointer_rtx)),
                                    operands[0]));
          }
          break;
        case HImode:
          /* Store HImodes as SImodes.  */
          operand = gen_lowpart (SImode, operand);
          /* FALLTHRU */
        case SImode:
          emit_insn (gen_rtx_SET (VOIDmode,
                                  gen_rtx_MEM (GET_MODE (operand),
                                               gen_rtx_PRE_DEC (SImode,
                                                                stack_pointer_rtx)),
                                  operand));
          break;
        default:
          gcc_unreachable ();
        }
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  return result;
}
/* Free operand from the memory.  */
void
ix86_free_from_memory (enum machine_mode mode)
{
  if (!TARGET_RED_ZONE)
    {
      int size;

      if (mode == DImode || TARGET_64BIT)
        size = 8;
      else
        size = 4;
      /* Use LEA to deallocate stack space.  In peephole2 it will be converted
         to a pop or add instruction if registers are available.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                              gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                            GEN_INT (size))));
    }
}
/* Put float CONST_DOUBLE in the constant pool instead of fp regs.
   QImode must go into class Q_REGS.
   Narrow ALL_REGS to GENERAL_REGS.  This supports allowing movsf and
   movdf to do mem-to-mem moves through integer regs.  */
enum reg_class
ix86_preferred_reload_class (rtx x, enum reg_class class)
{
  enum machine_mode mode = GET_MODE (x);

  /* We're only allowed to return a subclass of CLASS.  Many of the
     following checks fail for NO_REGS, so eliminate that early.  */
  if (class == NO_REGS)
    return NO_REGS;

  /* All classes can load zeros.  */
  if (x == CONST0_RTX (mode))
    return class;

  /* Force constants into memory if we are loading a (nonzero) constant into
     an MMX or SSE register.  This is because there are no MMX/SSE instructions
     to load from a constant.  */
  if (CONSTANT_P (x)
      && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
    return NO_REGS;

  /* Prefer SSE regs only, if we can use them for math.  */
  if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
    return SSE_CLASS_P (class) ? class : NO_REGS;

  /* Floating-point constants need more complex checks.  */
  if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
    {
      /* General regs can load everything.  */
      if (reg_class_subset_p (class, GENERAL_REGS))
        return class;

      /* Floats can load 0 and 1 plus some others.  Note that we eliminated
         zero above.  We only want to wind up preferring 80387 registers if
         we plan on doing computation with them.  */
      if (TARGET_80387
          && standard_80387_constant_p (x))
        {
          /* Limit class to non-sse.  */
          if (class == FLOAT_SSE_REGS)
            return FLOAT_REGS;
          if (class == FP_TOP_SSE_REGS)
            return FP_TOP_REG;
          if (class == FP_SECOND_SSE_REGS)
            return FP_SECOND_REG;
          if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
            return class;
        }

      return NO_REGS;
    }

  /* Generally when we see PLUS here, it's the function invariant
     (plus soft-fp const_int), which can only be computed into general
     regs.  */
  if (GET_CODE (x) == PLUS)
    return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;

  /* QImode constants are easy to load, but non-constant QImode data
     must go into Q_REGS.  */
  if (GET_MODE (x) == QImode && !CONSTANT_P (x))
    {
      if (reg_class_subset_p (class, Q_REGS))
        return class;
      if (reg_class_subset_p (Q_REGS, class))
        return Q_REGS;
      return NO_REGS;
    }

  return class;
}
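/* Example: a reload of a non-constant QImode value with CLASS ==
   GENERAL_REGS is narrowed to Q_REGS above, since only %eax, %ebx,
   %ecx and %edx have addressable byte subregisters in 32-bit mode.  */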
/* Discourage putting floating-point values in SSE registers unless
   SSE math is being used, and likewise for the 387 registers.  */
static enum reg_class
ix86_preferred_output_reload_class (rtx x, enum reg_class class)
{
  enum machine_mode mode = GET_MODE (x);

  /* Restrict the output reload class to the register bank that we are doing
     math on.  If we would like not to return a subset of CLASS, reject this
     alternative: if reload cannot do this, it will still use its choice.  */
  if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
    return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;

  if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
    {
      if (class == FP_TOP_SSE_REGS)
        return FP_TOP_REG;
      else if (class == FP_SECOND_SSE_REGS)
        return FP_SECOND_REG;
      else
        return FLOAT_CLASS_P (class) ? class : NO_REGS;
    }

  return class;
}
/* If we are copying between general and FP registers, we need a memory
   location.  The same is true for SSE and MMX registers.

   This function can't work reliably when one of the CLASSES is a class
   containing registers from multiple units (SSE, MMX, integer).  We avoid
   this by never combining those units in a single alternative in the
   machine description.  Ensure that this constraint holds to avoid
   unexpected surprises.

   When STRICT is false, we are being called from REGISTER_MOVE_COST, so do
   not enforce these sanity checks.  */

int
ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
                              enum machine_mode mode, int strict)
{
  if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
      || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
      || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
      || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
      || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
      || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
    {
      gcc_assert (!strict);
      return true;
    }

  if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
    return true;

  /* ??? This is a lie.  We do have moves between mmx/general, and for
     mmx/sse2.  But by saying we need secondary memory we discourage the
     register allocator from using the mmx registers unless needed.  */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
    return true;

  if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
    {
      /* SSE1 doesn't have any direct moves from other classes.  */
      if (!TARGET_SSE2)
        return true;

      /* If the target says that inter-unit moves are more expensive
         than moving through memory, then don't generate them.  */
      if (!TARGET_INTER_UNIT_MOVES)
        return true;

      /* Between SSE and general, we have moves no larger than word size.  */
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
        return true;
    }

  return false;
}
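/* Example: a DFmode copy between an x87 register and an SSE register
   always answers true here, since there is no direct x87<->SSE move
   instruction; the value is spilled to a stack slot and reloaded.  */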
/* Return true if the registers in CLASS cannot represent the change from
   modes FROM to TO.  */

bool
ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                               enum reg_class class)
{
  if (from == to)
    return false;

  /* x87 registers can't do subreg at all, as all values are reformatted
     to extended precision.  */
  if (MAYBE_FLOAT_CLASS_P (class))
    return true;

  if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
    {
      /* Vector registers do not support QI or HImode loads.  If we don't
         disallow a change to these modes, reload will assume it's ok to
         drop the subreg from (subreg:SI (reg:HI 100) 0).  This affects
         the vec_dupv4hi pattern.  */
      if (GET_MODE_SIZE (from) < 4)
        return true;

      /* Vector registers do not support subreg with nonzero offsets, which
         are otherwise valid for integer registers.  Since we can't see
         whether we have a nonzero offset from here, prohibit all
         nonparadoxical subregs changing size.  */
      if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
        return true;
    }

  return false;
}
/* Return the cost of moving data from a register in class CLASS1 to
   one in class CLASS2.

   It is not required that the cost always equal 2 when FROM is the same as
   TO; on some machines it is expensive to move between registers if they
   are not general registers.  */

int
ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
                         enum reg_class class2)
{
  /* In case we require secondary memory, compute the cost of the store
     followed by a load.  In order to avoid bad register allocation choices,
     we need this to be *at least* as high as the symmetric
     MEMORY_MOVE_COST.  */
  if (ix86_secondary_memory_needed (class1, class2, mode, 0))
    {
      int cost = 1;

      cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
                   MEMORY_MOVE_COST (mode, class1, 1));
      cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
                   MEMORY_MOVE_COST (mode, class2, 1));

      /* When copying from a general purpose register we may emit multiple
         stores followed by a single load, causing a memory-size mismatch
         stall.  Count this as an arbitrarily high cost of 20.  */
      if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
        cost += 20;

      /* In the case of FP/MMX moves, the registers actually overlap, and we
         have to switch modes in order to treat them differently.  */
      if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
          || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
        cost += 20;

      return cost;
    }

  /* Moves between SSE/MMX and integer unit are expensive.  */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
      || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
    return ix86_cost->mmxsse_to_integer;
  if (MAYBE_FLOAT_CLASS_P (class1))
    return ix86_cost->fp_move;
  if (MAYBE_SSE_CLASS_P (class1))
    return ix86_cost->sse_move;
  if (MAYBE_MMX_CLASS_P (class1))
    return ix86_cost->mmx_move;
  return 2;
}
/* Return 1 if hard register REGNO can hold a value of machine-mode MODE.  */

int
ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* Only the flags register can hold CCmode values.  */
  if (CC_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;
  if (GET_MODE_CLASS (mode) == MODE_CC
      || GET_MODE_CLASS (mode) == MODE_RANDOM
      || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
    return 0;
  if (FP_REGNO_P (regno))
    return VALID_FP_MODE_P (mode);
  if (SSE_REGNO_P (regno))
    {
      /* We implement the move patterns for all vector modes into and
         out of SSE registers, even when no operation instructions
         are available.  */
      return (VALID_SSE_REG_MODE (mode)
              || VALID_SSE2_REG_MODE (mode)
              || VALID_MMX_REG_MODE (mode)
              || VALID_MMX_REG_MODE_3DNOW (mode));
    }
  if (MMX_REGNO_P (regno))
    {
      /* We implement the move patterns for 3DNOW modes even in MMX mode,
         so if the register is available at all, then we can move data of
         the given mode into or out of it.  */
      return (VALID_MMX_REG_MODE (mode)
              || VALID_MMX_REG_MODE_3DNOW (mode));
    }

  if (mode == QImode)
    {
      /* Take care for QImode values - they can be in non-QI regs,
         but then they do cause partial register stalls.  */
      if (regno < 4 || TARGET_64BIT)
        return 1;
      if (!TARGET_PARTIAL_REG_STALL)
        return 1;
      return reload_in_progress || reload_completed;
    }
  /* We handle both integer and floats in the general purpose registers.  */
  else if (VALID_INT_MODE_P (mode))
    return 1;
  else if (VALID_FP_MODE_P (mode))
    return 1;
  /* Lots of MMX code casts 8 byte vector modes to DImode.  If we then go
     on to use that value in smaller contexts, this can easily force a
     pseudo to be allocated to GENERAL_REGS.  Since this is no worse than
     supporting DImode, allow it.  */
  else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
    return 1;

  return 0;
}
/* A subroutine of ix86_modes_tieable_p.  Return true if MODE is a
   tieable integer mode.  */

static bool
ix86_tieable_integer_mode_p (enum machine_mode mode)
{
  switch (mode)
    {
    case HImode: case SImode:
      return true;
    case QImode:
      return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
    case DImode:
      return TARGET_64BIT;
    default:
      return false;
    }
}
/* Return true if MODE1 is accessible in a register that can hold MODE2
   without copying.  That is, all register classes that can hold MODE2
   can also hold MODE1.  */

bool
ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  if (mode1 == mode2)
    return true;

  if (ix86_tieable_integer_mode_p (mode1)
      && ix86_tieable_integer_mode_p (mode2))
    return true;

  /* MODE2 being XFmode implies fp stack or general regs, which means we
     can tie any smaller floating point modes to it.  Note that we do not
     tie this with TFmode.  */
  if (mode2 == XFmode)
    return mode1 == SFmode || mode1 == DFmode;

  /* MODE2 being DFmode implies fp stack, general or sse regs, which means
     that we can tie it with SFmode.  */
  if (mode2 == DFmode)
    return mode1 == SFmode;

  /* If MODE2 is only appropriate for an SSE register, then tie with
     any other mode acceptable to SSE registers.  */
  if (GET_MODE_SIZE (mode2) == 16
      && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 16
            && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));

  /* If MODE2 is appropriate for an MMX register, then tie
     with any other mode acceptable to MMX registers.  */
  if (GET_MODE_SIZE (mode2) == 8
      && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 8
            && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));

  return false;
}
/* Return the cost of moving data of mode M between a
   register and memory.  A value of 2 is the default; this cost is
   relative to those in `REGISTER_MOVE_COST'.

   If moving between registers and memory is more expensive than
   between two registers, you should define this macro to express the
   relative cost.

   Also model the increased cost of moving QImode registers in
   non-Q_REGS classes.  */

int
ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
{
  if (FLOAT_CLASS_P (class))
    {
      int index;
      switch (mode)
        {
        case SFmode: index = 0; break;
        case DFmode: index = 1; break;
        case XFmode: index = 2; break;
        default: return 100;
        }
      return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
    }
  if (SSE_CLASS_P (class))
    {
      int index;
      switch (GET_MODE_SIZE (mode))
        {
        case 4: index = 0; break;
        case 8: index = 1; break;
        case 16: index = 2; break;
        default: return 100;
        }
      return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
    }
  if (MMX_CLASS_P (class))
    {
      int index;
      switch (GET_MODE_SIZE (mode))
        {
        case 4: index = 0; break;
        case 8: index = 1; break;
        default: return 100;
        }
      return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
    }
  switch (GET_MODE_SIZE (mode))
    {
    case 1:
      if (in)
        return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
                : ix86_cost->movzbl_load);
      else
        return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
                : ix86_cost->int_store[0] + 4);
    case 2:
      return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
    default:
      /* Compute number of 32bit moves needed.  TFmode is moved as XFmode.  */
      if (mode == TFmode)
        mode = XFmode;
      return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
              * (((int) GET_MODE_SIZE (mode)
                  + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
        *total = 3;
      else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
        *total = 2;
      else if (flag_pic && SYMBOLIC_CONST (x)
               && (!TARGET_64BIT
                   || (GET_CODE (x) != LABEL_REF
                       && (GET_CODE (x) != SYMBOL_REF
                           || !SYMBOL_REF_LOCAL_P (x)))))
        *total = 1;
      else
        *total = 0;
      break;

    case CONST_DOUBLE:
      if (mode == VOIDmode)
        *total = 0;
      else
        switch (standard_80387_constant_p (x))
          {
          case 1: /* 0.0 */
            *total = 1;
            break;
          default: /* Other constants */
            *total = 2;
            break;
          case 0:
          case -1:
            /* Start with (MEM (SYMBOL_REF)), since that's where
               it'll probably end up.  Add a penalty for size.  */
            *total = (COSTS_N_INSNS (1)
                      + (flag_pic != 0 && !TARGET_64BIT)
                      + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
            break;
          }
      break;

    case ZERO_EXTEND:
      /* Zero extension is often completely free on x86_64, so make
         it as cheap as possible.  */
      if (TARGET_64BIT && mode == DImode
          && GET_MODE (XEXP (x, 0)) == SImode)
        *total = 1;
      else if (TARGET_ZERO_EXTEND_WITH_AND)
        *total = ix86_cost->add;
      else
        *total = ix86_cost->movzx;
      break;

    case SIGN_EXTEND:
      *total = ix86_cost->movsx;
      break;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
        {
          HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
          if (value == 1)
            {
              *total = ix86_cost->add;
              break;
            }
          if ((value == 2 || value == 3)
              && ix86_cost->lea <= ix86_cost->shift_const)
            {
              *total = ix86_cost->lea;
              break;
            }
        }
      /* FALLTHRU */

    case ROTATE:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATERT:
      if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
        {
          if (CONST_INT_P (XEXP (x, 1)))
            {
              if (INTVAL (XEXP (x, 1)) > 32)
                *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
              else
                *total = ix86_cost->shift_const * 2;
            }
          else
            {
              if (GET_CODE (XEXP (x, 1)) == AND)
                *total = ix86_cost->shift_var * 2;
              else
                *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
            }
        }
      else
        {
          if (CONST_INT_P (XEXP (x, 1)))
            *total = ix86_cost->shift_const;
          else
            *total = ix86_cost->shift_var;
        }
      break;

    case MULT:
      if (FLOAT_MODE_P (mode))
        {
          *total = ix86_cost->fmul;
          return false;
        }
      else
        {
          rtx op0 = XEXP (x, 0);
          rtx op1 = XEXP (x, 1);
          int nbits;
          if (CONST_INT_P (XEXP (x, 1)))
            {
              unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
              for (nbits = 0; value != 0; value &= value - 1)
                nbits++;
            }
          else
            /* This is arbitrary.  */
            nbits = 7;

          /* Compute costs correctly for widening multiplication.  */
          if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
              && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
                 == GET_MODE_SIZE (mode))
            {
              int is_mulwiden = 0;
              enum machine_mode inner_mode = GET_MODE (op0);

              if (GET_CODE (op0) == GET_CODE (op1))
                is_mulwiden = 1, op1 = XEXP (op1, 0);
              else if (CONST_INT_P (op1))
                {
                  if (GET_CODE (op0) == SIGN_EXTEND)
                    is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
                                  == INTVAL (op1);
                  else
                    is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
                }

              if (is_mulwiden)
                op0 = XEXP (op0, 0), mode = GET_MODE (op0);
            }

          *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
                    + nbits * ix86_cost->mult_bit
                    + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));

          return true;
        }

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (FLOAT_MODE_P (mode))
        *total = ix86_cost->fdiv;
      else
        *total = ix86_cost->divide[MODE_INDEX (mode)];
      break;

    case PLUS:
      if (FLOAT_MODE_P (mode))
        *total = ix86_cost->fadd;
      else if (GET_MODE_CLASS (mode) == MODE_INT
               && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
        {
          if (GET_CODE (XEXP (x, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
              && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
              && CONSTANT_P (XEXP (x, 1)))
            {
              HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
              if (val == 2 || val == 4 || val == 8)
                {
                  *total = ix86_cost->lea;
                  *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
                  *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
                                      outer_code);
                  *total += rtx_cost (XEXP (x, 1), outer_code);
                  return true;
                }
            }
          else if (GET_CODE (XEXP (x, 0)) == MULT
                   && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
            {
              HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
              if (val == 2 || val == 4 || val == 8)
                {
                  *total = ix86_cost->lea;
                  *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
                  *total += rtx_cost (XEXP (x, 1), outer_code);
                  return true;
                }
            }
          else if (GET_CODE (XEXP (x, 0)) == PLUS)
            {
              *total = ix86_cost->lea;
              *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
              *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
              *total += rtx_cost (XEXP (x, 1), outer_code);
              return true;
            }
        }
      /* FALLTHRU */

    case MINUS:
      if (FLOAT_MODE_P (mode))
        {
          *total = ix86_cost->fadd;
          return false;
        }
      /* FALLTHRU */

    case AND:
    case IOR:
    case XOR:
      if (!TARGET_64BIT && mode == DImode)
        {
          *total = (ix86_cost->add * 2
                    + (rtx_cost (XEXP (x, 0), outer_code)
                       << (GET_MODE (XEXP (x, 0)) != DImode))
                    + (rtx_cost (XEXP (x, 1), outer_code)
                       << (GET_MODE (XEXP (x, 1)) != DImode)));
          return true;
        }
      /* FALLTHRU */

    case NEG:
      if (FLOAT_MODE_P (mode))
        {
          *total = ix86_cost->fchs;
          return false;
        }
      /* FALLTHRU */

    case NOT:
      if (!TARGET_64BIT && mode == DImode)
        *total = ix86_cost->add * 2;
      else
        *total = ix86_cost->add;
      break;

    case COMPARE:
      if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
          && XEXP (XEXP (x, 0), 1) == const1_rtx
          && CONST_INT_P (XEXP (XEXP (x, 0), 2))
          && XEXP (x, 1) == const0_rtx)
        {
          /* This kind of construct is implemented using test[bwl].
             Treat it as if we had an AND.  */
          *total = (ix86_cost->add
                    + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
                    + rtx_cost (const1_rtx, outer_code));
          return true;
        }
      return false;

    case FLOAT_EXTEND:
      if (!TARGET_SSE_MATH
          || mode == XFmode
          || (mode == DFmode && !TARGET_SSE2))
        *total = 0;
      return false;

    case ABS:
      if (FLOAT_MODE_P (mode))
        *total = ix86_cost->fabs;
      break;

    case SQRT:
      if (FLOAT_MODE_P (mode))
        *total = ix86_cost->fsqrt;
      break;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_TP)
        *total = 0;
      return false;

    default:
      return false;
    }
  return true;
}
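/* Costing example: (plus (mult (reg) (const_int 4)) (reg)) in SImode is
   priced above as a single lea rather than as a multiply plus an add,
   matching the "lea (%base,%index,4), %dest" instruction it becomes.  */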
#if TARGET_MACHO

static int current_machopic_label_num;

/* Given a symbol name and its associated stub, write out the
   definition of the stub.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *binder_name, *symbol_name, lazy_ptr_name[32];
  int label = ++current_machopic_label_num;

  /* For 64-bit we shouldn't get here.  */
  gcc_assert (!TARGET_64BIT);

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (stub);
  binder_name = alloca (length + 32);
  GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);

  length = strlen (symb);
  symbol_name = alloca (length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  sprintf (lazy_ptr_name, "L%d$lz", label);

  if (MACHOPIC_PURE)
    switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub_section]);

  fprintf (file, "%s:\n", stub);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

  if (MACHOPIC_PURE)
    {
      fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
      fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
      fprintf (file, "\tjmp\t*%%edx\n");
    }
  else
    fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);

  fprintf (file, "%s:\n", binder_name);

  if (MACHOPIC_PURE)
    {
      fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
      fprintf (file, "\tpushl\t%%eax\n");
    }
  else
    fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);

  fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "\t.long %s\n", binder_name);
}

void
darwin_x86_file_end (void)
{
  darwin_file_end ();
  ix86_file_end ();
}
#endif /* TARGET_MACHO */
/* Order the registers for register allocator.  */

void
x86_order_regs_for_local_alloc (void)
{
  int pos = 0;
  int i;

  /* First allocate the local general purpose registers.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (GENERAL_REGNO_P (i) && call_used_regs[i])
      reg_alloc_order [pos++] = i;

  /* Global general purpose registers.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (GENERAL_REGNO_P (i) && !call_used_regs[i])
      reg_alloc_order [pos++] = i;

  /* x87 registers come first in case we are doing FP math
     using them.  */
  if (!TARGET_SSE_MATH)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      reg_alloc_order [pos++] = i;

  /* SSE registers.  */
  for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
    reg_alloc_order [pos++] = i;
  for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
    reg_alloc_order [pos++] = i;

  /* x87 registers.  */
  if (TARGET_SSE_MATH)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      reg_alloc_order [pos++] = i;

  for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
    reg_alloc_order [pos++] = i;

  /* Initialize the rest of the array, as we do not allocate some
     registers at all.  */
  while (pos < FIRST_PSEUDO_REGISTER)
    reg_alloc_order [pos++] = 0;
}
/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
ix86_handle_struct_attribute (tree *node, tree name,
                              tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qs attribute ignored",
               IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qs incompatible attribute ignored",
               IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
ix86_ms_bitfield_layout_p (tree record_type)
{
  return ((TARGET_MS_BITFIELD_LAYOUT
           && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
          || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
}
/* Returns an expression indicating where the this parameter is
   located on entry to the FUNCTION.  */

static rtx
x86_this_parameter (tree function)
{
  tree type = TREE_TYPE (function);
  bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;

  if (TARGET_64BIT)
    {
      const int *parm_regs;

      if (TARGET_64BIT_MS_ABI)
        parm_regs = x86_64_ms_abi_int_parameter_registers;
      else
        parm_regs = x86_64_int_parameter_registers;
      return gen_rtx_REG (DImode, parm_regs[aggr]);
    }

  if (ix86_function_regparm (type, function) > 0
      && !type_has_variadic_args_p (type))
    {
      int regno = 0;
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
        regno = 2;
      return gen_rtx_REG (SImode, regno);
    }

  return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
}
/* Determine whether x86_output_mi_thunk can succeed.  */

static bool
x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
                         HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                         HOST_WIDE_INT vcall_offset, tree function)
{
  /* 64-bit can handle anything.  */
  if (TARGET_64BIT)
    return true;

  /* For 32-bit, everything's fine if we have one free register.  */
  if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
    return true;

  /* Need a free register for vcall_offset.  */
  if (vcall_offset)
    return false;

  /* Need a free register for GOT references.  */
  if (flag_pic && !(*targetm.binds_local_p) (function))
    return false;

  /* Otherwise ok.  */
  return true;
}
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
                     tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
                     HOST_WIDE_INT vcall_offset, tree function)
{
  rtx xops[3];
  rtx this = x86_this_parameter (function);
  rtx this_reg, tmp;

  /* If VCALL_OFFSET, we'll need THIS in a register.  Might as well
     pull it in now and let DELTA benefit.  */
  if (REG_P (this))
    this_reg = this;
  else if (vcall_offset)
    {
      /* Put the this parameter into %eax.  */
      xops[0] = this;
      xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
      output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
    }
  else
    this_reg = NULL_RTX;

  /* Adjust the this parameter by a fixed constant.  */
  if (delta)
    {
      xops[0] = GEN_INT (delta);
      xops[1] = this_reg ? this_reg : this;
      if (TARGET_64BIT)
        {
          if (!x86_64_general_operand (xops[0], DImode))
            {
              tmp = gen_rtx_REG (DImode, R10_REG);
              xops[1] = tmp;
              output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
              xops[0] = tmp;
              xops[1] = this;
            }
          output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
        }
      else
        output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
    }

  /* Adjust the this parameter by a value stored in the vtable.  */
  if (vcall_offset)
    {
      if (TARGET_64BIT)
        tmp = gen_rtx_REG (DImode, R10_REG);
      else
        {
          int tmp_regno = 2 /* ECX */;
          if (lookup_attribute ("fastcall",
                                TYPE_ATTRIBUTES (TREE_TYPE (function))))
            tmp_regno = 0 /* EAX */;
          tmp = gen_rtx_REG (SImode, tmp_regno);
        }

      xops[0] = gen_rtx_MEM (Pmode, this_reg);
      xops[1] = tmp;
      if (TARGET_64BIT)
        output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
      else
        output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);

      /* Adjust the this parameter.  */
      xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
      if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
        {
          rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
          xops[0] = GEN_INT (vcall_offset);
          xops[1] = tmp2;
          output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
          xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
        }
      xops[1] = this_reg;
      if (TARGET_64BIT)
        output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
      else
        output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
    }

  /* If necessary, drop THIS back to its stack slot.  */
  if (this_reg && this_reg != this)
    {
      xops[0] = this_reg;
      xops[1] = this;
      output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
    }

  xops[0] = XEXP (DECL_RTL (function), 0);
  if (TARGET_64BIT)
    {
      if (!flag_pic || (*targetm.binds_local_p) (function))
        output_asm_insn ("jmp\t%P0", xops);
      /* All thunks should be in the same object as their target,
         and thus binds_local_p should be true.  */
      else if (TARGET_64BIT_MS_ABI)
        gcc_unreachable ();
      else
        {
          tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
          tmp = gen_rtx_CONST (Pmode, tmp);
          tmp = gen_rtx_MEM (QImode, tmp);
          xops[0] = tmp;
          output_asm_insn ("jmp\t%A0", xops);
        }
    }
  else
    {
      if (!flag_pic || (*targetm.binds_local_p) (function))
        output_asm_insn ("jmp\t%P0", xops);
      else
#if TARGET_MACHO
        if (TARGET_MACHO)
          {
            rtx sym_ref = XEXP (DECL_RTL (function), 0);
            tmp = (gen_rtx_SYMBOL_REF
                   (Pmode,
                    machopic_indirection_name (sym_ref, /*stub_p=*/true)));
            tmp = gen_rtx_MEM (QImode, tmp);
            xops[0] = tmp;
            output_asm_insn ("jmp\t%0", xops);
          }
        else
#endif /* TARGET_MACHO */
        {
          tmp = gen_rtx_REG (SImode, 2 /* ECX */);
          output_set_got (tmp, NULL_RTX);

          xops[1] = tmp;
          output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
          output_asm_insn ("jmp\t{*}%1", xops);
        }
    }
}
static void
x86_file_start (void)
{
  default_file_start ();
#if TARGET_MACHO
  darwin_file_start ();
#endif
  if (X86_FILE_START_VERSION_DIRECTIVE)
    fputs ("\t.version\t\"01.01\"\n", asm_out_file);
  if (X86_FILE_START_FLTUSED)
    fputs ("\t.global\t__fltused\n", asm_out_file);
  if (ix86_asm_dialect == ASM_INTEL)
    fputs ("\t.intel_syntax\n", asm_out_file);
}
int
x86_field_alignment (tree field, int computed)
{
  enum machine_mode mode;
  tree type = TREE_TYPE (field);

  if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
    return computed;
  mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
                    ? get_inner_array_type (type) : type);
  if (mode == DFmode || mode == DCmode
      || GET_MODE_CLASS (mode) == MODE_INT
      || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
    return MIN (32, computed);
  return computed;
}
/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */
void
x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
{
  if (TARGET_64BIT)
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
#endif

      if (!TARGET_64BIT_MS_ABI && flag_pic)
        fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
      else
        fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
    }
  else if (flag_pic)
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
               LPREFIX, labelno, PROFILE_COUNT_REGISTER);
#endif
      fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
    }
  else
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
               PROFILE_COUNT_REGISTER);
#endif
      fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
    }
}
/* We don't have exact information about the insn sizes, but we may assume
   quite safely that we are informed about all 1 byte insns and memory
   address sizes.  This is enough to eliminate unnecessary padding in
   99% of cases.  */

static int
min_insn_size (rtx insn)
{
  int l = 0;

  if (!INSN_P (insn) || !active_insn_p (insn))
    return 0;

  /* Discard alignments we've emitted, and jump instructions.  */
  if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
      && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
    return 0;
  if (JUMP_P (insn)
      && (GET_CODE (PATTERN (insn)) == ADDR_VEC
          || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
    return 0;

  /* Important case - calls are always 5 bytes.
     It is common to have many calls in a row.  */
  if (CALL_P (insn)
      && symbolic_reference_mentioned_p (PATTERN (insn))
      && !SIBLING_CALL_P (insn))
    return 5;
  if (get_attr_length (insn) <= 1)
    return 1;

  /* For normal instructions we may rely on the sizes of addresses
     and the presence of a symbol to require 4 bytes of encoding.
     This is not the case for jumps, where references are PC relative.  */
  if (!JUMP_P (insn))
    {
      l = get_attr_length_address (insn);
      if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
        l = 4;
    }
  if (l)
    return 1+l;
  else
    return 2;
}
/* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
   16 byte window.  */

static void
ix86_avoid_jump_misspredicts (void)
{
  rtx insn, start = get_insns ();
  int nbytes = 0, njumps = 0;
  int isjump = 0;

  /* Look for all minimal intervals of instructions containing 4 jumps.
     The intervals are bounded by START and INSN.  NBYTES is the total
     size of instructions in the interval including INSN and not including
     START.  When the NBYTES is smaller than 16 bytes, it is possible
     that the end of START and INSN ends up in the same 16byte page.

     The smallest offset in the page INSN can start is the case where START
     ends on the offset 0.  Offset of INSN is then NBYTES - sizeof (INSN).
     We add p2align to 16byte window with maxskip 17 - NBYTES + sizeof (INSN).
     */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      nbytes += min_insn_size (insn);
      if (dump_file)
        fprintf(dump_file, "Insn %i estimated to %i bytes\n",
                INSN_UID (insn), min_insn_size (insn));
      if ((JUMP_P (insn)
           && GET_CODE (PATTERN (insn)) != ADDR_VEC
           && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
          || CALL_P (insn))
        njumps++;
      else
        continue;

      while (njumps > 3)
        {
          start = NEXT_INSN (start);
          if ((JUMP_P (start)
               && GET_CODE (PATTERN (start)) != ADDR_VEC
               && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
              || CALL_P (start))
            njumps--, isjump = 1;
          else
            isjump = 0;
          nbytes -= min_insn_size (start);
        }
      gcc_assert (njumps >= 0);
      if (dump_file)
        fprintf (dump_file, "Interval %i to %i has %i bytes\n",
                 INSN_UID (start), INSN_UID (insn), nbytes);

      if (njumps == 3 && isjump && nbytes < 16)
        {
          int padsize = 15 - nbytes + min_insn_size (insn);

          if (dump_file)
            fprintf (dump_file, "Padding insn %i by %i bytes!\n",
                     INSN_UID (insn), padsize);
          emit_insn_before (gen_align (GEN_INT (padsize)), insn);
        }
    }
}
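/* Worked example of the padding formula above: with three jumps already
   counted, an incoming fourth jump and nbytes == 12 (including a 2-byte
   jump), padsize is 15 - 12 + 2 = 5, so the emitted align insn inserts
   enough padding to push the fourth jump into the next 16-byte window.  */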
/* AMD Athlon works faster when RET is not the destination of a
   conditional jump or directly preceded by another jump instruction.
   We avoid the penalty by inserting a NOP just before the RET
   instruction in such cases.  */
static void
ix86_pad_returns (void)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    {
      basic_block bb = e->src;
      rtx ret = BB_END (bb);
      rtx prev;
      bool replace = false;

      if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
          || !maybe_hot_bb_p (bb))
        continue;
      for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
        if (active_insn_p (prev) || LABEL_P (prev))
          break;
      if (prev && LABEL_P (prev))
        {
          edge e;
          edge_iterator ei;

          FOR_EACH_EDGE (e, ei, bb->preds)
            if (EDGE_FREQUENCY (e) && e->src->index >= 0
                && !(e->flags & EDGE_FALLTHRU))
              replace = true;
        }
      if (!replace)
        {
          prev = prev_active_insn (ret);
          if (prev
              && ((JUMP_P (prev) && any_condjump_p (prev))
                  || CALL_P (prev)))
            replace = true;
          /* Empty functions get branch mispredict even when the jump
             destination is not visible to us.  */
          if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
            replace = true;
        }
      if (replace)
        {
          emit_insn_before (gen_return_internal_long (), ret);
          delete_insn (ret);
        }
    }
}
/* Implement machine specific optimizations.  We implement padding of returns
   for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window.  */
static void
ix86_reorg (void)
{
  if (TARGET_PAD_RETURNS && optimize && !optimize_size)
    ix86_pad_returns ();
  if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
    ix86_avoid_jump_misspredicts ();
}
/* Return nonzero when a QImode register that must be represented via a
   REX prefix is used.  */
bool
x86_extended_QIreg_mentioned_p (rtx insn)
{
  int i;
  extract_insn_cached (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    if (REG_P (recog_data.operand[i])
        && REGNO (recog_data.operand[i]) >= 4)
      return true;
  return false;
}

/* Return nonzero when P points to a register encoded via a REX prefix.
   Called via for_each_rtx.  */
static int
extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno;
  if (!REG_P (*p))
    return 0;
  regno = REGNO (*p);
  return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
}

/* Return true when INSN mentions a register that must be encoded using a
   REX prefix.  */
bool
x86_extended_reg_mentioned_p (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
}
/* Generate an unsigned DImode/SImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
x86_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode, inmode;

  inmode = GET_MODE (operands[1]);
  gcc_assert (inmode == SImode || inmode == DImode);

  out = operands[0];
  in = force_reg (inmode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);

  expand_float (out, in, 0);

  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
                            1, OPTAB_DIRECT);
  i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
                            1, OPTAB_DIRECT);
  i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);

  expand_float (f0, i0, 0);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
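/* Illustrative C equivalent of the negative path above (a sketch,
   assuming a 64-bit unsigned input u):

     unsigned long long i0 = (u >> 1) | (u & 1);
     double f0 = (double) (long long) i0;
     return f0 + f0;

   Folding the low bit into the shifted value makes the single final
   rounding come out the same as rounding the full unsigned value.  */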
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   with all elements equal to VAR.  Return true if successful.  */

static bool
ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
                                   rtx target, rtx val)
{
  enum machine_mode smode, wsmode, wvmode;
  rtx x;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
        return false;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
      val = force_reg (GET_MODE_INNER (mode), val);
      x = gen_rtx_VEC_DUPLICATE (mode, val);
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
      return true;

    case V4HImode:
      if (!mmx_ok)
        return false;
      if (TARGET_SSE || TARGET_3DNOW_A)
        {
          val = gen_lowpart (SImode, val);
          x = gen_rtx_TRUNCATE (HImode, val);
          x = gen_rtx_VEC_DUPLICATE (mode, x);
          emit_insn (gen_rtx_SET (VOIDmode, target, x));
          return true;
        }
      else
        {
          smode = HImode;
          wsmode = SImode;
          wvmode = V2SImode;
          goto widen;
        }

    case V8QImode:
      if (!mmx_ok)
        return false;
      smode = QImode;
      wsmode = HImode;
      wvmode = V4HImode;
      goto widen;

    case V8HImode:
      if (TARGET_SSE2)
        {
          rtx tmp1, tmp2;
          /* Extend HImode to SImode using a paradoxical SUBREG.  */
          tmp1 = gen_reg_rtx (SImode);
          emit_move_insn (tmp1, gen_lowpart (SImode, val));
          /* Insert the SImode value as low element of V4SImode vector.  */
          tmp2 = gen_reg_rtx (V4SImode);
          tmp1 = gen_rtx_VEC_MERGE (V4SImode,
                                    gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
                                    CONST0_RTX (V4SImode),
                                    const1_rtx);
          emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
          /* Cast the V4SImode vector back to a V8HImode vector.  */
          tmp1 = gen_reg_rtx (V8HImode);
          emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
          /* Duplicate the low short through the whole low SImode word.  */
          emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
          /* Cast the V8HImode vector back to a V4SImode vector.  */
          tmp2 = gen_reg_rtx (V4SImode);
          emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
          /* Replicate the low element of the V4SImode vector.  */
          emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
          /* Cast the V4SImode back to V8HImode, and store in target.  */
          emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
          return true;
        }
      smode = HImode;
      wsmode = SImode;
      wvmode = V4SImode;
      goto widen;

    case V16QImode:
      if (TARGET_SSE2)
        {
          rtx tmp1, tmp2;
          /* Extend QImode to SImode using a paradoxical SUBREG.  */
          tmp1 = gen_reg_rtx (SImode);
          emit_move_insn (tmp1, gen_lowpart (SImode, val));
          /* Insert the SImode value as low element of V4SImode vector.  */
          tmp2 = gen_reg_rtx (V4SImode);
          tmp1 = gen_rtx_VEC_MERGE (V4SImode,
                                    gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
                                    CONST0_RTX (V4SImode),
                                    const1_rtx);
          emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
          /* Cast the V4SImode vector back to a V16QImode vector.  */
          tmp1 = gen_reg_rtx (V16QImode);
          emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
          /* Duplicate the low byte through the whole low SImode word.  */
          emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
          emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
          /* Cast the V16QImode vector back to a V4SImode vector.  */
          tmp2 = gen_reg_rtx (V4SImode);
          emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
          /* Replicate the low element of the V4SImode vector.  */
          emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
          /* Cast the V4SImode back to V16QImode, and store in target.  */
          emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
          return true;
        }
      smode = QImode;
      wsmode = HImode;
      wvmode = V8HImode;
      goto widen;

    widen:
      /* Replicate the value once into the next wider mode and recurse.  */
      val = convert_modes (wsmode, smode, val, true);
      x = expand_simple_binop (wsmode, ASHIFT, val,
                               GEN_INT (GET_MODE_BITSIZE (smode)),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);

      x = gen_reg_rtx (wvmode);
      if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
        gcc_unreachable ();
      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }
}
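/* Example: broadcasting a char into V16QImode with SSE2 takes the
   TARGET_SSE2 branch above - the byte is inserted into a V4SImode
   vector, spread through the low word by two punpcklbw, then
   replicated to all elements by pshufd with selector 0.  */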
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   whose ONE_VAR element is VAR, and other elements are zero.  Return true
   if successful.  */

static bool
ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
                                     rtx target, rtx var, int one_var)
{
  enum machine_mode vsimode;
  rtx new_target;
  rtx x, tmp;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok)
        return false;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      if (one_var != 0)
        return false;
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
      return true;

    case V4SFmode:
    case V4SImode:
      if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
        new_target = gen_reg_rtx (mode);
      else
        new_target = target;
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_DUPLICATE (mode, var);
      x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
      if (one_var != 0)
        {
          /* We need to shuffle the value to the correct position, so
             create a new pseudo to store the intermediate result.  */

          /* With SSE2, we can use the integer shuffle insns.  */
          if (mode != V4SFmode && TARGET_SSE2)
            {
              emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
                                            GEN_INT (1),
                                            GEN_INT (one_var == 1 ? 0 : 1),
                                            GEN_INT (one_var == 2 ? 0 : 1),
                                            GEN_INT (one_var == 3 ? 0 : 1)));
              if (target != new_target)
                emit_move_insn (target, new_target);
              return true;
            }

          /* Otherwise convert the intermediate result to V4SFmode and
             use the SSE1 shuffle instructions.  */
          if (mode != V4SFmode)
            {
              tmp = gen_reg_rtx (V4SFmode);
              emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
            }
          else
            tmp = new_target;

          emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
                                       GEN_INT (1),
                                       GEN_INT (one_var == 1 ? 0 : 1),
                                       GEN_INT (one_var == 2 ? 0+4 : 1+4),
                                       GEN_INT (one_var == 3 ? 0+4 : 1+4)));

          if (mode != V4SFmode)
            emit_move_insn (target, gen_lowpart (V4SImode, tmp));
          else if (tmp != target)
            emit_move_insn (target, tmp);
        }
      else if (target != new_target)
        emit_move_insn (target, new_target);
      return true;

    case V8HImode:
    case V16QImode:
      vsimode = V4SImode;
      goto widen;
    case V4HImode:
    case V8QImode:
      if (!mmx_ok)
        return false;
      vsimode = V2SImode;
      goto widen;
    widen:
      if (one_var != 0)
        return false;

      /* Zero extend the variable element to SImode and recurse.  */
      var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);

      x = gen_reg_rtx (vsimode);
      if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
                                                var, one_var >> 2))
        gcc_unreachable ();

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }
}
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   consisting of the values in VALS.  It is known that all elements
   except ONE_VAR are constants.  Return true if successful.  */

static bool
ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
                                 rtx target, rtx vals, int one_var)
{
  rtx var = XVECEXP (vals, 0, one_var);
  enum machine_mode wmode;
  rtx const_vec, x;

  const_vec = copy_rtx (vals);
  XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
  const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));

  switch (mode)
    {
    case V2DFmode:
    case V2DImode:
    case V2SFmode:
    case V2SImode:
      /* For the two element vectors, it's just as easy to use
         the general case.  */
      return false;

    case V4SFmode:
    case V4SImode:
    case V8HImode:
    case V4HImode:
      break;

    case V16QImode:
      wmode = V8HImode;
      goto widen;
    case V8QImode:
      wmode = V4HImode;
      goto widen;
    widen:
      /* There's no way to set one QImode entry easily.  Combine
         the variable value with its adjacent constant value, and
         promote to an HImode set.  */
      x = XVECEXP (vals, 0, one_var ^ 1);
      if (one_var & 1)
        {
          var = convert_modes (HImode, QImode, var, true);
          var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
                                     NULL_RTX, 1, OPTAB_LIB_WIDEN);
          x = GEN_INT (INTVAL (x) & 0xff);
        }
      else
        {
          var = convert_modes (HImode, QImode, var, true);
          x = gen_int_mode (INTVAL (x) << 8, HImode);
        }
      if (x != const0_rtx)
        var = expand_simple_binop (HImode, IOR, var, x, var,
                                   1, OPTAB_LIB_WIDEN);

      x = gen_reg_rtx (wmode);
      emit_move_insn (x, gen_lowpart (wmode, const_vec));
      ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }

  emit_move_insn (target, const_vec);
  ix86_expand_vector_set (mmx_ok, target, var, one_var);
  return true;
}
/* A subroutine of ix86_expand_vector_init.  Handle the most general case:
   all values variable, and none identical.  */

static void
ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
                                 rtx target, rtx vals)
{
  enum machine_mode half_mode = GET_MODE_INNER (mode);
  rtx op0 = NULL, op1 = NULL;
  bool use_vec_concat = false;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok && !TARGET_SSE)
        break;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      /* For the two element vectors, we always implement VEC_CONCAT.  */
      op0 = XVECEXP (vals, 0, 0);
      op1 = XVECEXP (vals, 0, 1);
      use_vec_concat = true;
      break;

    case V4SFmode:
      half_mode = V2SFmode;
      goto half;
    case V4SImode:
      half_mode = V2SImode;
      goto half;
    half:
      {
        rtvec v;

        /* For V4SF and V4SI, we implement a concat of two V2 vectors.
           Recurse to load the two halves.  */

        op0 = gen_reg_rtx (half_mode);
        v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
        ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));

        op1 = gen_reg_rtx (half_mode);
        v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
        ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));

        use_vec_concat = true;
      }
      break;

    case V8HImode:
    case V16QImode:
    case V4HImode:
    case V8QImode:
      break;

    default:
      gcc_unreachable ();
    }

  if (use_vec_concat)
    {
      if (!register_operand (op0, half_mode))
        op0 = force_reg (half_mode, op0);
      if (!register_operand (op1, half_mode))
        op1 = force_reg (half_mode, op1);

      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_rtx_VEC_CONCAT (mode, op0, op1)));
    }
  else
    {
      int i, j, n_elts, n_words, n_elt_per_word;
      enum machine_mode inner_mode;
      rtx words[4], shift;

      inner_mode = GET_MODE_INNER (mode);
      n_elts = GET_MODE_NUNITS (mode);
      n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      n_elt_per_word = n_elts / n_words;
      shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));

      for (i = 0; i < n_words; ++i)
        {
          rtx word = NULL_RTX;

          for (j = 0; j < n_elt_per_word; ++j)
            {
              rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
              elt = convert_modes (word_mode, inner_mode, elt, true);

              if (j == 0)
                word = elt;
              else
                {
                  word = expand_simple_binop (word_mode, ASHIFT, word, shift,
                                              word, 1, OPTAB_LIB_WIDEN);
                  word = expand_simple_binop (word_mode, IOR, word, elt,
                                              word, 1, OPTAB_LIB_WIDEN);
                }
            }

          words[i] = word;
        }

      if (n_words == 1)
        emit_move_insn (target, gen_lowpart (mode, words[0]));
      else if (n_words == 2)
        {
          rtx tmp = gen_reg_rtx (mode);
          emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
          emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
          emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
          emit_move_insn (target, tmp);
        }
      else if (n_words == 4)
        {
          rtx tmp = gen_reg_rtx (V4SImode);
          vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
          ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
          emit_move_insn (target, gen_lowpart (mode, tmp));
        }
      else
        gcc_unreachable ();
    }
}
/* Initialize vector TARGET via VALS.  Suppress the use of MMX
   instructions unless MMX_OK is true.  */

void
ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  int i;
  rtx x;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
        n_var++, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
        all_const_zero = false;
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  /* Constants are best loaded from the constant pool.  */
  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  /* If all values are identical, broadcast the value.  */
  if (all_same
      && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
                                            XVECEXP (vals, 0, 0)))
    return;

  /* Values where only one field is non-constant are best loaded from
     the pool and overwritten via move later.  */
  if (n_var == 1)
    {
      if (all_const_zero
          && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
                                                  XVECEXP (vals, 0, one_var),
                                                  one_var))
        return;

      if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
        return;
    }

  ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
}
void
ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  bool use_vec_merge = false;
  rtx tmp;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (mmx_ok)
        {
          tmp = gen_reg_rtx (GET_MODE_INNER (mode));
          ix86_expand_vector_extract (true, tmp, target, 1 - elt);
          if (elt == 0)
            tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
          else
            tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
          emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
          return;
        }
      break;

    case V2DFmode:
    case V2DImode:
      {
        rtx op0, op1;

        /* For the two element vectors, we implement a VEC_CONCAT with
           the extraction of the other element.  */

        tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
        tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);

        if (elt == 0)
          op0 = val, op1 = tmp;
        else
          op0 = tmp, op1 = val;

        tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
        emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
      }
      return;

    case V4SFmode:
      switch (elt)
        {
        case 0:
          use_vec_merge = true;
          break;

        case 1:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* target = A A B B */
          emit_insn (gen_sse_unpcklps (target, target, target));
          /* target = X A B B */
          ix86_expand_vector_set (false, target, val, 0);
          /* target = A X C D */
          emit_insn (gen_sse_shufps_1 (target, target, tmp,
                                       GEN_INT (1), GEN_INT (0),
                                       GEN_INT (2+4), GEN_INT (3+4)));
          return;

        case 2:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* tmp = X B C D */
          ix86_expand_vector_set (false, tmp, val, 0);
          /* target = A B X D */
          emit_insn (gen_sse_shufps_1 (target, target, tmp,
                                       GEN_INT (0), GEN_INT (1),
                                       GEN_INT (0+4), GEN_INT (3+4)));
          return;

        case 3:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* tmp = X B C D */
          ix86_expand_vector_set (false, tmp, val, 0);
          /* target = A B C X */
          emit_insn (gen_sse_shufps_1 (target, target, tmp,
                                       GEN_INT (0), GEN_INT (1),
                                       GEN_INT (2+4), GEN_INT (0+4)));
          return;

        default:
          gcc_unreachable ();
        }
      break;

    case V4SImode:
      /* Element 0 handled by vec_merge below.  */
      if (elt == 0)
        {
          use_vec_merge = true;
          break;
        }

      if (TARGET_SSE2)
        {
          /* With SSE2, use integer shuffles to swap element 0 and ELT,
             store into element 0, then shuffle them back.  */
          rtx order[4];

          order[0] = GEN_INT (elt);
          order[1] = const1_rtx;
          order[2] = const2_rtx;
          order[3] = GEN_INT (3);
          order[elt] = const0_rtx;

          emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
                                        order[1], order[2], order[3]));

          ix86_expand_vector_set (false, target, val, 0);

          emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
                                        order[1], order[2], order[3]));
        }
      else
        {
          /* For SSE1, we have to reuse the V4SF code.  */
          ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
                                  gen_lowpart (SFmode, val), elt);
        }
      return;

    case V8HImode:
      use_vec_merge = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
    case V8QImode:
    default:
      break;
    }

  if (use_vec_merge)
    {
      tmp = gen_rtx_VEC_DUPLICATE (mode, val);
      tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);

      emit_move_insn (mem, target);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (tmp, val);

      emit_move_insn (target, mem);
    }
}
void
ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  bool use_vec_extr = false;
  rtx tmp;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
        break;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      use_vec_extr = true;
      break;

    case V4SFmode:
      switch (elt)
        {
        case 0:
          tmp = vec;
          break;

        case 1:
        case 3:
          tmp = gen_reg_rtx (mode);
          emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
                                       GEN_INT (elt), GEN_INT (elt),
                                       GEN_INT (elt+4), GEN_INT (elt+4)));
          break;

        case 2:
          tmp = gen_reg_rtx (mode);
          emit_insn (gen_sse_unpckhps (tmp, vec, vec));
          break;

        default:
          gcc_unreachable ();
        }
      vec = tmp;
      use_vec_extr = true;
      elt = 0;
      break;

    case V4SImode:
      if (TARGET_SSE2)
        {
          switch (elt)
            {
            case 0:
              tmp = vec;
              break;

            case 1:
            case 3:
              tmp = gen_reg_rtx (mode);
              emit_insn (gen_sse2_pshufd_1 (tmp, vec,
                                            GEN_INT (elt), GEN_INT (elt),
                                            GEN_INT (elt), GEN_INT (elt)));
              break;

            case 2:
              tmp = gen_reg_rtx (mode);
              emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
              break;

            default:
              gcc_unreachable ();
            }
          vec = tmp;
          use_vec_extr = true;
          elt = 0;
        }
      else
        {
          /* For SSE1, we have to reuse the V4SF code.  */
          ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
                                      gen_lowpart (V4SFmode, vec), elt);
          return;
        }
      break;

    case V8HImode:
      use_vec_extr = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
    case V8QImode:
      /* ??? Could extract the appropriate HImode element and shift.  */
    default:
      break;
    }

  if (use_vec_extr)
    {
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
      tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);

      /* Let the rtl optimizers know about the zero extension performed.  */
      if (inner_mode == HImode)
        {
          tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
          target = gen_lowpart (SImode, target);
        }

      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);

      emit_move_insn (mem, vec);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (target, tmp);
    }
}
20992 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
20993 pattern to reduce; DEST is the destination; IN is the input vector. */
20996 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
20998 rtx tmp1, tmp2, tmp3;
21000 tmp1 = gen_reg_rtx (V4SFmode);
21001 tmp2 = gen_reg_rtx (V4SFmode);
21002 tmp3 = gen_reg_rtx (V4SFmode);
21004 emit_insn (gen_sse_movhlps (tmp1, in, in));
21005 emit_insn (fn (tmp2, tmp1, in));
21007 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
21008 GEN_INT (1), GEN_INT (1),
21009 GEN_INT (1+4), GEN_INT (1+4)));
21010 emit_insn (fn (dest, tmp2, tmp3));
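/* Illustrative trace for IN = { A, B, C, D }, assuming FN is an
   associative and commutative pattern (e.g. min/max):
      tmp1 = movhlps (in, in)             -> { C, D, C, D }
      tmp2 = fn (tmp1, in)                -> { fn(C,A), fn(D,B), .., .. }
      tmp3 = shufps (tmp2, tmp2, 1,1,1,1) -> fn(D,B) in every element
      dest = fn (tmp2, tmp3)              -> fn(fn(C,A), fn(D,B)) in element 0,
   so element 0 of DEST holds the reduction over all four inputs.  */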
21013 /* Target hook for scalar_mode_supported_p. */
21015 ix86_scalar_mode_supported_p (enum machine_mode mode)
21017 if (DECIMAL_FLOAT_MODE_P (mode))
21020 return default_scalar_mode_supported_p (mode);
21023 /* Implements target hook vector_mode_supported_p. */
21025 ix86_vector_mode_supported_p (enum machine_mode mode)
21027 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
21029 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
21031 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
21033 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
21038 /* Worker function for TARGET_MD_ASM_CLOBBERS.
21040 We do this in the new i386 backend to maintain source compatibility
21041 with the old cc0-based compiler. */
21044 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
21045 tree inputs ATTRIBUTE_UNUSED,
21048 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
21050 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
/* Implements the targetm.encode_section_info hook.  This
   is not used by NetWare.  */
21058 static void ATTRIBUTE_UNUSED
21059 ix86_encode_section_info (tree decl, rtx rtl, int first)
21061 default_encode_section_info (decl, rtl, first);
21063 if (TREE_CODE (decl) == VAR_DECL
21064 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
21065 && ix86_in_large_data_p (decl))
21066 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
21069 /* Worker function for REVERSE_CONDITION. */
21072 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
21074 return (mode != CCFPmode && mode != CCFPUmode
21075 ? reverse_condition (code)
21076 : reverse_condition_maybe_unordered (code));
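/* E.g. (illustrative): GT reverses to LE for the integer flag modes,
   but for the FP modes it must become UNLE, since with a NaN operand
   !(a > b) implies only "a <= b or unordered", not a <= b.  */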
/* Output code to perform an x87 FP register move, from OPERANDS[1]
   to OPERANDS[0].  */
21083 output_387_reg_move (rtx insn, rtx *operands)
21085 if (REG_P (operands[1])
21086 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
21088 if (REGNO (operands[0]) == FIRST_STACK_REG)
21089 return output_387_ffreep (operands, 0);
21090 return "fstp\t%y0";
21092 if (STACK_TOP_P (operands[0]))
21093 return "fld%z1\t%y1";
/* Output code to perform a conditional jump to LABEL, if the C2 flag
   in the FP status register is set.  */
21101 ix86_emit_fp_unordered_jump (rtx label)
21103 rtx reg = gen_reg_rtx (HImode);
21106 emit_insn (gen_x86_fnstsw_1 (reg));
21108 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_size))
21110 emit_insn (gen_x86_sahf_1 (reg));
21112 temp = gen_rtx_REG (CCmode, FLAGS_REG);
21113 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
21117 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
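/* The 0x04 mask tests bit 2 of the high status-word byte, i.e. bit 10
   of the FSW, which is the C2 condition flag.  */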
21119 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21120 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
21123 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
21124 gen_rtx_LABEL_REF (VOIDmode, label),
21126 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
21128 emit_jump_insn (temp);
21129 predict_jump (REG_BR_PROB_BASE * 10 / 100);
21132 /* Output code to perform a log1p XFmode calculation. */
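/* A sketch of the method: the magic constant below is 1 - sqrt(2)/2,
   the largest |x| for which fyl2xp1 is documented to be accurate:
      |x| <  1 - sqrt(2)/2:  op0 = ln(2) * log2 (1 + x)        via fyl2xp1
      |x| >= 1 - sqrt(2)/2:  op0 = ln(2) * log2 (tmp), tmp = 1 + x, via fyl2x
   where ln(2) is the fldln2 constant loaded below.  */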
21134 void ix86_emit_i387_log1p (rtx op0, rtx op1)
21136 rtx label1 = gen_label_rtx ();
21137 rtx label2 = gen_label_rtx ();
21139 rtx tmp = gen_reg_rtx (XFmode);
21140 rtx tmp2 = gen_reg_rtx (XFmode);
21142 emit_insn (gen_absxf2 (tmp, op1));
21143 emit_insn (gen_cmpxf (tmp,
21144 CONST_DOUBLE_FROM_REAL_VALUE (
21145 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
21147 emit_jump_insn (gen_bge (label1));
21149 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
21150 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
21151 emit_jump (label2);
21153 emit_label (label1);
21154 emit_move_insn (tmp, CONST1_RTX (XFmode));
21155 emit_insn (gen_addxf3 (tmp, op1, tmp));
21156 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
21157 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
21159 emit_label (label2);
21162 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
21164 static void ATTRIBUTE_UNUSED
21165 i386_solaris_elf_named_section (const char *name, unsigned int flags,
/* With Binutils 2.15, the "@unwind" marker must be specified on
   every occurrence of the ".eh_frame" section, not just the first
   one.  */
21172 && strcmp (name, ".eh_frame") == 0)
21174 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
21175 flags & SECTION_WRITE ? "aw" : "a");
21178 default_elf_asm_named_section (name, flags, decl);
21181 /* Return the mangling of TYPE if it is an extended fundamental type. */
21183 static const char *
21184 ix86_mangle_fundamental_type (tree type)
21186 switch (TYPE_MODE (type))
21189 /* __float128 is "g". */
21192 /* "long double" or __float80 is "e". */
/* For 32-bit code we can save PIC register set-up by using the
   __stack_chk_fail_local hidden function instead of calling
   __stack_chk_fail directly.  64-bit code doesn't need to set up any
   PIC register, so it is better to call __stack_chk_fail directly.  */
21205 ix86_stack_protect_fail (void)
21207 return TARGET_64BIT
21208 ? default_external_stack_protect_fail ()
21209 : default_hidden_stack_protect_fail ();
21212 /* Select a format to encode pointers in exception handling data. CODE
21213 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
21214 true if the symbol may be affected by dynamic relocations.
21216 ??? All x86 object file formats are capable of representing this.
21217 After all, the relocation needed is the same as for the call insn.
21218 Whether or not a particular assembler allows us to enter such, I
21219 guess we'll have to see. */
21221 asm_preferred_eh_data_format (int code, int global)
21225 int type = DW_EH_PE_sdata8;
21227 || ix86_cmodel == CM_SMALL_PIC
21228 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
21229 type = DW_EH_PE_sdata4;
21230 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
21232 if (ix86_cmodel == CM_SMALL
21233 || (ix86_cmodel == CM_MEDIUM && code))
21234 return DW_EH_PE_udata4;
21235 return DW_EH_PE_absptr;
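/* For example (illustrative): 64-bit small-model PIC code yields
   DW_EH_PE_pcrel | DW_EH_PE_sdata4, plus DW_EH_PE_indirect when GLOBAL
   is set; non-PIC small-model code encodes as plain DW_EH_PE_udata4.  */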
/* Expand copysign from SIGN to the positive value ABS_VALUE
   storing in RESULT.  If MASK is non-null, it shall be the inverted
   sign-bit mask, as produced by ix86_expand_sse_fabs.  */
21242 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
21244 enum machine_mode mode = GET_MODE (sign);
21245 rtx sgn = gen_reg_rtx (mode);
21246 if (mask == NULL_RTX)
21248 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
21249 if (!VECTOR_MODE_P (mode))
21251 /* We need to generate a scalar mode mask in this case. */
21252 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
21253 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
21254 mask = gen_reg_rtx (mode);
21255 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
21259 mask = gen_rtx_NOT (mode, mask);
21260 emit_insn (gen_rtx_SET (VOIDmode, sgn,
21261 gen_rtx_AND (mode, mask, sign)));
21262 emit_insn (gen_rtx_SET (VOIDmode, result,
21263 gen_rtx_IOR (mode, abs_value, sgn)));
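/* Net effect (a sketch): result = abs_value | (sign & SIGN_BIT),
   whether the sign-bit mask is built here from scratch or recovered by
   inverting the caller-supplied MASK.  */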
/* Expand fabs (OP0) and return a new rtx that holds the result.  The
   mask for masking out the sign-bit is stored in *SMASK, if that is
   non-null.  */
21270 ix86_expand_sse_fabs (rtx op0, rtx *smask)
21272 enum machine_mode mode = GET_MODE (op0);
21275 xa = gen_reg_rtx (mode);
21276 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
21277 if (!VECTOR_MODE_P (mode))
21279 /* We need to generate a scalar mode mask in this case. */
21280 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
21281 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
21282 mask = gen_reg_rtx (mode);
21283 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
21285 emit_insn (gen_rtx_SET (VOIDmode, xa,
21286 gen_rtx_AND (mode, op0, mask)));
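/* I.e. (a sketch): xa = op0 & ~SIGN_BIT, clearing the sign bit.  The
   inverted mask is what *SMASK receives, so callers can later reapply
   a sign with ix86_sse_copysign_to_positive.  */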
21294 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
21295 swapping the operands if SWAP_OPERANDS is true. The expanded
21296 code is a forward jump to a newly created label in case the
21297 comparison is true. The generated label rtx is returned. */
21299 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
21300 bool swap_operands)
21311 label = gen_label_rtx ();
21312 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
21313 emit_insn (gen_rtx_SET (VOIDmode, tmp,
21314 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
21315 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
21316 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
21317 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
21318 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
21319 JUMP_LABEL (tmp) = label;
21324 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
21325 using comparison code CODE. Operands are swapped for the comparison if
21326 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
21328 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
21329 bool swap_operands)
21331 enum machine_mode mode = GET_MODE (op0);
21332 rtx mask = gen_reg_rtx (mode);
21341 if (mode == DFmode)
21342 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
21343 gen_rtx_fmt_ee (code, mode, op0, op1)));
21345 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
21346 gen_rtx_fmt_ee (code, mode, op0, op1)));
/* Generate and return an rtx of mode MODE for 2**N where N is the
   number of mantissa bits of MODE, which must be one of DFmode or
   SFmode.  */
21354 ix86_gen_TWO52 (enum machine_mode mode)
21356 REAL_VALUE_TYPE TWO52r;
21359 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
21360 TWO52 = const_double_from_real_value (TWO52r, mode);
21361 TWO52 = force_reg (mode, TWO52);
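/* For DFmode this is 2**52 = 4503599627370496.0, the smallest power of
   two at and above which every representable value is already an
   integer; SFmode analogously uses 2**23.  */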
/* Expand SSE sequence for computing lround from OP1 storing
   into OP0.  */
21369 ix86_expand_lround (rtx op0, rtx op1)
21371 /* C code for the stuff we're doing below:
21372 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
21375 enum machine_mode mode = GET_MODE (op1);
21376 const struct real_format *fmt;
21377 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
21380 /* load nextafter (0.5, 0.0) */
21381 fmt = REAL_MODE_FORMAT (mode);
21382 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
21383 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
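/* pred_half is nextafter (0.5, 0.0) = 0.5 - 2**(-p-1), the largest
   value strictly below one half.  Using it instead of 0.5 avoids cases
   like x == pred_half itself, where x + 0.5 would round up to 1.0 and
   (long)(x + 0.5) would yield 1 although lround must return 0 (a
   sketch of the rationale; assumes round-to-nearest-even).  */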
21385 /* adj = copysign (0.5, op1) */
21386 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
21387 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
21389 /* adj = op1 + adj */
21390 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
21392 /* op0 = (imode)adj */
21393 expand_fix (op0, adj, 0);
/* Expand SSE2 sequence for computing lfloor or lceil from OP1
   storing into OP0.  */
21399 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
21401 /* C code for the stuff we're doing below (for do_floor):
21403 xi -= (double)xi > op1 ? 1 : 0;
21406 enum machine_mode fmode = GET_MODE (op1);
21407 enum machine_mode imode = GET_MODE (op0);
21408 rtx ireg, freg, label, tmp;
21410 /* reg = (long)op1 */
21411 ireg = gen_reg_rtx (imode);
21412 expand_fix (ireg, op1, 0);
21414 /* freg = (double)reg */
21415 freg = gen_reg_rtx (fmode);
21416 expand_float (freg, ireg, 0);
21418 /* ireg = (freg > op1) ? ireg - 1 : ireg */
21419 label = ix86_expand_sse_compare_and_jump (UNLE,
21420 freg, op1, !do_floor);
21421 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
21422 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
21423 emit_move_insn (ireg, tmp);
21425 emit_label (label);
21426 LABEL_NUSES (label) = 1;
21428 emit_move_insn (op0, ireg);
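/* Illustrative (do_floor): op1 = -1.5 truncates to ireg = -1, and
   freg = -1.0 is not UNLE -1.5, so the decrement runs and yields -2;
   op1 = 1.5 gives freg = 1.0 UNLE 1.5, the jump skips the adjustment,
   and the result stays 1.  */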
21431 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
21432 result in OPERAND0. */
21434 ix86_expand_rint (rtx operand0, rtx operand1)
21436 /* C code for the stuff we're doing below:
21437 xa = fabs (operand1);
21438 if (!isless (xa, 2**52))
21440 xa = xa + 2**52 - 2**52;
21441 return copysign (xa, operand1);
21443 enum machine_mode mode = GET_MODE (operand0);
21444 rtx res, xa, label, TWO52, mask;
21446 res = gen_reg_rtx (mode);
21447 emit_move_insn (res, operand1);
21449 /* xa = abs (operand1) */
21450 xa = ix86_expand_sse_fabs (res, &mask);
21452 /* if (!isless (xa, TWO52)) goto label; */
21453 TWO52 = ix86_gen_TWO52 (mode);
21454 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
21456 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
21457 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
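/* Adding and then subtracting 2**52 pushes the fraction bits out of
   the significand, so the FPU's current rounding mode (assumed here to
   be the default round-to-nearest) performs the actual rounding; the
   isless guard above keeps already-large values away from the trick.  */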
21459 ix86_sse_copysign_to_positive (res, xa, res, mask);
21461 emit_label (label);
21462 LABEL_NUSES (label) = 1;
21464 emit_move_insn (operand0, res);
/* Expand SSE2 sequence for computing floor or ceil from OPERAND1
   storing into OPERAND0.  */
21470 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
21472 /* C code for the stuff we expand below.
21473 double xa = fabs (x), x2;
21474 if (!isless (xa, TWO52))
21476 xa = xa + TWO52 - TWO52;
21477 x2 = copysign (xa, x);
21486 enum machine_mode mode = GET_MODE (operand0);
21487 rtx xa, TWO52, tmp, label, one, res, mask;
21489 TWO52 = ix86_gen_TWO52 (mode);
21491 /* Temporary for holding the result, initialized to the input
21492 operand to ease control flow. */
21493 res = gen_reg_rtx (mode);
21494 emit_move_insn (res, operand1);
21496 /* xa = abs (operand1) */
21497 xa = ix86_expand_sse_fabs (res, &mask);
21499 /* if (!isless (xa, TWO52)) goto label; */
21500 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
21502 /* xa = xa + TWO52 - TWO52; */
21503 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
21504 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
21506 /* xa = copysign (xa, operand1) */
21507 ix86_sse_copysign_to_positive (xa, xa, res, mask);
21509 /* generate 1.0 or -1.0 */
21510 one = force_reg (mode,
21511 const_double_from_real_value (do_floor
21512 ? dconst1 : dconstm1, mode));
21514 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
21515 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
21516 emit_insn (gen_rtx_SET (VOIDmode, tmp,
21517 gen_rtx_AND (mode, one, tmp)));
21518 /* We always need to subtract here to preserve signed zero. */
21519 tmp = expand_simple_binop (mode, MINUS,
21520 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
21521 emit_move_insn (res, tmp);
21523 emit_label (label);
21524 LABEL_NUSES (label) = 1;
21526 emit_move_insn (operand0, res);
/* Expand SSE2 sequence for computing floor or ceil from OPERAND1
   storing into OPERAND0.  */
21532 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
21534 /* C code for the stuff we expand below.
21535 double xa = fabs (x), x2;
21536 if (!isless (xa, TWO52))
21538 x2 = (double)(long)x;
21545 if (HONOR_SIGNED_ZEROS (mode))
21546 return copysign (x2, x);
21549 enum machine_mode mode = GET_MODE (operand0);
21550 rtx xa, xi, TWO52, tmp, label, one, res, mask;
21552 TWO52 = ix86_gen_TWO52 (mode);
21554 /* Temporary for holding the result, initialized to the input
21555 operand to ease control flow. */
21556 res = gen_reg_rtx (mode);
21557 emit_move_insn (res, operand1);
21559 /* xa = abs (operand1) */
21560 xa = ix86_expand_sse_fabs (res, &mask);
21562 /* if (!isless (xa, TWO52)) goto label; */
21563 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
21565 /* xa = (double)(long)x */
21566 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
21567 expand_fix (xi, res, 0);
21568 expand_float (xa, xi, 0);
21571 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
21573 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
21574 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
21575 emit_insn (gen_rtx_SET (VOIDmode, tmp,
21576 gen_rtx_AND (mode, one, tmp)));
21577 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
21578 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
21579 emit_move_insn (res, tmp);
21581 if (HONOR_SIGNED_ZEROS (mode))
21582 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
21584 emit_label (label);
21585 LABEL_NUSES (label) = 1;
21587 emit_move_insn (operand0, res);
21590 /* Expand SSE sequence for computing round from OPERAND1 storing
21591 into OPERAND0. Sequence that works without relying on DImode truncation
21592 via cvttsd2siq that is only available on 64bit targets. */
21594 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
21596 /* C code for the stuff we expand below.
21597 double xa = fabs (x), xa2, x2;
21598 if (!isless (xa, TWO52))
21600 Using the absolute value and copying back sign makes
21601 -0.0 -> -0.0 correct.
21602 xa2 = xa + TWO52 - TWO52;
21607 else if (dxa > 0.5)
21609 x2 = copysign (xa2, x);
21612 enum machine_mode mode = GET_MODE (operand0);
21613 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
21615 TWO52 = ix86_gen_TWO52 (mode);
21617 /* Temporary for holding the result, initialized to the input
21618 operand to ease control flow. */
21619 res = gen_reg_rtx (mode);
21620 emit_move_insn (res, operand1);
21622 /* xa = abs (operand1) */
21623 xa = ix86_expand_sse_fabs (res, &mask);
21625 /* if (!isless (xa, TWO52)) goto label; */
21626 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
21628 /* xa2 = xa + TWO52 - TWO52; */
21629 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
21630 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
21632 /* dxa = xa2 - xa; */
21633 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
21635 /* generate 0.5, 1.0 and -0.5 */
21636 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
21637 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
21638 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
21642 tmp = gen_reg_rtx (mode);
21643 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
21644 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
21645 emit_insn (gen_rtx_SET (VOIDmode, tmp,
21646 gen_rtx_AND (mode, one, tmp)));
21647 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
21648 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
21649 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
21650 emit_insn (gen_rtx_SET (VOIDmode, tmp,
21651 gen_rtx_AND (mode, one, tmp)));
21652 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
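/* Illustrative: x = 2.5 gives xa2 = 2.0 (ties-to-even), dxa = -0.5;
   the first correction does not fire, the second adds 1 back, and the
   result 3.0 is "round half away from zero", as round () requires.  */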
21654 /* res = copysign (xa2, operand1) */
21655 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
21657 emit_label (label);
21658 LABEL_NUSES (label) = 1;
21660 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
21666 ix86_expand_trunc (rtx operand0, rtx operand1)
21668 /* C code for SSE variant we expand below.
21669 double xa = fabs (x), x2;
21670 if (!isless (xa, TWO52))
21672 x2 = (double)(long)x;
21673 if (HONOR_SIGNED_ZEROS (mode))
21674 return copysign (x2, x);
21677 enum machine_mode mode = GET_MODE (operand0);
21678 rtx xa, xi, TWO52, label, res, mask;
21680 TWO52 = ix86_gen_TWO52 (mode);
21682 /* Temporary for holding the result, initialized to the input
21683 operand to ease control flow. */
21684 res = gen_reg_rtx (mode);
21685 emit_move_insn (res, operand1);
21687 /* xa = abs (operand1) */
21688 xa = ix86_expand_sse_fabs (res, &mask);
21690 /* if (!isless (xa, TWO52)) goto label; */
21691 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
21693 /* x = (double)(long)x */
21694 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
21695 expand_fix (xi, res, 0);
21696 expand_float (res, xi, 0);
21698 if (HONOR_SIGNED_ZEROS (mode))
21699 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
21701 emit_label (label);
21702 LABEL_NUSES (label) = 1;
21704 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  Sequence that works without relying on DImode
   truncation via cvttsd2siq that is only available on 64bit targets.  */
21710 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
21712 enum machine_mode mode = GET_MODE (operand0);
21713 rtx xa, mask, TWO52, label, one, res, smask, tmp;
21715 /* C code for SSE variant we expand below.
21716 double xa = fabs (x), x2;
21717 if (!isless (xa, TWO52))
21719 xa2 = xa + TWO52 - TWO52;
21723 x2 = copysign (xa2, x);
21727 TWO52 = ix86_gen_TWO52 (mode);
21729 /* Temporary for holding the result, initialized to the input
21730 operand to ease control flow. */
21731 res = gen_reg_rtx (mode);
21732 emit_move_insn (res, operand1);
21734 /* xa = abs (operand1) */
21735 xa = ix86_expand_sse_fabs (res, &smask);
21737 /* if (!isless (xa, TWO52)) goto label; */
21738 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
21740 /* res = xa + TWO52 - TWO52; */
21741 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
21742 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
21743 emit_move_insn (res, tmp);
21746 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
21748 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
21749 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
21750 emit_insn (gen_rtx_SET (VOIDmode, mask,
21751 gen_rtx_AND (mode, mask, one)));
21752 tmp = expand_simple_binop (mode, MINUS,
21753 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
21754 emit_move_insn (res, tmp);
21756 /* res = copysign (res, operand1) */
21757 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
21759 emit_label (label);
21760 LABEL_NUSES (label) = 1;
21762 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  */
21768 ix86_expand_round (rtx operand0, rtx operand1)
21770 /* C code for the stuff we're doing below:
21771 double xa = fabs (x);
21772 if (!isless (xa, TWO52))
21774 xa = (double)(long)(xa + nextafter (0.5, 0.0));
21775 return copysign (xa, x);
21777 enum machine_mode mode = GET_MODE (operand0);
21778 rtx res, TWO52, xa, label, xi, half, mask;
21779 const struct real_format *fmt;
21780 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
21782 /* Temporary for holding the result, initialized to the input
21783 operand to ease control flow. */
21784 res = gen_reg_rtx (mode);
21785 emit_move_insn (res, operand1);
21787 TWO52 = ix86_gen_TWO52 (mode);
21788 xa = ix86_expand_sse_fabs (res, &mask);
21789 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
21791 /* load nextafter (0.5, 0.0) */
21792 fmt = REAL_MODE_FORMAT (mode);
21793 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
21794 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
21796 /* xa = xa + 0.5 */
21797 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
21798 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
21800 /* xa = (double)(int64_t)xa */
21801 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
21802 expand_fix (xi, xa, 0);
21803 expand_float (xa, xi, 0);
21805 /* res = copysign (xa, operand1) */
21806 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
21808 emit_label (label);
21809 LABEL_NUSES (label) = 1;
21811 emit_move_insn (operand0, res);
21815 /* Table of valid machine attributes. */
21816 static const struct attribute_spec ix86_attribute_table[] =
21818 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
21819 /* Stdcall attribute says callee is responsible for popping arguments
21820 if they are not variable. */
21821 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
21822 /* Fastcall attribute says callee is responsible for popping arguments
21823 if they are not variable. */
21824 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
21825 /* Cdecl attribute says the callee is a normal C declaration */
21826 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
21827 /* Regparm attribute specifies how many integer arguments are to be
21828 passed in registers. */
21829 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
21830 /* Sseregparm attribute says we are using x86_64 calling conventions
21831 for FP arguments. */
21832 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
21833 /* force_align_arg_pointer says this function realigns the stack at entry. */
21834 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
21835 false, true, true, ix86_handle_cconv_attribute },
21836 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
21837 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
21838 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
21839 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
21841 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
21842 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
21843 #ifdef SUBTARGET_ATTRIBUTE_TABLE
21844 SUBTARGET_ATTRIBUTE_TABLE,
21846 { NULL, 0, 0, false, false, false, NULL }
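/* Usage example (illustrative):
     int __attribute__ ((regparm (3))) f (int a, int b, int c);
   requests that the first three integer arguments be passed in EAX,
   EDX and ECX rather than on the stack.  */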
21849 /* Initialize the GCC target structure. */
21850 #undef TARGET_ATTRIBUTE_TABLE
21851 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
21852 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
21853 # undef TARGET_MERGE_DECL_ATTRIBUTES
21854 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
21857 #undef TARGET_COMP_TYPE_ATTRIBUTES
21858 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
21860 #undef TARGET_INIT_BUILTINS
21861 #define TARGET_INIT_BUILTINS ix86_init_builtins
21862 #undef TARGET_EXPAND_BUILTIN
21863 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
21865 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
21866 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ix86_builtin_vectorized_function
21867 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
21868 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_builtin_conversion
21870 #undef TARGET_ASM_FUNCTION_EPILOGUE
21871 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
21873 #undef TARGET_ENCODE_SECTION_INFO
21874 #ifndef SUBTARGET_ENCODE_SECTION_INFO
21875 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
21877 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
21880 #undef TARGET_ASM_OPEN_PAREN
21881 #define TARGET_ASM_OPEN_PAREN ""
21882 #undef TARGET_ASM_CLOSE_PAREN
21883 #define TARGET_ASM_CLOSE_PAREN ""
21885 #undef TARGET_ASM_ALIGNED_HI_OP
21886 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
21887 #undef TARGET_ASM_ALIGNED_SI_OP
21888 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
21890 #undef TARGET_ASM_ALIGNED_DI_OP
21891 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
21894 #undef TARGET_ASM_UNALIGNED_HI_OP
21895 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
21896 #undef TARGET_ASM_UNALIGNED_SI_OP
21897 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
21898 #undef TARGET_ASM_UNALIGNED_DI_OP
21899 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
21901 #undef TARGET_SCHED_ADJUST_COST
21902 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
21903 #undef TARGET_SCHED_ISSUE_RATE
21904 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
21905 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
21906 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
21907 ia32_multipass_dfa_lookahead
21909 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
21910 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
21913 #undef TARGET_HAVE_TLS
21914 #define TARGET_HAVE_TLS true
21916 #undef TARGET_CANNOT_FORCE_CONST_MEM
21917 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
21918 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
21919 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
21921 #undef TARGET_DELEGITIMIZE_ADDRESS
21922 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
21924 #undef TARGET_MS_BITFIELD_LAYOUT_P
21925 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
21928 #undef TARGET_BINDS_LOCAL_P
21929 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
21931 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
21932 #undef TARGET_BINDS_LOCAL_P
21933 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
21936 #undef TARGET_ASM_OUTPUT_MI_THUNK
21937 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
21938 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
21939 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
21941 #undef TARGET_ASM_FILE_START
21942 #define TARGET_ASM_FILE_START x86_file_start
21944 #undef TARGET_DEFAULT_TARGET_FLAGS
21945 #define TARGET_DEFAULT_TARGET_FLAGS \
21947 | TARGET_64BIT_DEFAULT \
21948 | TARGET_SUBTARGET_DEFAULT \
21949 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
21951 #undef TARGET_HANDLE_OPTION
21952 #define TARGET_HANDLE_OPTION ix86_handle_option
21954 #undef TARGET_RTX_COSTS
21955 #define TARGET_RTX_COSTS ix86_rtx_costs
21956 #undef TARGET_ADDRESS_COST
21957 #define TARGET_ADDRESS_COST ix86_address_cost
21959 #undef TARGET_FIXED_CONDITION_CODE_REGS
21960 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
21961 #undef TARGET_CC_MODES_COMPATIBLE
21962 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
21964 #undef TARGET_MACHINE_DEPENDENT_REORG
21965 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
21967 #undef TARGET_BUILD_BUILTIN_VA_LIST
21968 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
21970 #undef TARGET_MD_ASM_CLOBBERS
21971 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
21973 #undef TARGET_PROMOTE_PROTOTYPES
21974 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
21975 #undef TARGET_STRUCT_VALUE_RTX
21976 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
21977 #undef TARGET_SETUP_INCOMING_VARARGS
21978 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
21979 #undef TARGET_MUST_PASS_IN_STACK
21980 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
21981 #undef TARGET_PASS_BY_REFERENCE
21982 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
21983 #undef TARGET_INTERNAL_ARG_POINTER
21984 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
21985 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
21986 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
21987 #undef TARGET_STRICT_ARGUMENT_NAMING
21988 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
21990 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
21991 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
21993 #undef TARGET_SCALAR_MODE_SUPPORTED_P
21994 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
21996 #undef TARGET_VECTOR_MODE_SUPPORTED_P
21997 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
22000 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
22001 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
22004 #ifdef SUBTARGET_INSERT_ATTRIBUTES
22005 #undef TARGET_INSERT_ATTRIBUTES
22006 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
22009 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
22010 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
22012 #undef TARGET_STACK_PROTECT_FAIL
22013 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
22015 #undef TARGET_FUNCTION_VALUE
22016 #define TARGET_FUNCTION_VALUE ix86_function_value
22018 struct gcc_target targetm = TARGET_INITIALIZER;
22020 #include "gt-i386.h"