/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"
#include "debug.h"
#include "dwarf2out.h"
#include "sched-int.h"

typedef struct block_info_def
{
  /* TRUE if the upper 128bits of any AVX registers are live at exit.  */
  bool upper_128bits_set;
  /* TRUE if block has been processed.  */
  bool done;
} *block_info;

#define BLOCK_INFO(B)	((block_info) (B)->aux)
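
/* Usage sketch (explanatory note, not original text): the pass driver
   below calls alloc_aux_for_blocks (sizeof (struct block_info_def)),
   which makes every basic_block's aux field point at one of these
   records; BLOCK_INFO (bb) casts it back, so per-block state is read
   and written as, e.g., BLOCK_INFO (bb)->done = true.  */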

enum call_avx256_state
{
  /* Callee returns 256bit AVX register.  */
  callee_return_avx256 = -1,
  /* Callee returns and passes 256bit AVX register.  */
  callee_return_pass_avx256,
  /* Callee passes 256bit AVX register.  */
  callee_pass_avx256,
  /* Callee doesn't return nor pass 256bit AVX register, or no
     256bit AVX register in function return.  */
  call_no_avx256,
  /* vzeroupper intrinsic.  */
  vzeroupper_intrinsic
};
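
/* Illustrative note (not original text): the vzeroupper expander encodes
   one of these call_avx256_state values as the operand of the
   UNSPECV_VZEROUPPER unspec_volatile, and move_or_delete_vzeroupper_2
   below recovers it with

     avx256 = INTVAL (XVECEXP (pat, 0, 0));

   so, e.g., a call known to both pass and return a 256bit AVX value is
   tagged callee_return_pass_avx256 and the upper 128bits stay live
   across it.  */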

/* Check if a 256bit AVX register is referenced in stores.  */

static void
check_avx256_stores (rtx dest, const_rtx set, void *data)
{
  if ((REG_P (dest)
       && VALID_AVX256_REG_MODE (GET_MODE (dest)))
      || (GET_CODE (set) == SET
	  && REG_P (SET_SRC (set))
	  && VALID_AVX256_REG_MODE (GET_MODE (SET_SRC (set)))))
    {
      bool *upper_128bits_set = (bool *) data;
      *upper_128bits_set = true;
    }
}

/* Helper function for move_or_delete_vzeroupper_1.  Look for vzeroupper
   in basic block BB.  Delete it if upper 128bit AVX registers are
   unused.  If it isn't deleted, move it to just before a jump insn.

   UPPER_128BITS_SET is TRUE if the upper 128bits of any AVX registers
   are live at entry.  */

static void
move_or_delete_vzeroupper_2 (basic_block bb, bool upper_128bits_set)
{
  rtx insn;
  rtx vzeroupper_insn = NULL_RTX;
  rtx pat;
  int avx256;

  if (dump_file)
    fprintf (dump_file, " BB [%i] entry: upper 128bits: %d\n",
	     bb->index, upper_128bits_set);

  insn = BB_HEAD (bb);
  while (insn != BB_END (bb))
    {
      insn = NEXT_INSN (insn);

      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Move vzeroupper before jump/call.  */
      if (JUMP_P (insn) || CALL_P (insn))
	{
	  if (!vzeroupper_insn)
	    continue;

	  if (PREV_INSN (insn) != vzeroupper_insn)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Move vzeroupper after:\n");
		  print_rtl_single (dump_file, PREV_INSN (insn));
		  fprintf (dump_file, "before:\n");
		  print_rtl_single (dump_file, insn);
		}
	      reorder_insns_nobb (vzeroupper_insn, vzeroupper_insn,
				  PREV_INSN (insn));
	    }
	  vzeroupper_insn = NULL_RTX;
	  continue;
	}

      pat = PATTERN (insn);

      /* Check insn for vzeroupper intrinsic.  */
      if (GET_CODE (pat) == UNSPEC_VOLATILE
	  && XINT (pat, 1) == UNSPECV_VZEROUPPER)
	{
	  if (dump_file)
	    {
	      /* Found vzeroupper intrinsic.  */
	      fprintf (dump_file, "Found vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	}
      else
	{
	  /* Check insn for vzeroall intrinsic.  */
	  if (GET_CODE (pat) == PARALLEL
	      && GET_CODE (XVECEXP (pat, 0, 0)) == UNSPEC_VOLATILE
	      && XINT (XVECEXP (pat, 0, 0), 1) == UNSPECV_VZEROALL)
	    {
	      upper_128bits_set = false;

	      /* Delete pending vzeroupper insertion.  */
	      if (vzeroupper_insn)
		{
		  delete_insn (vzeroupper_insn);
		  vzeroupper_insn = NULL_RTX;
		}
	    }
	  else if (!upper_128bits_set)
	    note_stores (pat, check_avx256_stores, &upper_128bits_set);
	  continue;
	}

      /* Process vzeroupper intrinsic.  */
      avx256 = INTVAL (XVECEXP (pat, 0, 0));

      if (!upper_128bits_set)
	{
	  /* Since the upper 128bits are cleared, callee must not pass
	     256bit AVX register.  We only need to check if callee
	     returns 256bit AVX register.  */
	  upper_128bits_set = (avx256 == callee_return_avx256);

	  /* Remove unnecessary vzeroupper since
	     upper 128bits are cleared.  */
	  if (dump_file)
	    {
	      fprintf (dump_file, "Delete redundant vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	  delete_insn (insn);
	}
      else if (avx256 == callee_return_pass_avx256
	       || avx256 == callee_pass_avx256)
	{
	  /* Callee passes 256bit AVX register.  Check if callee
	     returns 256bit AVX register.  */
	  upper_128bits_set = (avx256 == callee_return_pass_avx256);

	  /* Must remove vzeroupper since
	     callee passes in 256bit AVX register.  */
	  if (dump_file)
	    {
	      fprintf (dump_file, "Delete callee pass vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	  delete_insn (insn);
	}
      else
	{
	  upper_128bits_set = false;
	  vzeroupper_insn = insn;
	}
    }

  BLOCK_INFO (bb)->upper_128bits_set = upper_128bits_set;

  if (dump_file)
    fprintf (dump_file, " BB [%i] exit: upper 128bits: %d\n",
	     bb->index, upper_128bits_set);
}

/* Helper function for move_or_delete_vzeroupper.  Process vzeroupper
   in BLOCK and its predecessor blocks recursively.  */

static void
move_or_delete_vzeroupper_1 (basic_block block)
{
  edge e;
  edge_iterator ei;
  bool upper_128bits_set;

  if (dump_file)
    fprintf (dump_file, " Process BB [%i]: status: %d\n",
	     block->index, BLOCK_INFO (block)->done);

  if (BLOCK_INFO (block)->done)
    return;

  BLOCK_INFO (block)->done = true;

  upper_128bits_set = false;

  /* Process all predecessor edges of this block.  */
  FOR_EACH_EDGE (e, ei, block->preds)
    {
      if (e->src == block)
	continue;
      move_or_delete_vzeroupper_1 (e->src);
      if (BLOCK_INFO (e->src)->upper_128bits_set)
	upper_128bits_set = true;
    }

  /* Process this block.  */
  move_or_delete_vzeroupper_2 (block, upper_128bits_set);
}

/* Go through the instruction stream looking for vzeroupper.  Delete
   it if upper 128bit AVX registers are unused.  If it isn't deleted,
   move it to just before a jump insn.  */

static void
move_or_delete_vzeroupper (void)
{
  edge e;
  edge_iterator ei;

  /* Set up block info for each basic block.  */
  alloc_aux_for_blocks (sizeof (struct block_info_def));

  /* Process successor blocks of all entry points.  */
  if (dump_file)
    fprintf (dump_file, "Process all entry points\n");

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      move_or_delete_vzeroupper_2 (e->dest,
				   cfun->machine->caller_pass_avx256_p);
      BLOCK_INFO (e->dest)->done = true;
    }

  /* Process predecessor blocks of all exit points.  */
  if (dump_file)
    fprintf (dump_file, "Process all exit points\n");

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    move_or_delete_vzeroupper_1 (e->src);

  free_aux_for_blocks ();
}
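
/* Worked example (sketch, not original text): if the caller passes no
   256bit AVX register, each entry successor starts with
   upper_128bits_set == false, so a vzeroupper guarding a call tagged
   call_no_avx256 is deleted as redundant.  Conversely, once an insn
   stores a 256bit AVX register, check_avx256_stores flips the flag and
   the next vzeroupper is kept and hoisted to just before the following
   jump or call.  */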

static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)				\
  ((mode) == QImode ? 0					\
   : (mode) == HImode ? 1				\
   : (mode) == SImode ? 2				\
   : (mode) == DImode ? 3				\
   : 4)
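
/* Usage sketch (illustrative): the cost of starting an SImode multiply
   for the active tuning is looked up as

     ix86_cost->mult_init[MODE_INDEX (SImode)]

   i.e. index 2; the trailing index 4 ("other") catches any mode not
   listed above.  */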

/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
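
/* Worked example: with COSTS_N_INSNS (N) defined as (N) * 4, an add costs
   COSTS_N_INSNS (1) == 4 units in the speed tables, while the same slot in
   ix86_size_cost below holds COSTS_N_BYTES (2) == 4, the 2-byte encoding
   of an add.  Both metrics therefore share the same scale of 4 units per
   add.  */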

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
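
/* How to read the stringop tables below (explanatory note): each
   stringop_algs value is {alg_for_unknown_size, {{max_0, alg_0}, ...}};
   a known block size of at most max_0 bytes uses alg_0, larger sizes
   fall through to the next pair, and a terminating max of -1 means "no
   upper bound".  Each cost table carries one such value for 32bit code
   and one for 64bit code; DUMMY_STRINGOP_ALGS fills the variant a
   tuning never generates.  */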

const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/*				 HI */
   COSTS_N_BYTES (3),			/*				 SI */
   COSTS_N_BYTES (3),			/*				 DI */
   COSTS_N_BYTES (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/*			    HI */
   COSTS_N_BYTES (3),			/*			    SI */
   COSTS_N_BYTES (3),			/*			    DI */
   COSTS_N_BYTES (5)},			/*			 other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  0,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache  */
  0,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  1,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  1,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/*				 HI */
   COSTS_N_INSNS (6),			/*				 SI */
   COSTS_N_INSNS (6),			/*				 DI */
   COSTS_N_INSNS (6)},			/*			      other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/*			    HI */
   COSTS_N_INSNS (23),			/*			    SI */
   COSTS_N_INSNS (23),			/*			    DI */
   COSTS_N_INSNS (23)},			/*			 other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache  */
  0,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/*				 HI */
   COSTS_N_INSNS (12),			/*				 SI */
   COSTS_N_INSNS (12),			/*				 DI */
   COSTS_N_INSNS (12)},			/*			      other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/*			    HI */
   COSTS_N_INSNS (40),			/*			    SI */
   COSTS_N_INSNS (40),			/*			    DI */
   COSTS_N_INSNS (40)},			/*			 other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  4,					/* size of l1 cache.  486 has 8kB cache
					   shared for code and data, so 4kB is
					   not really precise.  */
  4,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/*				 HI */
   COSTS_N_INSNS (11),			/*				 SI */
   COSTS_N_INSNS (11),			/*				 DI */
   COSTS_N_INSNS (11)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/*			    HI */
   COSTS_N_INSNS (25),			/*			    SI */
   COSTS_N_INSNS (25),			/*			    DI */
   COSTS_N_INSNS (25)},			/*			 other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  8,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (4),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (4)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/*			    HI */
   COSTS_N_INSNS (17),			/*			    SI */
   COSTS_N_INSNS (17),			/*			    DI */
   COSTS_N_INSNS (17)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache  */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks an inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time in
     the CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (7),			/*				 SI */
   COSTS_N_INSNS (7),			/*				 DI */
   COSTS_N_INSNS (7)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/*			    HI */
   COSTS_N_INSNS (39),			/*			    SI */
   COSTS_N_INSNS (39),			/*			    DI */
   COSTS_N_INSNS (39)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */

  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  128,					/* size of l2 cache.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (3),			/*				 DI */
   COSTS_N_INSNS (3)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/*			    HI */
   COSTS_N_INSNS (18),			/*			    SI */
   COSTS_N_INSNS (18),			/*			    DI */
   COSTS_N_INSNS (18)},			/*			 other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  32,					/* size of l2 cache.  Some models
					   have integrated l2 cache, but
					   optimizing for k6 is not important
					   enough to worry about that.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/*				 HI */
   COSTS_N_INSNS (5),			/*				 SI */
   COSTS_N_INSNS (5),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon handles the REP prefix (relative to loops)
     better than K8 does.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* K8 has optimized REP instructions for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do non-temporal accesses and beat inline code
     considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  5,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  3,					/* vec_unalign_load_cost.  */
  3,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  2,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/*			    HI */
   COSTS_N_INSNS (51),			/*			    SI */
   COSTS_N_INSNS (83),			/*			    DI */
   COSTS_N_INSNS (83)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instructions for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks,
     a libcall can do non-temporal accesses and beat inline code
     considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (4),			/*				 SI */
   COSTS_N_INSNS (6),			/*				 DI */
   COSTS_N_INSNS (6)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/*			    HI */
   COSTS_N_INSNS (51),			/*			    SI */
   COSTS_N_INSNS (83),			/*			    DI */
   COSTS_N_INSNS (83)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER1 has optimized REP instructions for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks,
     a libcall can do non-temporal accesses and beat inline code
     considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/*				 HI */
   COSTS_N_INSNS (15),			/*				 SI */
   COSTS_N_INSNS (15),			/*				 DI */
   COSTS_N_INSNS (15)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/*			    HI */
   COSTS_N_INSNS (56),			/*			    SI */
   COSTS_N_INSNS (56),			/*			    DI */
   COSTS_N_INSNS (56)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
   {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/*				 HI */
   COSTS_N_INSNS (10),			/*				 SI */
   COSTS_N_INSNS (10),			/*				 DI */
   COSTS_N_INSNS (10)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/*			    HI */
   COSTS_N_INSNS (66),			/*			    SI */
   COSTS_N_INSNS (66),			/*			    DI */
   COSTS_N_INSNS (66)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
   {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (3),			/*				 DI */
   COSTS_N_INSNS (3)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),			/*			    HI */
   COSTS_N_INSNS (22),			/*			    SI */
   COSTS_N_INSNS (22),			/*			    DI */
   COSTS_N_INSNS (22)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  16,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {6, 6, 6},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {6, 6},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {6, 6, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (2)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

/* Generic64 should produce code tuned for Nocona and K8.  */

static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  /* On all chips taken into consideration, lea takes 2 cycles or more.
     With that cost, however, our current implementation of synth_mult
     uses unnecessary temporary registers, causing regressions on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (2)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when
     this value is increased to the perhaps more appropriate value of 5.  */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

/* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
   Core 2 and K8.  */

static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (2)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

1591 const struct processor_costs *ix86_cost = &pentium_cost;
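/* Editorial note: COSTS_N_INSNS (from rtl.h) expresses a cost as a
   multiple of a single fast instruction:

     #define COSTS_N_INSNS(N) ((N) * 4)

   so e.g. COSTS_N_INSNS (20) for FDIV in the tables above means roughly
   twenty times the cost of an integer add.  */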
1593 /* Processor feature/optimization bitmasks. */
1594 #define m_386 (1<<PROCESSOR_I386)
1595 #define m_486 (1<<PROCESSOR_I486)
1596 #define m_PENT (1<<PROCESSOR_PENTIUM)
1597 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1598 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1599 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1600 #define m_CORE2 (1<<PROCESSOR_CORE2)
1601 #define m_COREI7_32 (1<<PROCESSOR_COREI7_32)
1602 #define m_COREI7_64 (1<<PROCESSOR_COREI7_64)
1603 #define m_ATOM (1<<PROCESSOR_ATOM)
1605 #define m_GEODE (1<<PROCESSOR_GEODE)
1606 #define m_K6 (1<<PROCESSOR_K6)
1607 #define m_K6_GEODE (m_K6 | m_GEODE)
1608 #define m_K8 (1<<PROCESSOR_K8)
1609 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1610 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1611 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1612 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1613 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1)
1615 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32 | m_COREI7_32)
1616 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64 | m_COREI7_64)
1618 /* Generic instruction choice should be common subset of supported CPUs
1619 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1620 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
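/* Editorial sketch: each mask row of initial_ix86_tune_features below is
   folded into the boolean array ix86_tune_features[] once the tuning
   target is known (see ix86_option_override_internal), and i386.h then
   exposes each slot through an accessor macro along these lines (the
   macro shown is illustrative of the naming convention):

     #define TARGET_USE_LEAVE  ix86_tune_features[X86_TUNE_USE_LEAVE]  */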
1622 /* Feature tests against the various tunings. */
1623 unsigned char ix86_tune_features[X86_TUNE_LAST];
1625 /* Feature tests against the various tunings used to create ix86_tune_features
1626 based on the processor mask. */
1627 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1628   /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1629      negatively, so enabling it for Generic64 seems like a good code size
1630      tradeoff.  We can't enable it for 32bit generic because it does not
1631      work well with PPro based chips.  */
1632 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1634 /* X86_TUNE_PUSH_MEMORY */
1635 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1636 | m_NOCONA | m_CORE2 | m_GENERIC,
1638 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1641 /* X86_TUNE_UNROLL_STRLEN */
1642 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1643 | m_CORE2 | m_GENERIC,
1645 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1646 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1648   /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1649      on simulation result.  But after P4 was made, no performance benefit
1650      was observed with branch hints.  They also increase the code size.
1651      As a result, icc never generates branch hints.  */
1654 /* X86_TUNE_DOUBLE_WITH_ADD */
1657 /* X86_TUNE_USE_SAHF */
1658 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_PENT4
1659 | m_NOCONA | m_CORE2 | m_GENERIC,
1661 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1662 partial dependencies. */
1663 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1664 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1666   /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1667      register stalls on the Generic32 compilation setting as well.  However,
1668      in the current implementation the partial register stalls are not
1669      eliminated very well - they can be introduced via subregs synthesized
1670      by combine and can happen in caller/callee saving sequences.  Because
1671      this option pays back little on PPro based chips and is in conflict
1672      with partial reg dependencies used by Athlon/P4 based chips, it is
1673      better to leave it off for generic32 for now.  */
1676 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1677 m_CORE2 | m_GENERIC,
1679 /* X86_TUNE_USE_HIMODE_FIOP */
1680 m_386 | m_486 | m_K6_GEODE,
1682 /* X86_TUNE_USE_SIMODE_FIOP */
1683 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1685 /* X86_TUNE_USE_MOV0 */
1688 /* X86_TUNE_USE_CLTD */
1689 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1691 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1694 /* X86_TUNE_SPLIT_LONG_MOVES */
1697 /* X86_TUNE_READ_MODIFY_WRITE */
1700 /* X86_TUNE_READ_MODIFY */
1703 /* X86_TUNE_PROMOTE_QIMODE */
1704 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1705 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1707 /* X86_TUNE_FAST_PREFIX */
1708 ~(m_PENT | m_486 | m_386),
1710 /* X86_TUNE_SINGLE_STRINGOP */
1711 m_386 | m_PENT4 | m_NOCONA,
1713 /* X86_TUNE_QIMODE_MATH */
1716   /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1717      register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL this option
1718      might be considered for Generic32 if our scheme for avoiding partial
1719      stalls were more effective.  */
1722 /* X86_TUNE_PROMOTE_QI_REGS */
1725 /* X86_TUNE_PROMOTE_HI_REGS */
1728 /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
1729 over esp addition. */
1730 m_386 | m_486 | m_PENT | m_PPRO,
1732 /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
1733 over esp addition. */
1736 /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
1737 over esp subtraction. */
1738 m_386 | m_486 | m_PENT | m_K6_GEODE,
1740   /* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
1741 over esp subtraction. */
1742 m_PENT | m_K6_GEODE,
1744 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1745 for DFmode copies */
1746 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1747 | m_GENERIC | m_GEODE),
1749 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1750 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1752   /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1753      conflict here in between PPro/Pentium4 based chips that treat 128bit
1754      SSE registers as single units versus K8 based chips that divide SSE
1755      registers into two 64bit halves.  This knob promotes all store
1756      destinations to be 128bit to allow register renaming on 128bit SSE
1757      units, but usually results in one extra microop on 64bit SSE units.
1758      Experimental results show that disabling this option on P4 brings over
1759      20% SPECfp regression, while enabling it on K8 brings roughly 2.4%
1760      regression that can be partly masked by careful scheduling of moves.  */
1761 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1762 | m_AMDFAM10 | m_BDVER1,
1764 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1765 m_AMDFAM10 | m_BDVER1,
1767 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1770 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1773   /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1774      are resolved on SSE register parts instead of whole registers, so we may
1775      maintain just the lower part of scalar values in proper format, leaving
1776      the upper part undefined.  */
1779 /* X86_TUNE_SSE_TYPELESS_STORES */
1782 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1783 m_PPRO | m_PENT4 | m_NOCONA,
1785 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1786 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1788 /* X86_TUNE_PROLOGUE_USING_MOVE */
1789 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1791 /* X86_TUNE_EPILOGUE_USING_MOVE */
1792 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1794 /* X86_TUNE_SHIFT1 */
1797 /* X86_TUNE_USE_FFREEP */
1800 /* X86_TUNE_INTER_UNIT_MOVES */
1801 ~(m_AMD_MULTIPLE | m_GENERIC),
1803 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1804 ~(m_AMDFAM10 | m_BDVER1),
1806 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1807 than 4 branch instructions in the 16 byte window. */
1808 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1811 /* X86_TUNE_SCHEDULE */
1812 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1815 /* X86_TUNE_USE_BT */
1816 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1818 /* X86_TUNE_USE_INCDEC */
1819 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1821 /* X86_TUNE_PAD_RETURNS */
1822 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1824   /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions.  */
1827 /* X86_TUNE_EXT_80387_CONSTANTS */
1828 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1829 | m_CORE2 | m_GENERIC,
1831 /* X86_TUNE_SHORTEN_X87_SSE */
1834 /* X86_TUNE_AVOID_VECTOR_DECODE */
1837   /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
1838      HImode and SImode multiply, but 386 and 486 do HImode multiply faster.  */
1841 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1842 vector path on AMD machines. */
1843 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1845   /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1846      machines.  */
1847   m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1849   /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1850      than a MOV.  */
1853 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1854 but one byte longer. */
1857 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1858 operand that cannot be represented using a modRM byte. The XOR
1859 replacement is long decoded, so this split helps here as well. */
1862   /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1863      from FP to FP.  */
1864   m_AMDFAM10 | m_GENERIC,
1866 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1867 from integer to FP. */
1870 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1871 with a subsequent conditional jump instruction into a single
1872 compare-and-branch uop. */
1875 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1876 will impact LEA instruction selection. */
1879   /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
1880      instructions.  */
1884 /* Feature tests against the various architecture variations. */
1885 unsigned char ix86_arch_features[X86_ARCH_LAST];
1887 /* Feature tests against the various architecture variations, used to create
1888 ix86_arch_features based on the processor mask. */
1889 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1890 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1891 ~(m_386 | m_486 | m_PENT | m_K6),
1893 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1896 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1899 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1902 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1906 static const unsigned int x86_accumulate_outgoing_args
1907 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1910 static const unsigned int x86_arch_always_fancy_math_387
1911 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1912 | m_NOCONA | m_CORE2 | m_GENERIC;
1914 static enum stringop_alg stringop_alg = no_stringop;
1916 /* In case the average insn count for single function invocation is
1917    lower than this constant, emit fast (but longer) prologue and
1918    epilogue.  */
1919 #define FAST_PROLOGUE_INSN_COUNT 20
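/* Editorial sketch (hypothetical helper; the real consumer is the
   prologue/epilogue expansion code later in this file): the threshold
   is meant to be used as a simple size heuristic.  */
static bool
sketch_use_fast_prologue_p (int function_insn_count)
{
  /* Short functions trade a few extra bytes for a faster entry/exit.  */
  return function_insn_count < FAST_PROLOGUE_INSN_COUNT;
}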
1921 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1922 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1923 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1924 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1926 /* Array of the smallest class containing reg number REGNO, indexed by
1927 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1929 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1931 /* ax, dx, cx, bx */
1932 AREG, DREG, CREG, BREG,
1933 /* si, di, bp, sp */
1934 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1936 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1937 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1940 /* flags, fpsr, fpcr, frame */
1941 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1943 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1946 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1949 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1950 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1951 /* SSE REX registers */
1952 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1956 /* The "default" register map used in 32bit mode. */
1958 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1960 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1961 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1962 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1963 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1964 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1965 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1966 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1969 /* The "default" register map used in 64bit mode. */
1971 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1973 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1974 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1975 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1976 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1977 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1978   8, 9, 10, 11, 12, 13, 14, 15,		/* extended integer registers */
1979 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1982 /* Define the register numbers to be used in Dwarf debugging information.
1983 The SVR4 reference port C compiler uses the following register numbers
1984 in its Dwarf output code:
1985 0 for %eax (gcc regno = 0)
1986 1 for %ecx (gcc regno = 2)
1987 2 for %edx (gcc regno = 1)
1988 3 for %ebx (gcc regno = 3)
1989 4 for %esp (gcc regno = 7)
1990 5 for %ebp (gcc regno = 6)
1991 6 for %esi (gcc regno = 4)
1992 7 for %edi (gcc regno = 5)
1993 The following three DWARF register numbers are never generated by
1994 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1995 believes these numbers have these meanings.
1996 8 for %eip (no gcc equivalent)
1997 9 for %eflags (gcc regno = 17)
1998 10 for %trapno (no gcc equivalent)
1999 It is not at all clear how we should number the FP stack registers
2000 for the x86 architecture. If the version of SDB on x86/svr4 were
2001 a bit less brain dead with respect to floating-point then we would
2002 have a precedent to follow with respect to DWARF register numbers
2003 for x86 FP registers, but the SDB on x86/svr4 is so completely
2004 broken with respect to FP registers that it is hardly worth thinking
2005 of it as something to strive for compatibility with.
2006 The version of x86/svr4 SDB I have at the moment does (partially)
2007 seem to believe that DWARF register number 11 is associated with
2008 the x86 register %st(0), but that's about all. Higher DWARF
2009 register numbers don't seem to be associated with anything in
2010 particular, and even for DWARF regno 11, SDB only seems to under-
2011 stand that it should say that a variable lives in %st(0) (when
2012 asked via an `=' command) if we said it was in DWARF regno 11,
2013 but SDB still prints garbage when asked for the value of the
2014 variable in question (via a `/' command).
2015 (Also note that the labels SDB prints for various FP stack regs
2016 when doing an `x' command are all wrong.)
2017 Note that these problems generally don't affect the native SVR4
2018 C compiler because it doesn't allow the use of -O with -g and
2019 because when it is *not* optimizing, it allocates a memory
2020 location for each floating-point variable, and the memory
2021 location is what gets described in the DWARF AT_location
2022 attribute for the variable in question.
2023 Regardless of the severe mental illness of the x86/svr4 SDB, we
2024 do something sensible here and we use the following DWARF
2025 register numbers. Note that these are all stack-top-relative
2027 11 for %st(0) (gcc regno = 8)
2028 12 for %st(1) (gcc regno = 9)
2029 13 for %st(2) (gcc regno = 10)
2030 14 for %st(3) (gcc regno = 11)
2031 15 for %st(4) (gcc regno = 12)
2032 16 for %st(5) (gcc regno = 13)
2033 17 for %st(6) (gcc regno = 14)
2034 18 for %st(7) (gcc regno = 15)
2036 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
2038 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
2039 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
2040 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2041 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
2042 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
2043 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2044 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
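/* Editorial sketch: the three maps above are consulted through
   DBX_REGISTER_NUMBER in i386.h, roughly as follows (an illustrative
   reconstruction, hence the _SKETCH suffix):

     #define DBX_REGISTER_NUMBER_SKETCH(N) \
       (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)])

   e.g. gcc regno 1 (%edx) maps to DWARF regno 2 in 32-bit code.  */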
2047 /* Define parameter passing and return registers. */
2049 static int const x86_64_int_parameter_registers[6] =
2051 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
2054 static int const x86_64_ms_abi_int_parameter_registers[4] =
2056 CX_REG, DX_REG, R8_REG, R9_REG
2059 static int const x86_64_int_return_registers[4] =
2061 AX_REG, DX_REG, DI_REG, SI_REG
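/* Editorial example: for a SysV x86-64 call such as
     long f (long a, long b, long c);
   A, B and C land in %rdi, %rsi and %rdx (the first three entries of
   x86_64_int_parameter_registers) and the result comes back in %rax
   (x86_64_int_return_registers[0]); the MS ABI variant of the same call
   uses %rcx, %rdx and %r8 instead.  */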
2064 /* Define the structure for the machine field in struct function. */
2066 struct GTY(()) stack_local_entry {
2067 unsigned short mode;
2070 struct stack_local_entry *next;
2073 /* Structure describing stack frame layout.
2074 Stack grows downward:
2080 saved static chain if ix86_static_chain_on_stack
2082 saved frame pointer if frame_pointer_needed
2083 <- HARD_FRAME_POINTER
2089 <- sse_regs_save_offset
2092 [va_arg registers] |
2096 [padding2] | = to_allocate
2105 int outgoing_arguments_size;
2106 HOST_WIDE_INT frame;
2108 /* The offsets relative to ARG_POINTER. */
2109 HOST_WIDE_INT frame_pointer_offset;
2110 HOST_WIDE_INT hard_frame_pointer_offset;
2111 HOST_WIDE_INT stack_pointer_offset;
2112 HOST_WIDE_INT hfp_save_offset;
2113 HOST_WIDE_INT reg_save_offset;
2114 HOST_WIDE_INT sse_reg_save_offset;
2116 /* When save_regs_using_mov is set, emit prologue using
2117 move instead of push instructions. */
2118 bool save_regs_using_mov;
2121 /* Code model option. */
2122 enum cmodel ix86_cmodel;
2124 enum asm_dialect ix86_asm_dialect = ASM_ATT;
2126 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
2128 /* Which unit we are generating floating point math for. */
2129 enum fpmath_unit ix86_fpmath;
2131 /* Which cpu are we scheduling for. */
2132 enum attr_cpu ix86_schedule;
2134 /* Which cpu are we optimizing for. */
2135 enum processor_type ix86_tune;
2137 /* Which instruction set architecture to use. */
2138 enum processor_type ix86_arch;
2140 /* true if sse prefetch instruction is not NOOP. */
2141 int x86_prefetch_sse;
2143 /* ix86_regparm_string as a number */
2144 static int ix86_regparm;
2146 /* -mstackrealign option */
2147 static const char ix86_force_align_arg_pointer_string[]
2148 = "force_align_arg_pointer";
2150 static rtx (*ix86_gen_leave) (void);
2151 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
2152 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
2153 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
2154 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
2155 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
2156 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
2157 static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
2158 static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
2159 static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
2161 /* Preferred alignment for stack boundary in bits. */
2162 unsigned int ix86_preferred_stack_boundary;
2164 /* Alignment for incoming stack boundary in bits specified at
2166 static unsigned int ix86_user_incoming_stack_boundary;
2168 /* Default alignment for incoming stack boundary in bits. */
2169 static unsigned int ix86_default_incoming_stack_boundary;
2171 /* Alignment for incoming stack boundary in bits. */
2172 unsigned int ix86_incoming_stack_boundary;
2174 /* The abi used by target. */
2175 enum calling_abi ix86_abi;
2177 /* Values 1-5: see jump.c */
2178 int ix86_branch_cost;
2180 /* Calling abi specific va_list type nodes. */
2181 static GTY(()) tree sysv_va_list_type_node;
2182 static GTY(()) tree ms_va_list_type_node;
2184 /* Variables which are this size or smaller are put in the data/bss
2185 or ldata/lbss sections. */
2187 int ix86_section_threshold = 65536;
2189 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
2190 char internal_label_prefix[16];
2191 int internal_label_prefix_len;
2193 /* Fence to use after loop using movnt. */
2196 /* Register class used for passing a given 64bit part of the argument.
2197    These represent classes as documented by the PS ABI, with the exception
2198    of the SSESF and SSEDF classes, which are basically the SSE class: gcc
2199    just uses SF or DFmode moves instead of DImode to avoid reformatting
2201    penalties.  Similarly we play games with INTEGERSI_CLASS to use cheaper
2202    SImode moves whenever possible (the upper half does contain padding).  */
2203 enum x86_64_reg_class
2206 X86_64_INTEGER_CLASS,
2207 X86_64_INTEGERSI_CLASS,
2214 X86_64_COMPLEX_X87_CLASS,
2218 #define MAX_CLASSES 4
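/* Editorial example of the classification scheme: per the psABI,
     struct { double d; long l; };
   occupies two eightbytes and classifies as {SSEDF, INTEGER}, so D is
   passed in an SSE register and L in a general register.  MAX_CLASSES
   is 4 because no register-passed argument spans more than four
   eightbytes (a 256-bit __m256 being the largest here).  */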
2220 /* Table of constants used by fldpi, fldln2, etc.... */
2221 static REAL_VALUE_TYPE ext_80387_constants_table [5];
2222 static bool ext_80387_constants_init = 0;
2225 static struct machine_function * ix86_init_machine_status (void);
2226 static rtx ix86_function_value (const_tree, const_tree, bool);
2227 static bool ix86_function_value_regno_p (const unsigned int);
2228 static unsigned int ix86_function_arg_boundary (enum machine_mode,
2230 static rtx ix86_static_chain (const_tree, bool);
2231 static int ix86_function_regparm (const_tree, const_tree);
2232 static void ix86_compute_frame_layout (struct ix86_frame *);
2233 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
2235 static void ix86_add_new_builtins (int);
2236 static rtx ix86_expand_vec_perm_builtin (tree);
2237 static tree ix86_canonical_va_list_type (tree);
2238 static void predict_jump (int);
2239 static unsigned int split_stack_prologue_scratch_regno (void);
2240 static bool i386_asm_output_addr_const_extra (FILE *, rtx);
2242 enum ix86_function_specific_strings
2244 IX86_FUNCTION_SPECIFIC_ARCH,
2245 IX86_FUNCTION_SPECIFIC_TUNE,
2246 IX86_FUNCTION_SPECIFIC_FPMATH,
2247 IX86_FUNCTION_SPECIFIC_MAX
2250 static char *ix86_target_string (int, int, const char *, const char *,
2251 const char *, bool);
2252 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
2253 static void ix86_function_specific_save (struct cl_target_option *);
2254 static void ix86_function_specific_restore (struct cl_target_option *);
2255 static void ix86_function_specific_print (FILE *, int,
2256 struct cl_target_option *);
2257 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2258 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2259 static bool ix86_can_inline_p (tree, tree);
2260 static void ix86_set_current_function (tree);
2261 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2263 static enum calling_abi ix86_function_abi (const_tree);
2266 #ifndef SUBTARGET32_DEFAULT_CPU
2267 #define SUBTARGET32_DEFAULT_CPU "i386"
2270 /* The svr4 ABI for the i386 says that records and unions are returned
2271    in memory.  */
2272 #ifndef DEFAULT_PCC_STRUCT_RETURN
2273 #define DEFAULT_PCC_STRUCT_RETURN 1
2276 /* Whether -mtune= or -march= were specified */
2277 static int ix86_tune_defaulted;
2278 static int ix86_arch_specified;
2280 /* A mask of ix86_isa_flags that includes bit X if X
2281 was set or cleared on the command line. */
2282 static int ix86_isa_flags_explicit;
2284 /* Define a set of ISAs which are available when a given ISA is
2285 enabled. MMX and SSE ISAs are handled separately. */
2287 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2288 #define OPTION_MASK_ISA_3DNOW_SET \
2289 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2291 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2292 #define OPTION_MASK_ISA_SSE2_SET \
2293 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2294 #define OPTION_MASK_ISA_SSE3_SET \
2295 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2296 #define OPTION_MASK_ISA_SSSE3_SET \
2297 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2298 #define OPTION_MASK_ISA_SSE4_1_SET \
2299 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2300 #define OPTION_MASK_ISA_SSE4_2_SET \
2301 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2302 #define OPTION_MASK_ISA_AVX_SET \
2303 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2304 #define OPTION_MASK_ISA_FMA_SET \
2305 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2307 /* SSE4 includes both SSE4.1 and SSE4.2.  -msse4 should be the same
2308    as -msse4.2.  */
2309 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2311 #define OPTION_MASK_ISA_SSE4A_SET \
2312 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2313 #define OPTION_MASK_ISA_FMA4_SET \
2314 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2315 | OPTION_MASK_ISA_AVX_SET)
2316 #define OPTION_MASK_ISA_XOP_SET \
2317 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2318 #define OPTION_MASK_ISA_LWP_SET \
2321 /* AES and PCLMUL need SSE2 because they use xmm registers */
2322 #define OPTION_MASK_ISA_AES_SET \
2323 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2324 #define OPTION_MASK_ISA_PCLMUL_SET \
2325 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2327 #define OPTION_MASK_ISA_ABM_SET \
2328 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2330 #define OPTION_MASK_ISA_BMI_SET OPTION_MASK_ISA_BMI
2331 #define OPTION_MASK_ISA_TBM_SET OPTION_MASK_ISA_TBM
2332 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2333 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2334 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2335 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2336 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
2338 #define OPTION_MASK_ISA_FSGSBASE_SET OPTION_MASK_ISA_FSGSBASE
2339 #define OPTION_MASK_ISA_RDRND_SET OPTION_MASK_ISA_RDRND
2340 #define OPTION_MASK_ISA_F16C_SET \
2341 (OPTION_MASK_ISA_F16C | OPTION_MASK_ISA_AVX_SET)
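/* Editorial example of the closure these SET masks encode: -msse3
   applies OPTION_MASK_ISA_SSE3_SET, which by construction expands to
   OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE,
   so enabling an ISA transitively enables everything it depends on.  */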
2343 /* Define a set of ISAs which aren't available when a given ISA is
2344 disabled. MMX and SSE ISAs are handled separately. */
2346 #define OPTION_MASK_ISA_MMX_UNSET \
2347 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2348 #define OPTION_MASK_ISA_3DNOW_UNSET \
2349 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2350 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2352 #define OPTION_MASK_ISA_SSE_UNSET \
2353 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2354 #define OPTION_MASK_ISA_SSE2_UNSET \
2355 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2356 #define OPTION_MASK_ISA_SSE3_UNSET \
2357 (OPTION_MASK_ISA_SSE3 \
2358 | OPTION_MASK_ISA_SSSE3_UNSET \
2359 | OPTION_MASK_ISA_SSE4A_UNSET )
2360 #define OPTION_MASK_ISA_SSSE3_UNSET \
2361 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2362 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2363 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2364 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2365 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2366 #define OPTION_MASK_ISA_AVX_UNSET \
2367 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2368 | OPTION_MASK_ISA_FMA4_UNSET | OPTION_MASK_ISA_F16C_UNSET)
2369 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2371 /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
2372    as -mno-sse4.1.  */
2373 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2375 #define OPTION_MASK_ISA_SSE4A_UNSET \
2376 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2378 #define OPTION_MASK_ISA_FMA4_UNSET \
2379 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2380 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2381 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2383 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2384 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2385 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2386 #define OPTION_MASK_ISA_BMI_UNSET OPTION_MASK_ISA_BMI
2387 #define OPTION_MASK_ISA_TBM_UNSET OPTION_MASK_ISA_TBM
2388 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2389 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2390 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2391 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2392 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
2394 #define OPTION_MASK_ISA_FSGSBASE_UNSET OPTION_MASK_ISA_FSGSBASE
2395 #define OPTION_MASK_ISA_RDRND_UNSET OPTION_MASK_ISA_RDRND
2396 #define OPTION_MASK_ISA_F16C_UNSET OPTION_MASK_ISA_F16C
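/* Editorial note: the UNSET masks implement the inverse closure:
   -mno-sse2 applies OPTION_MASK_ISA_SSE2_UNSET, which cascades through
   SSE3, SSSE3, SSE4.x, SSE4A, AVX, FMA and friends, so disabling an
   ISA also disables everything built on top of it.  */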
2398 /* Vectorization library interface and handlers. */
2399 static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);
2401 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2402 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2404 /* Processor target table, indexed by processor number.  */
2405 struct ptt
2406 {
2407 const struct processor_costs *cost; /* Processor costs */
2408 const int align_loop; /* Default alignments. */
2409 const int align_loop_max_skip;
2410 const int align_jump;
2411 const int align_jump_max_skip;
2412 const int align_func;
2415 static const struct ptt processor_target_table[PROCESSOR_max] =
2417 {&i386_cost, 4, 3, 4, 3, 4},
2418 {&i486_cost, 16, 15, 16, 15, 16},
2419 {&pentium_cost, 16, 7, 16, 7, 16},
2420 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2421 {&geode_cost, 0, 0, 0, 0, 0},
2422 {&k6_cost, 32, 7, 32, 7, 32},
2423 {&athlon_cost, 16, 7, 16, 7, 16},
2424 {&pentium4_cost, 0, 0, 0, 0, 0},
2425 {&k8_cost, 16, 7, 16, 7, 16},
2426 {&nocona_cost, 0, 0, 0, 0, 0},
2427 {&core2_cost, 16, 10, 16, 10, 16},
2428 /* Core i7 32-bit. */
2429 {&generic32_cost, 16, 10, 16, 10, 16},
2430 /* Core i7 64-bit. */
2431 {&generic64_cost, 16, 10, 16, 10, 16},
2432 {&generic32_cost, 16, 7, 16, 7, 16},
2433 {&generic64_cost, 16, 10, 16, 10, 16},
2434 {&amdfam10_cost, 32, 24, 32, 7, 32},
2435 {&bdver1_cost, 32, 24, 32, 7, 32},
2436 {&atom_cost, 16, 7, 16, 7, 16}
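/* Editorial sketch: the alignment columns above are applied during
   option override, roughly as follows (illustrative; the real code
   only does this when the user gave no explicit -falign-* values):

     align_loops     = processor_target_table[ix86_tune].align_loop;
     align_jumps     = processor_target_table[ix86_tune].align_jump;
     align_functions = processor_target_table[ix86_tune].align_func;  */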
2439 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2467 /* Return true if a red-zone is in use. */
2470 ix86_using_red_zone (void)
2472 return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
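/* Editorial note: the red zone is the 128-byte area below the stack
   pointer that the SysV x86-64 ABI guarantees is left untouched by
   signal and interrupt handlers; the Windows x64 ABI gives no such
   guarantee, hence the TARGET_64BIT_MS_ABI exclusion above.  */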
2475 /* Implement TARGET_HANDLE_OPTION.  */
2477 static bool
2478 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2485 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2486 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2490 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2491 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2498 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2499 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2503 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2504 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2514 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2515 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2519 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2520 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2527 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2528 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2532 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2533 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2540 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2541 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2545 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2546 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2553 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2554 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2558 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2559 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2566 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2567 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2571 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2572 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2579 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2580 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2584 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2585 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2592 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2593 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2597 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2598 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2605 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2606 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2610 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2611 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2616 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2617 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2621 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2622 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2628 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2629 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2633 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2634 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2641 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2642 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2646 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2647 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2654 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2655 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2659 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2660 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2667 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2668 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2672 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2673 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2680 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2681 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2685 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2686 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2693 ix86_isa_flags |= OPTION_MASK_ISA_BMI_SET;
2694 ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_SET;
2698 ix86_isa_flags &= ~OPTION_MASK_ISA_BMI_UNSET;
2699 ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_UNSET;
2706 ix86_isa_flags |= OPTION_MASK_ISA_TBM_SET;
2707 ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_SET;
2711 ix86_isa_flags &= ~OPTION_MASK_ISA_TBM_UNSET;
2712 ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_UNSET;
2719 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2720 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2724 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2725 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2732 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2733 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2737 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2738 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2745 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2746 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2750 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2751 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2758 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2759 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2763 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2764 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2771 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2772 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2776 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2777 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2784 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2785 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2789 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2790 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2797 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2798 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2802 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2803 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2810 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE_SET;
2811 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_SET;
2815 ix86_isa_flags &= ~OPTION_MASK_ISA_FSGSBASE_UNSET;
2816 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_UNSET;
2823 ix86_isa_flags |= OPTION_MASK_ISA_RDRND_SET;
2824 ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_SET;
2828 ix86_isa_flags &= ~OPTION_MASK_ISA_RDRND_UNSET;
2829 ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_UNSET;
2836 ix86_isa_flags |= OPTION_MASK_ISA_F16C_SET;
2837 ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_SET;
2841 ix86_isa_flags &= ~OPTION_MASK_ISA_F16C_UNSET;
2842 ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_UNSET;
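/* Editorial note: the cases above all follow one pattern: every -mFOO
   or -mno-FOO updates ix86_isa_flags through the closure masks and
   records the same bits in ix86_isa_flags_explicit, so the processor
   defaults applied later in ix86_option_override_internal never
   override an explicit command-line choice.  */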
2851 /* Return a string that documents the current -m options. The caller is
2852    responsible for freeing the string.  */
2854 static char *
2855 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2856 const char *fpmath, bool add_nl_p)
2858 struct ix86_target_opts
2860 const char *option; /* option string */
2861 int mask; /* isa mask options */
2864   /* This table is ordered so that options like -msse4.2 that imply
2865      preceding options match first.  */
2866 static struct ix86_target_opts isa_opts[] =
2868 { "-m64", OPTION_MASK_ISA_64BIT },
2869 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2870 { "-mfma", OPTION_MASK_ISA_FMA },
2871 { "-mxop", OPTION_MASK_ISA_XOP },
2872 { "-mlwp", OPTION_MASK_ISA_LWP },
2873 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2874 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2875 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2876 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2877 { "-msse3", OPTION_MASK_ISA_SSE3 },
2878 { "-msse2", OPTION_MASK_ISA_SSE2 },
2879 { "-msse", OPTION_MASK_ISA_SSE },
2880 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2881 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2882 { "-mmmx", OPTION_MASK_ISA_MMX },
2883 { "-mabm", OPTION_MASK_ISA_ABM },
2884 { "-mbmi", OPTION_MASK_ISA_BMI },
2885 { "-mtbm", OPTION_MASK_ISA_TBM },
2886 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2887 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2888 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2889 { "-maes", OPTION_MASK_ISA_AES },
2890 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2891 { "-mfsgsbase", OPTION_MASK_ISA_FSGSBASE },
2892 { "-mrdrnd", OPTION_MASK_ISA_RDRND },
2893 { "-mf16c", OPTION_MASK_ISA_F16C },
2897 static struct ix86_target_opts flag_opts[] =
2899 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2900 { "-m80387", MASK_80387 },
2901 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2902 { "-malign-double", MASK_ALIGN_DOUBLE },
2903 { "-mcld", MASK_CLD },
2904 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2905 { "-mieee-fp", MASK_IEEE_FP },
2906 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2907 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2908 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2909 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2910 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2911 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2912 { "-mno-red-zone", MASK_NO_RED_ZONE },
2913 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2914 { "-mrecip", MASK_RECIP },
2915 { "-mrtd", MASK_RTD },
2916 { "-msseregparm", MASK_SSEREGPARM },
2917 { "-mstack-arg-probe", MASK_STACK_PROBE },
2918 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2919 { "-mvect8-ret-in-mem", MASK_VECT8_RETURNS },
2920 { "-m8bit-idiv", MASK_USE_8BIT_IDIV },
2921 { "-mvzeroupper", MASK_VZEROUPPER },
2924 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2927 char target_other[40];
2936 memset (opts, '\0', sizeof (opts));
2938 /* Add -march= option. */
2941 opts[num][0] = "-march=";
2942 opts[num++][1] = arch;
2945 /* Add -mtune= option. */
2948 opts[num][0] = "-mtune=";
2949 opts[num++][1] = tune;
2952 /* Pick out the options in isa options. */
2953 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2955 if ((isa & isa_opts[i].mask) != 0)
2957 opts[num++][0] = isa_opts[i].option;
2958 isa &= ~ isa_opts[i].mask;
2962 if (isa && add_nl_p)
2964 opts[num++][0] = isa_other;
2965 sprintf (isa_other, "(other isa: %#x)", isa);
2968 /* Add flag options. */
2969 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2971 if ((flags & flag_opts[i].mask) != 0)
2973 opts[num++][0] = flag_opts[i].option;
2974 flags &= ~ flag_opts[i].mask;
2978 if (flags && add_nl_p)
2980 opts[num++][0] = target_other;
2981 sprintf (target_other, "(other flags: %#x)", flags);
2984 /* Add -fpmath= option. */
2987 opts[num][0] = "-mfpmath=";
2988 opts[num++][1] = fpmath;
2995 gcc_assert (num < ARRAY_SIZE (opts));
2997 /* Size the string. */
2999 sep_len = (add_nl_p) ? 3 : 1;
3000 for (i = 0; i < num; i++)
3003 for (j = 0; j < 2; j++)
3005 len += strlen (opts[i][j]);
3008 /* Build the string. */
3009 ret = ptr = (char *) xmalloc (len);
3012 for (i = 0; i < num; i++)
3016 for (j = 0; j < 2; j++)
3017 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
3024 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
3032 for (j = 0; j < 2; j++)
3035 memcpy (ptr, opts[i][j], len2[j]);
3037 line_len += len2[j];
3042 gcc_assert (ret + len >= ptr);
3047 /* Return TRUE if software prefetching is beneficial for the
3048    target machine.  */
3050 static bool
3051 software_prefetching_beneficial_p (void)
3055 case PROCESSOR_GEODE:
3057 case PROCESSOR_ATHLON:
3059 case PROCESSOR_AMDFAM10:
3067 /* Return true if profiling code should be emitted before the
3068    prologue, otherwise false.
3069    Note: for x86 the "hotfix" case is sorried (rejected via sorry ()).  */
3070 static bool
3071 ix86_profile_before_prologue (void)
3073 return flag_fentry != 0;
3076 /* Function that is callable from the debugger to print the current
3077    options.  */
3078 static void
3079 ix86_debug_options (void)
3081 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
3082 ix86_arch_string, ix86_tune_string,
3083 ix86_fpmath_string, true);
3087 fprintf (stderr, "%s\n\n", opts);
3091 fputs ("<no options>\n\n", stderr);
3096 /* Override various settings based on options.  If MAIN_ARGS_P, the
3097    options are from the command line, otherwise they are from
3098    attribute(target).  */
3100 static void
3101 ix86_option_override_internal (bool main_args_p)
3104 unsigned int ix86_arch_mask, ix86_tune_mask;
3105 const bool ix86_tune_specified = (ix86_tune_string != NULL);
3110 /* Comes from final.c -- no real reason to change it. */
3111 #define MAX_CODE_ALIGN 16
3119 PTA_PREFETCH_SSE = 1 << 4,
3121 PTA_3DNOW_A = 1 << 6,
3125 PTA_POPCNT = 1 << 10,
3127 PTA_SSE4A = 1 << 12,
3128 PTA_NO_SAHF = 1 << 13,
3129 PTA_SSE4_1 = 1 << 14,
3130 PTA_SSE4_2 = 1 << 15,
3132 PTA_PCLMUL = 1 << 17,
3135 PTA_MOVBE = 1 << 20,
3139 PTA_FSGSBASE = 1 << 24,
3140 PTA_RDRND = 1 << 25,
3144 /* if this reaches 32, need to widen struct pta flags below */
3149 const char *const name; /* processor name or nickname. */
3150 const enum processor_type processor;
3151 const enum attr_cpu schedule;
3152 const unsigned /*enum pta_flags*/ flags;
3154 const processor_alias_table[] =
3156 {"i386", PROCESSOR_I386, CPU_NONE, 0},
3157 {"i486", PROCESSOR_I486, CPU_NONE, 0},
3158 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3159 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3160 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
3161 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
3162 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3163 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3164 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
3165 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3166 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3167 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
3168 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3170 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3172 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3173 PTA_MMX | PTA_SSE | PTA_SSE2},
3174 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
3175       PTA_MMX | PTA_SSE | PTA_SSE2},
3176 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
3177 PTA_MMX | PTA_SSE | PTA_SSE2},
3178 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
3179 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
3180 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
3181 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3182 | PTA_CX16 | PTA_NO_SAHF},
3183 {"core2", PROCESSOR_CORE2, CPU_CORE2,
3184 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3185 | PTA_SSSE3 | PTA_CX16},
3186 {"corei7", PROCESSOR_COREI7_64, CPU_GENERIC64,
3187 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3188 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16},
3189 {"atom", PROCESSOR_ATOM, CPU_ATOM,
3190 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3191 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
3192 {"geode", PROCESSOR_GEODE, CPU_GEODE,
3193       PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3194 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
3195 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3196 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3197 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
3198 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3199 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
3200 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3201 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
3202 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3203 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
3204 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3205 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
3206 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3207 {"x86-64", PROCESSOR_K8, CPU_K8,
3208 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
3209 {"k8", PROCESSOR_K8, CPU_K8,
3210 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3211 | PTA_SSE2 | PTA_NO_SAHF},
3212 {"k8-sse3", PROCESSOR_K8, CPU_K8,
3213 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3214 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3215 {"opteron", PROCESSOR_K8, CPU_K8,
3216 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3217 | PTA_SSE2 | PTA_NO_SAHF},
3218 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
3219 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3220 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3221 {"athlon64", PROCESSOR_K8, CPU_K8,
3222 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3223 | PTA_SSE2 | PTA_NO_SAHF},
3224 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
3225 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3226 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3227 {"athlon-fx", PROCESSOR_K8, CPU_K8,
3228 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3229 | PTA_SSE2 | PTA_NO_SAHF},
3230 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3231 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3232 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3233 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3234 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3235 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3236 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
3237 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3238 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM
3239 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES
3240 | PTA_PCLMUL | PTA_AVX | PTA_FMA4 | PTA_XOP | PTA_LWP},
3241 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
3242 0 /* flags are only used for -march switch. */ },
3243 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
3244 PTA_64BIT /* flags are only used for -march switch. */ },
3247 int const pta_size = ARRAY_SIZE (processor_alias_table);
3249 /* Set up prefix/suffix so the error messages refer to either the command
3250 line argument, or the attribute(target). */
3259 prefix = "option(\"";
3264 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3265 SUBTARGET_OVERRIDE_OPTIONS;
3268 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3269 SUBSUBTARGET_OVERRIDE_OPTIONS;
3272 /* -fPIC is the default for x86_64. */
3273 if (TARGET_MACHO && TARGET_64BIT)
3276 /* Need to check -mtune=generic first. */
3277 if (ix86_tune_string)
3279 if (!strcmp (ix86_tune_string, "generic")
3280 || !strcmp (ix86_tune_string, "i686")
3281 /* As special support for cross compilers we read -mtune=native
3282 as -mtune=generic. With native compilers we won't see the
3283 -mtune=native, as it was changed by the driver. */
3284 || !strcmp (ix86_tune_string, "native"))
3287 ix86_tune_string = "generic64";
3289 ix86_tune_string = "generic32";
3291 /* If this call is for setting the option attribute, allow the
3292 generic32/generic64 that was previously set. */
3293 else if (!main_args_p
3294 && (!strcmp (ix86_tune_string, "generic32")
3295 || !strcmp (ix86_tune_string, "generic64")))
3297 else if (!strncmp (ix86_tune_string, "generic", 7))
3298 error ("bad value (%s) for %stune=%s %s",
3299 ix86_tune_string, prefix, suffix, sw);
3300 else if (!strcmp (ix86_tune_string, "x86-64"))
3301 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
3302 "%stune=k8%s or %stune=generic%s instead as appropriate",
3303 prefix, suffix, prefix, suffix, prefix, suffix);
3307 if (ix86_arch_string)
3308 ix86_tune_string = ix86_arch_string;
3309 if (!ix86_tune_string)
3311 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
3312 ix86_tune_defaulted = 1;
3315 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
3316 need to use a sensible tune option. */
3317 if (!strcmp (ix86_tune_string, "generic")
3318 || !strcmp (ix86_tune_string, "x86-64")
3319 || !strcmp (ix86_tune_string, "i686"))
3322 ix86_tune_string = "generic64";
3324 ix86_tune_string = "generic32";
3328 if (ix86_stringop_string)
3330 if (!strcmp (ix86_stringop_string, "rep_byte"))
3331 stringop_alg = rep_prefix_1_byte;
3332 else if (!strcmp (ix86_stringop_string, "libcall"))
3333 stringop_alg = libcall;
3334 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
3335 stringop_alg = rep_prefix_4_byte;
3336 else if (!strcmp (ix86_stringop_string, "rep_8byte")
3338 /* rep; movq isn't available in 32-bit code. */
3339 stringop_alg = rep_prefix_8_byte;
3340 else if (!strcmp (ix86_stringop_string, "byte_loop"))
3341 stringop_alg = loop_1_byte;
3342 else if (!strcmp (ix86_stringop_string, "loop"))
3343 stringop_alg = loop;
3344 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
3345 stringop_alg = unrolled_loop;
3347 error ("bad value (%s) for %sstringop-strategy=%s %s",
3348 ix86_stringop_string, prefix, suffix, sw);
3351 if (!ix86_arch_string)
3352 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3354 ix86_arch_specified = 1;
3356 /* Validate -mabi= value. */
3357 if (ix86_abi_string)
3359 if (strcmp (ix86_abi_string, "sysv") == 0)
3360 ix86_abi = SYSV_ABI;
3361 else if (strcmp (ix86_abi_string, "ms") == 0)
3364 error ("unknown ABI (%s) for %sabi=%s %s",
3365 ix86_abi_string, prefix, suffix, sw);
3368 ix86_abi = DEFAULT_ABI;
3370 if (ix86_cmodel_string != 0)
3372 if (!strcmp (ix86_cmodel_string, "small"))
3373 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3374 else if (!strcmp (ix86_cmodel_string, "medium"))
3375 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3376 else if (!strcmp (ix86_cmodel_string, "large"))
3377 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3379 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3380 else if (!strcmp (ix86_cmodel_string, "32"))
3381 ix86_cmodel = CM_32;
3382 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3383 ix86_cmodel = CM_KERNEL;
3385 error ("bad value (%s) for %scmodel=%s %s",
3386 ix86_cmodel_string, prefix, suffix, sw);
3390 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3391 use of rip-relative addressing. This eliminates fixups that
3392 would otherwise be needed if this object is to be placed in a
3393 DLL, and is essentially just as efficient as direct addressing. */
3394 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3395 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3396 else if (TARGET_64BIT)
3397 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3399 ix86_cmodel = CM_32;
3401 if (ix86_asm_string != 0)
3404 && !strcmp (ix86_asm_string, "intel"))
3405 ix86_asm_dialect = ASM_INTEL;
3406 else if (!strcmp (ix86_asm_string, "att"))
3407 ix86_asm_dialect = ASM_ATT;
3409 error ("bad value (%s) for %sasm=%s %s",
3410 ix86_asm_string, prefix, suffix, sw);
3412 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3413 error ("code model %qs not supported in the %s bit mode",
3414 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3415 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3416 sorry ("%i-bit mode not compiled in",
3417 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3419 for (i = 0; i < pta_size; i++)
3420 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3422 ix86_schedule = processor_alias_table[i].schedule;
3423 ix86_arch = processor_alias_table[i].processor;
3424 /* Default cpu tuning to the architecture. */
3425 ix86_tune = ix86_arch;
3427 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3428 error ("CPU you selected does not support x86-64 "
3431 if (processor_alias_table[i].flags & PTA_MMX
3432 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3433 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3434 if (processor_alias_table[i].flags & PTA_3DNOW
3435 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3436 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3437 if (processor_alias_table[i].flags & PTA_3DNOW_A
3438 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3439 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3440 if (processor_alias_table[i].flags & PTA_SSE
3441 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3442 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3443 if (processor_alias_table[i].flags & PTA_SSE2
3444 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3445 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3446 if (processor_alias_table[i].flags & PTA_SSE3
3447 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3448 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3449 if (processor_alias_table[i].flags & PTA_SSSE3
3450 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3451 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3452 if (processor_alias_table[i].flags & PTA_SSE4_1
3453 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3454 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3455 if (processor_alias_table[i].flags & PTA_SSE4_2
3456 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3457 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3458 if (processor_alias_table[i].flags & PTA_AVX
3459 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3460 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3461 if (processor_alias_table[i].flags & PTA_FMA
3462 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3463 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3464 if (processor_alias_table[i].flags & PTA_SSE4A
3465 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3466 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3467 if (processor_alias_table[i].flags & PTA_FMA4
3468 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3469 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3470 if (processor_alias_table[i].flags & PTA_XOP
3471 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3472 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3473 if (processor_alias_table[i].flags & PTA_LWP
3474 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3475 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3476 if (processor_alias_table[i].flags & PTA_ABM
3477 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3478 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3479 if (processor_alias_table[i].flags & PTA_BMI
3480 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
3481 ix86_isa_flags |= OPTION_MASK_ISA_BMI;
3482 if (processor_alias_table[i].flags & PTA_TBM
3483 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
3484 ix86_isa_flags |= OPTION_MASK_ISA_TBM;
3485 if (processor_alias_table[i].flags & PTA_CX16
3486 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3487 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3488 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3489 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3490 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3491 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3492 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3493 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3494 if (processor_alias_table[i].flags & PTA_MOVBE
3495 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3496 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3497 if (processor_alias_table[i].flags & PTA_AES
3498 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3499 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3500 if (processor_alias_table[i].flags & PTA_PCLMUL
3501 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3502 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3503 if (processor_alias_table[i].flags & PTA_FSGSBASE
3504 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
3505 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
3506 if (processor_alias_table[i].flags & PTA_RDRND
3507 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
3508 ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
3509 if (processor_alias_table[i].flags & PTA_F16C
3510 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
3511 ix86_isa_flags |= OPTION_MASK_ISA_F16C;
3512 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3513 x86_prefetch_sse = true;
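/* Illustrative note (added; not from the original source): each PTA_*
   check above turns an ISA on only when the user has not set it
   explicitly, so a command line such as

       gcc -march=corei7 -mno-sse4.2 foo.c

   leaves OPTION_MASK_ISA_SSE4_2 clear: -mno-sse4.2 records the flag
   in ix86_isa_flags_explicit, and the PTA_SSE4_2 branch is skipped.  */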
3518 if (!strcmp (ix86_arch_string, "generic"))
3519 error ("generic CPU can be used only for %stune=%s %s",
3520 prefix, suffix, sw);
3521 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3522 error ("bad value (%s) for %sarch=%s %s",
3523 ix86_arch_string, prefix, suffix, sw);
3525 ix86_arch_mask = 1u << ix86_arch;
3526 for (i = 0; i < X86_ARCH_LAST; ++i)
3527 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3529 for (i = 0; i < pta_size; i++)
3530 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3532 ix86_schedule = processor_alias_table[i].schedule;
3533 ix86_tune = processor_alias_table[i].processor;
3536 if (!(processor_alias_table[i].flags & PTA_64BIT))
3538 if (ix86_tune_defaulted)
3540 ix86_tune_string = "x86-64";
3541 for (i = 0; i < pta_size; i++)
3542 if (! strcmp (ix86_tune_string,
3543 processor_alias_table[i].name))
3545 ix86_schedule = processor_alias_table[i].schedule;
3546 ix86_tune = processor_alias_table[i].processor;
3549 error ("CPU you selected does not support x86-64 "
3555 /* Adjust tuning when compiling for 32-bit ABI. */
3558 case PROCESSOR_GENERIC64:
3559 ix86_tune = PROCESSOR_GENERIC32;
3560 ix86_schedule = CPU_PENTIUMPRO;
3563 case PROCESSOR_COREI7_64:
3564 ix86_tune = PROCESSOR_COREI7_32;
3565 ix86_schedule = CPU_PENTIUMPRO;
3572 /* Intel CPUs have always interpreted SSE prefetch instructions as
3573 NOPs; so, we can enable SSE prefetch instructions even when
3574 -mtune (rather than -march) points us to a processor that has them.
3575 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3576 higher processors. */
3578 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3579 x86_prefetch_sse = true;
3583 if (ix86_tune_specified && i == pta_size)
3584 error ("bad value (%s) for %stune=%s %s",
3585 ix86_tune_string, prefix, suffix, sw);
3587 ix86_tune_mask = 1u << ix86_tune;
3588 for (i = 0; i < X86_TUNE_LAST; ++i)
3589 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3591 #ifndef USE_IX86_FRAME_POINTER
3592 #define USE_IX86_FRAME_POINTER 0
3595 #ifndef USE_X86_64_FRAME_POINTER
3596 #define USE_X86_64_FRAME_POINTER 0
3599 /* Set the default values for switches whose default depends on TARGET_64BIT
3600 in case they weren't overwritten by command line options. */
3603 if (optimize > 1 && !global_options_set.x_flag_zee)
3605 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3606 flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
3607 if (flag_asynchronous_unwind_tables == 2)
3608 flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
3609 if (flag_pcc_struct_return == 2)
3610 flag_pcc_struct_return = 0;
3614 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3615 flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
3616 if (flag_asynchronous_unwind_tables == 2)
3617 flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
3618 if (flag_pcc_struct_return == 2)
3619 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
3623 ix86_cost = &ix86_size_cost;
3625 ix86_cost = processor_target_table[ix86_tune].cost;
3627 /* Arrange to set up i386_stack_locals for all functions. */
3628 init_machine_status = ix86_init_machine_status;
3630 /* Validate -mregparm= value. */
3631 if (ix86_regparm_string)
3634 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3635 i = atoi (ix86_regparm_string);
3636 if (i < 0 || i > REGPARM_MAX)
3637 error ("%sregparm=%d%s is not between 0 and %d",
3638 prefix, i, suffix, REGPARM_MAX);
3643 ix86_regparm = REGPARM_MAX;
3645 /* If the user has provided any of the -malign-* options,
3646 warn and use that value only if -falign-* is not set.
3647 Remove this code in GCC 3.2 or later. */
3648 if (ix86_align_loops_string)
3650 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3651 prefix, suffix, suffix);
3652 if (align_loops == 0)
3654 i = atoi (ix86_align_loops_string);
3655 if (i < 0 || i > MAX_CODE_ALIGN)
3656 error ("%salign-loops=%d%s is not between 0 and %d",
3657 prefix, i, suffix, MAX_CODE_ALIGN);
3659 align_loops = 1 << i;
3663 if (ix86_align_jumps_string)
3665 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3666 prefix, suffix, suffix);
3667 if (align_jumps == 0)
3669 i = atoi (ix86_align_jumps_string);
3670 if (i < 0 || i > MAX_CODE_ALIGN)
3671 error ("%salign-loops=%d%s is not between 0 and %d",
3672 prefix, i, suffix, MAX_CODE_ALIGN);
3674 align_jumps = 1 << i;
3678 if (ix86_align_funcs_string)
3680 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3681 prefix, suffix, suffix);
3682 if (align_functions == 0)
3684 i = atoi (ix86_align_funcs_string);
3685 if (i < 0 || i > MAX_CODE_ALIGN)
3686 error ("%salign-loops=%d%s is not between 0 and %d",
3687 prefix, i, suffix, MAX_CODE_ALIGN);
3689 align_functions = 1 << i;
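/* Worked example (added for illustration): the obsolete -m options take
   a power-of-two exponent while the -f options take a byte count, so

       gcc -malign-loops=4 foo.c    yields  align_loops = 1 << 4 = 16
       gcc -falign-loops=16 foo.c   yields  align_loops = 16 directly

   which is why the warnings above point users at the -falign-*
   spellings.  */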
3693 /* Default align_* from the processor table. */
3694 if (align_loops == 0)
3696 align_loops = processor_target_table[ix86_tune].align_loop;
3697 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3699 if (align_jumps == 0)
3701 align_jumps = processor_target_table[ix86_tune].align_jump;
3702 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3704 if (align_functions == 0)
3706 align_functions = processor_target_table[ix86_tune].align_func;
3709 /* Validate -mbranch-cost= value, or provide default. */
3710 ix86_branch_cost = ix86_cost->branch_cost;
3711 if (ix86_branch_cost_string)
3713 i = atoi (ix86_branch_cost_string);
3715 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3717 ix86_branch_cost = i;
3719 if (ix86_section_threshold_string)
3721 i = atoi (ix86_section_threshold_string);
3723 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3725 ix86_section_threshold = i;
3728 if (ix86_tls_dialect_string)
3730 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3731 ix86_tls_dialect = TLS_DIALECT_GNU;
3732 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3733 ix86_tls_dialect = TLS_DIALECT_GNU2;
3735 error ("bad value (%s) for %stls-dialect=%s %s",
3736 ix86_tls_dialect_string, prefix, suffix, sw);
3739 if (ix87_precision_string)
3741 i = atoi (ix87_precision_string);
3742 if (i != 32 && i != 64 && i != 80)
3743 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3748 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3750 /* Enable by default the SSE and MMX builtins. Do allow the user to
3751 explicitly disable any of these. In particular, disabling SSE and
3752 MMX for kernel code is extremely useful. */
3753 if (!ix86_arch_specified)
3755 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3756 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3759 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3763 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3765 if (!ix86_arch_specified)
3767 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3769 /* The i386 ABI does not specify a red zone. It still makes sense to use
3770 one when the programmer takes care to keep the stack from being destroyed. */
3771 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3772 target_flags |= MASK_NO_RED_ZONE;
3775 /* Keep nonleaf frame pointers. */
3776 if (flag_omit_frame_pointer)
3777 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3778 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3779 flag_omit_frame_pointer = 1;
3781 /* If we're doing fast math, we don't care about comparison order
3782 wrt NaNs. This lets us use a shorter comparison sequence. */
3783 if (flag_finite_math_only)
3784 target_flags &= ~MASK_IEEE_FP;
3786 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3787 since the insns won't need emulation. */
3788 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3789 target_flags &= ~MASK_NO_FANCY_MATH_387;
3791 /* Likewise, if the target doesn't have a 387, or we've specified
3792 software floating point, don't use 387 inline intrinsics. */
3794 target_flags |= MASK_NO_FANCY_MATH_387;
3796 /* Turn on MMX builtins for -msse. */
3799 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3800 x86_prefetch_sse = true;
3803 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3804 if (TARGET_SSE4_2 || TARGET_ABM)
3805 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3807 /* Validate -mpreferred-stack-boundary= value or default it to
3808 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3809 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3810 if (ix86_preferred_stack_boundary_string)
3812 int min = (TARGET_64BIT ? 4 : 2);
3813 int max = (TARGET_SEH ? 4 : 12);
3815 i = atoi (ix86_preferred_stack_boundary_string);
3816 if (i < min || i > max)
3819 error ("%spreferred-stack-boundary%s is not supported "
3820 "for this target", prefix, suffix);
3822 error ("%spreferred-stack-boundary=%d%s is not between %d and %d",
3823 prefix, i, suffix, min, max);
3826 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
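/* Worked example (added for illustration): -mpreferred-stack-boundary=4
   yields (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. a 16-byte aligned
   stack, which is what the x86-64 psABI requires.  Per the checks
   above, the accepted exponent range is 4..12 for 64-bit and 2..12
   for 32-bit, narrowing to just 4 under SEH.  */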
3829 /* Set the default value for -mstackrealign. */
3830 if (ix86_force_align_arg_pointer == -1)
3831 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3833 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3835 /* Validate -mincoming-stack-boundary= value or default it to
3836 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3837 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3838 if (ix86_incoming_stack_boundary_string)
3840 i = atoi (ix86_incoming_stack_boundary_string);
3841 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3842 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3843 i, TARGET_64BIT ? 4 : 2);
3846 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3847 ix86_incoming_stack_boundary
3848 = ix86_user_incoming_stack_boundary;
3852 /* Accept -msseregparm only if at least SSE support is enabled. */
3853 if (TARGET_SSEREGPARM
3855 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3857 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3858 if (ix86_fpmath_string != 0)
3860 if (! strcmp (ix86_fpmath_string, "387"))
3861 ix86_fpmath = FPMATH_387;
3862 else if (! strcmp (ix86_fpmath_string, "sse"))
3866 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3867 ix86_fpmath = FPMATH_387;
3870 ix86_fpmath = FPMATH_SSE;
3872 else if (! strcmp (ix86_fpmath_string, "387,sse")
3873 || ! strcmp (ix86_fpmath_string, "387+sse")
3874 || ! strcmp (ix86_fpmath_string, "sse,387")
3875 || ! strcmp (ix86_fpmath_string, "sse+387")
3876 || ! strcmp (ix86_fpmath_string, "both"))
3880 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3881 ix86_fpmath = FPMATH_387;
3883 else if (!TARGET_80387)
3885 warning (0, "387 instruction set disabled, using SSE arithmetics");
3886 ix86_fpmath = FPMATH_SSE;
3889 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3892 error ("bad value (%s) for %sfpmath=%s %s",
3893 ix86_fpmath_string, prefix, suffix, sw);
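/* Usage note (added for clarity): the spellings accepted above are
   synonyms for enabling both units, e.g.

       gcc -mfpmath=sse,387 foo.c
       gcc -mfpmath=both foo.c

   both set ix86_fpmath = FPMATH_SSE | FPMATH_387 when SSE and the
   80387 are available; otherwise a warning falls back to whichever
   unit is present.  */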
3896 /* If the i387 is disabled, then do not return values in it. */
3898 target_flags &= ~MASK_FLOAT_RETURNS;
3900 /* Use an external vectorized library for vectorizing intrinsics. */
3901 if (ix86_veclibabi_string)
3903 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3904 ix86_veclib_handler = ix86_veclibabi_svml;
3905 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3906 ix86_veclib_handler = ix86_veclibabi_acml;
3908 error ("unknown vectorization library ABI type (%s) for "
3909 "%sveclibabi=%s %s", ix86_veclibabi_string,
3910 prefix, suffix, sw);
3913 if ((!USE_IX86_FRAME_POINTER
3914 || (x86_accumulate_outgoing_args & ix86_tune_mask))
3915 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3917 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3919 /* ??? Unwind info is not correct around the CFG unless either a frame
3920 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3921 unwind info generation to be aware of the CFG and propagating states around edges. */
3923 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3924 || flag_exceptions || flag_non_call_exceptions)
3925 && flag_omit_frame_pointer
3926 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3928 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3929 warning (0, "unwind tables currently require either a frame pointer "
3930 "or %saccumulate-outgoing-args%s for correctness",
3932 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3935 /* If stack probes are required, the space used for large function
3936 arguments on the stack must also be probed, so enable
3937 -maccumulate-outgoing-args so this happens in the prologue. */
3938 if (TARGET_STACK_PROBE
3939 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3941 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3942 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3943 "for correctness", prefix, suffix);
3944 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3947 /* For sane SSE instruction set generation we need fcomi instruction.
3948 It is safe to enable all CMOVE instructions. */
3952 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3955 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3956 p = strchr (internal_label_prefix, 'X');
3957 internal_label_prefix_len = p - internal_label_prefix;
3961 /* When the scheduling description is not available, disable the scheduler
3962 pass so it won't slow down compilation and make x87 code slower. */
3963 if (!TARGET_SCHEDULE)
3964 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3966 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3967 ix86_cost->simultaneous_prefetches,
3968 global_options.x_param_values,
3969 global_options_set.x_param_values);
3970 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, ix86_cost->prefetch_block,
3971 global_options.x_param_values,
3972 global_options_set.x_param_values);
3973 maybe_set_param_value (PARAM_L1_CACHE_SIZE, ix86_cost->l1_cache_size,
3974 global_options.x_param_values,
3975 global_options_set.x_param_values);
3976 maybe_set_param_value (PARAM_L2_CACHE_SIZE, ix86_cost->l2_cache_size,
3977 global_options.x_param_values,
3978 global_options_set.x_param_values);
3980 /* Enable software prefetching at -O3 for CPUs where prefetching is helpful. */
3981 if (flag_prefetch_loop_arrays < 0
3984 && software_prefetching_beneficial_p ())
3985 flag_prefetch_loop_arrays = 1;
3987 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3988 can be optimized to ap = __builtin_next_arg (0). */
3989 if (!TARGET_64BIT && !flag_split_stack)
3990 targetm.expand_builtin_va_start = NULL;
3994 ix86_gen_leave = gen_leave_rex64;
3995 ix86_gen_add3 = gen_adddi3;
3996 ix86_gen_sub3 = gen_subdi3;
3997 ix86_gen_sub3_carry = gen_subdi3_carry;
3998 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3999 ix86_gen_monitor = gen_sse3_monitor64;
4000 ix86_gen_andsp = gen_anddi3;
4001 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
4002 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
4003 ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
4007 ix86_gen_leave = gen_leave;
4008 ix86_gen_add3 = gen_addsi3;
4009 ix86_gen_sub3 = gen_subsi3;
4010 ix86_gen_sub3_carry = gen_subsi3_carry;
4011 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
4012 ix86_gen_monitor = gen_sse3_monitor;
4013 ix86_gen_andsp = gen_andsi3;
4014 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
4015 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
4016 ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
4020 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
4022 target_flags |= MASK_CLD & ~target_flags_explicit;
4025 if (!TARGET_64BIT && flag_pic)
4027 if (flag_fentry > 0)
4028 sorry ("-mfentry isn%'t supported for 32-bit in combination "
4032 else if (TARGET_SEH)
4034 if (flag_fentry == 0)
4035 sorry ("-mno-fentry isn%'t compatible with SEH");
4038 else if (flag_fentry < 0)
4040 #if defined(PROFILE_BEFORE_PROLOGUE)
4047 /* Save the initial options in case the user uses function-specific options. */
4049 target_option_default_node = target_option_current_node
4050 = build_target_option_node ();
4054 /* Enable vzeroupper pass by default for TARGET_AVX. */
4055 if (!(target_flags_explicit & MASK_VZEROUPPER))
4056 target_flags |= MASK_VZEROUPPER;
4060 /* Disable vzeroupper pass if TARGET_AVX is disabled. */
4061 target_flags &= ~MASK_VZEROUPPER;
4065 /* Return TRUE if type TYPE and mode MODE use 256bit AVX modes. */
4068 use_avx256_p (enum machine_mode mode, const_tree type)
4070 return (VALID_AVX256_REG_MODE (mode)
4072 && TREE_CODE (type) == VECTOR_TYPE
4073 && int_size_in_bytes (type) == 32));
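/* Illustrative user-level example (added; hypothetical code): a 32-byte
   vector type satisfies the check above, e.g.

       typedef float v8sf __attribute__ ((vector_size (32)));

   has VECTOR_TYPE as its tree code and int_size_in_bytes of 32, so
   values of this type count as using 256bit AVX modes.  */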
4076 /* Return TRUE if VAL is passed in a register with 256bit AVX modes. */
4079 function_pass_avx256_p (const_rtx val)
4084 if (REG_P (val) && VALID_AVX256_REG_MODE (GET_MODE (val)))
4087 if (GET_CODE (val) == PARALLEL)
4092 for (i = XVECLEN (val, 0) - 1; i >= 0; i--)
4094 r = XVECEXP (val, 0, i);
4095 if (GET_CODE (r) == EXPR_LIST
4097 && REG_P (XEXP (r, 0))
4098 && (GET_MODE (XEXP (r, 0)) == OImode
4099 || VALID_AVX256_REG_MODE (GET_MODE (XEXP (r, 0)))))
4107 /* Implement the TARGET_OPTION_OVERRIDE hook. */
4110 ix86_option_override (void)
4112 ix86_option_override_internal (true);
4115 /* Update register usage after having seen the compiler flags. */
4118 ix86_conditional_register_usage (void)
4123 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4125 if (fixed_regs[i] > 1)
4126 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
4127 if (call_used_regs[i] > 1)
4128 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
4131 /* The PIC register, if it exists, is fixed. */
4132 j = PIC_OFFSET_TABLE_REGNUM;
4133 if (j != INVALID_REGNUM)
4134 fixed_regs[j] = call_used_regs[j] = 1;
4136 /* The MS_ABI changes the set of call-used registers. */
4137 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
4139 call_used_regs[SI_REG] = 0;
4140 call_used_regs[DI_REG] = 0;
4141 call_used_regs[XMM6_REG] = 0;
4142 call_used_regs[XMM7_REG] = 0;
4143 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4144 call_used_regs[i] = 0;
4147 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
4148 other call-clobbered regs for 64-bit. */
4151 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
4153 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4154 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
4155 && call_used_regs[i])
4156 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
4159 /* If MMX is disabled, squash the registers. */
4161 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4162 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
4163 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4165 /* If SSE is disabled, squash the registers. */
4167 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4168 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
4169 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4171 /* If the FPU is disabled, squash the registers. */
4172 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
4173 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4174 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
4175 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4177 /* If 32-bit, squash the 64-bit registers. */
4180 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
4182 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4188 /* Save the current options */
4191 ix86_function_specific_save (struct cl_target_option *ptr)
4193 ptr->arch = ix86_arch;
4194 ptr->schedule = ix86_schedule;
4195 ptr->tune = ix86_tune;
4196 ptr->fpmath = ix86_fpmath;
4197 ptr->branch_cost = ix86_branch_cost;
4198 ptr->tune_defaulted = ix86_tune_defaulted;
4199 ptr->arch_specified = ix86_arch_specified;
4200 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
4201 ptr->ix86_target_flags_explicit = target_flags_explicit;
4203 /* The fields are char but the variables are not; make sure the
4204 values fit in the fields. */
4205 gcc_assert (ptr->arch == ix86_arch);
4206 gcc_assert (ptr->schedule == ix86_schedule);
4207 gcc_assert (ptr->tune == ix86_tune);
4208 gcc_assert (ptr->fpmath == ix86_fpmath);
4209 gcc_assert (ptr->branch_cost == ix86_branch_cost);
4212 /* Restore the current options */
4215 ix86_function_specific_restore (struct cl_target_option *ptr)
4217 enum processor_type old_tune = ix86_tune;
4218 enum processor_type old_arch = ix86_arch;
4219 unsigned int ix86_arch_mask, ix86_tune_mask;
4222 ix86_arch = (enum processor_type) ptr->arch;
4223 ix86_schedule = (enum attr_cpu) ptr->schedule;
4224 ix86_tune = (enum processor_type) ptr->tune;
4225 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
4226 ix86_branch_cost = ptr->branch_cost;
4227 ix86_tune_defaulted = ptr->tune_defaulted;
4228 ix86_arch_specified = ptr->arch_specified;
4229 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
4230 target_flags_explicit = ptr->ix86_target_flags_explicit;
4232 /* Recreate the arch feature tests if the arch changed */
4233 if (old_arch != ix86_arch)
4235 ix86_arch_mask = 1u << ix86_arch;
4236 for (i = 0; i < X86_ARCH_LAST; ++i)
4237 ix86_arch_features[i]
4238 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
4241 /* Recreate the tune optimization tests */
4242 if (old_tune != ix86_tune)
4244 ix86_tune_mask = 1u << ix86_tune;
4245 for (i = 0; i < X86_TUNE_LAST; ++i)
4246 ix86_tune_features[i]
4247 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
4251 /* Print the current options */
4254 ix86_function_specific_print (FILE *file, int indent,
4255 struct cl_target_option *ptr)
4258 = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
4259 NULL, NULL, NULL, false);
4261 fprintf (file, "%*sarch = %d (%s)\n",
4264 ((ptr->arch < TARGET_CPU_DEFAULT_max)
4265 ? cpu_names[ptr->arch]
4268 fprintf (file, "%*stune = %d (%s)\n",
4271 ((ptr->tune < TARGET_CPU_DEFAULT_max)
4272 ? cpu_names[ptr->tune]
4275 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
4276 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
4277 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
4278 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
4282 fprintf (file, "%*s%s\n", indent, "", target_string);
4283 free (target_string);
4288 /* Inner function to process the attribute((target(...))): take an argument and
4289 set the current options from the argument. If we have a list, recursively go over the list. */
4293 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
4298 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
4299 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
4300 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
4301 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
4316 enum ix86_opt_type type;
4321 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
4322 IX86_ATTR_ISA ("abm", OPT_mabm),
4323 IX86_ATTR_ISA ("bmi", OPT_mbmi),
4324 IX86_ATTR_ISA ("tbm", OPT_mtbm),
4325 IX86_ATTR_ISA ("aes", OPT_maes),
4326 IX86_ATTR_ISA ("avx", OPT_mavx),
4327 IX86_ATTR_ISA ("mmx", OPT_mmmx),
4328 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
4329 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
4330 IX86_ATTR_ISA ("sse", OPT_msse),
4331 IX86_ATTR_ISA ("sse2", OPT_msse2),
4332 IX86_ATTR_ISA ("sse3", OPT_msse3),
4333 IX86_ATTR_ISA ("sse4", OPT_msse4),
4334 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
4335 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
4336 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
4337 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
4338 IX86_ATTR_ISA ("fma4", OPT_mfma4),
4339 IX86_ATTR_ISA ("xop", OPT_mxop),
4340 IX86_ATTR_ISA ("lwp", OPT_mlwp),
4341 IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
4342 IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
4343 IX86_ATTR_ISA ("f16c", OPT_mf16c),
4345 /* string options */
4346 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
4347 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
4348 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
4351 IX86_ATTR_YES ("cld",
4355 IX86_ATTR_NO ("fancy-math-387",
4356 OPT_mfancy_math_387,
4357 MASK_NO_FANCY_MATH_387),
4359 IX86_ATTR_YES ("ieee-fp",
4363 IX86_ATTR_YES ("inline-all-stringops",
4364 OPT_minline_all_stringops,
4365 MASK_INLINE_ALL_STRINGOPS),
4367 IX86_ATTR_YES ("inline-stringops-dynamically",
4368 OPT_minline_stringops_dynamically,
4369 MASK_INLINE_STRINGOPS_DYNAMICALLY),
4371 IX86_ATTR_NO ("align-stringops",
4372 OPT_mno_align_stringops,
4373 MASK_NO_ALIGN_STRINGOPS),
4375 IX86_ATTR_YES ("recip",
4381 /* If this is a list, recurse to get the options. */
4382 if (TREE_CODE (args) == TREE_LIST)
4386 for (; args; args = TREE_CHAIN (args))
4387 if (TREE_VALUE (args)
4388 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
4394 else if (TREE_CODE (args) != STRING_CST)
4397 /* Handle multiple arguments separated by commas. */
4398 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4400 while (next_optstr && *next_optstr != '\0')
4402 char *p = next_optstr;
4404 char *comma = strchr (next_optstr, ',');
4405 const char *opt_string;
4406 size_t len, opt_len;
4411 enum ix86_opt_type type = ix86_opt_unknown;
4417 len = comma - next_optstr;
4418 next_optstr = comma + 1;
4426 /* Recognize no-xxx. */
4427 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4436 /* Find the option. */
4439 for (i = 0; i < ARRAY_SIZE (attrs); i++)
4441 type = attrs[i].type;
4442 opt_len = attrs[i].len;
4443 if (ch == attrs[i].string[0]
4444 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
4445 && memcmp (p, attrs[i].string, opt_len) == 0)
4448 mask = attrs[i].mask;
4449 opt_string = attrs[i].string;
4454 /* Process the option. */
4457 error ("attribute(target(\"%s\")) is unknown", orig_p);
4461 else if (type == ix86_opt_isa)
4462 ix86_handle_option (opt, p, opt_set_p);
4464 else if (type == ix86_opt_yes || type == ix86_opt_no)
4466 if (type == ix86_opt_no)
4467 opt_set_p = !opt_set_p;
4470 target_flags |= mask;
4472 target_flags &= ~mask;
4475 else if (type == ix86_opt_str)
4479 error ("option(\"%s\") was already specified", opt_string);
4483 p_strings[opt] = xstrdup (p + opt_len);
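/* Illustrative attribute string handled by the loop above (hypothetical
   user code, added for clarity):

       __attribute__ ((target ("sse4.1,no-abm,arch=core2")))
       int foo (void);

   "sse4.1" toggles an ISA flag through ix86_handle_option, the "no-"
   prefix inverts opt_set_p, and "arch=core2" is stored in p_strings
   as a string option for later processing.  */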
4493 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
4496 ix86_valid_target_attribute_tree (tree args)
4498 const char *orig_arch_string = ix86_arch_string;
4499 const char *orig_tune_string = ix86_tune_string;
4500 const char *orig_fpmath_string = ix86_fpmath_string;
4501 int orig_tune_defaulted = ix86_tune_defaulted;
4502 int orig_arch_specified = ix86_arch_specified;
4503 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
4506 struct cl_target_option *def
4507 = TREE_TARGET_OPTION (target_option_default_node);
4509 /* Process each of the options on the chain. */
4510 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
4513 /* If the changed options are different from the default, rerun
4514 ix86_option_override_internal, and then save the options away.
4515 The string options are attribute options, and will be undone
4516 when we copy the save structure. */
4517 if (ix86_isa_flags != def->x_ix86_isa_flags
4518 || target_flags != def->x_target_flags
4519 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4520 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4521 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4523 /* If we are using the default tune= or arch=, undo the string assigned,
4524 and use the default. */
4525 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4526 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4527 else if (!orig_arch_specified)
4528 ix86_arch_string = NULL;
4530 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4531 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4532 else if (orig_tune_defaulted)
4533 ix86_tune_string = NULL;
4535 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4536 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4537 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
4538 else if (!TARGET_64BIT && TARGET_SSE)
4539 ix86_fpmath_string = "sse,387";
4541 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
4542 ix86_option_override_internal (false);
4544 /* Add any builtin functions with the new isa if any. */
4545 ix86_add_new_builtins (ix86_isa_flags);
4547 /* Save the current options unless we are validating options for
4549 t = build_target_option_node ();
4551 ix86_arch_string = orig_arch_string;
4552 ix86_tune_string = orig_tune_string;
4553 ix86_fpmath_string = orig_fpmath_string;
4555 /* Free up memory allocated to hold the strings */
4556 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4557 if (option_strings[i])
4558 free (option_strings[i]);
4564 /* Hook to validate attribute((target("string"))). */
4567 ix86_valid_target_attribute_p (tree fndecl,
4568 tree ARG_UNUSED (name),
4570 int ARG_UNUSED (flags))
4572 struct cl_target_option cur_target;
4574 tree old_optimize = build_optimization_node ();
4575 tree new_target, new_optimize;
4576 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4578 /* If the function changed the optimization levels as well as setting target
4579 options, start with the optimizations specified. */
4580 if (func_optimize && func_optimize != old_optimize)
4581 cl_optimization_restore (&global_options,
4582 TREE_OPTIMIZATION (func_optimize));
4584 /* The target attributes may also change some optimization flags, so update
4585 the optimization options if necessary. */
4586 cl_target_option_save (&cur_target, &global_options);
4587 new_target = ix86_valid_target_attribute_tree (args);
4588 new_optimize = build_optimization_node ();
4595 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4597 if (old_optimize != new_optimize)
4598 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4601 cl_target_option_restore (&global_options, &cur_target);
4603 if (old_optimize != new_optimize)
4604 cl_optimization_restore (&global_options,
4605 TREE_OPTIMIZATION (old_optimize));
4611 /* Hook to determine if one function can safely inline another. */
4614 ix86_can_inline_p (tree caller, tree callee)
4617 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4618 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4620 /* If callee has no option attributes, then it is ok to inline. */
4624 /* If caller has no option attributes, but callee does, then it is not ok to
4626 else if (!caller_tree)
4631 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4632 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4634 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4 function
4635 can inline an SSE2 function, but an SSE2 function can't inline an SSE4
4637 if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
4638 != callee_opts->x_ix86_isa_flags)
4641 /* See if we have the same non-isa options. */
4642 else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
4645 /* See if arch, tune, etc. are the same. */
4646 else if (caller_opts->arch != callee_opts->arch)
4649 else if (caller_opts->tune != callee_opts->tune)
4652 else if (caller_opts->fpmath != callee_opts->fpmath)
4655 else if (caller_opts->branch_cost != callee_opts->branch_cost)
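/* Worked example for the subset test above (added for illustration): if
   the caller was compiled with -msse4 and the callee with -msse2, then
   (caller & callee) == callee holds and inlining is allowed; with the
   options reversed the test fails, since inlining would let SSE4 code
   leak into an SSE2-only caller.  */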
4666 /* Remember the last target of ix86_set_current_function. */
4667 static GTY(()) tree ix86_previous_fndecl;
4669 /* Establish appropriate back-end context for processing the function
4670 FNDECL. The argument might be NULL to indicate processing at top
4671 level, outside of any function scope. */
4673 ix86_set_current_function (tree fndecl)
4675 /* Only change the context if the function changes. This hook is called
4676 several times in the course of compiling a function, and we don't want to
4677 slow things down too much or call target_reinit when it isn't safe. */
4678 if (fndecl && fndecl != ix86_previous_fndecl)
4680 tree old_tree = (ix86_previous_fndecl
4681 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4684 tree new_tree = (fndecl
4685 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4688 ix86_previous_fndecl = fndecl;
4689 if (old_tree == new_tree)
4694 cl_target_option_restore (&global_options,
4695 TREE_TARGET_OPTION (new_tree));
4701 struct cl_target_option *def
4702 = TREE_TARGET_OPTION (target_option_current_node);
4704 cl_target_option_restore (&global_options, def);
4711 /* Return true if this goes in large data/bss. */
4714 ix86_in_large_data_p (tree exp)
4716 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4719 /* Functions are never large data. */
4720 if (TREE_CODE (exp) == FUNCTION_DECL)
4723 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4725 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4726 if (strcmp (section, ".ldata") == 0
4727 || strcmp (section, ".lbss") == 0)
4733 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4735 /* If this is an incomplete type with size 0, then we can't put it
4736 in data because it might be too big when completed. */
4737 if (!size || size > ix86_section_threshold)
4744 /* Switch to the appropriate section for output of DECL.
4745 DECL is either a `VAR_DECL' node or a constant of some sort.
4746 RELOC indicates whether forming the initial value of DECL requires
4747 link-time relocations. */
4749 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4753 x86_64_elf_select_section (tree decl, int reloc,
4754 unsigned HOST_WIDE_INT align)
4756 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4757 && ix86_in_large_data_p (decl))
4759 const char *sname = NULL;
4760 unsigned int flags = SECTION_WRITE;
4761 switch (categorize_decl_for_section (decl, reloc))
4766 case SECCAT_DATA_REL:
4767 sname = ".ldata.rel";
4769 case SECCAT_DATA_REL_LOCAL:
4770 sname = ".ldata.rel.local";
4772 case SECCAT_DATA_REL_RO:
4773 sname = ".ldata.rel.ro";
4775 case SECCAT_DATA_REL_RO_LOCAL:
4776 sname = ".ldata.rel.ro.local";
4780 flags |= SECTION_BSS;
4783 case SECCAT_RODATA_MERGE_STR:
4784 case SECCAT_RODATA_MERGE_STR_INIT:
4785 case SECCAT_RODATA_MERGE_CONST:
4789 case SECCAT_SRODATA:
4796 /* We don't split these for the medium model. Place them into
4797 default sections and hope for the best. */
4802 /* We might get called with string constants, but get_named_section
4803 doesn't like them as they are not DECLs. Also, we need to set
4804 flags in that case. */
4806 return get_section (sname, flags, NULL);
4807 return get_named_section (decl, sname, reloc);
4810 return default_elf_select_section (decl, reloc, align);
4813 /* Build up a unique section name, expressed as a
4814 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4815 RELOC indicates whether the initial value of EXP requires
4816 link-time relocations. */
4818 static void ATTRIBUTE_UNUSED
4819 x86_64_elf_unique_section (tree decl, int reloc)
4821 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4822 && ix86_in_large_data_p (decl))
4824 const char *prefix = NULL;
4825 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4826 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4828 switch (categorize_decl_for_section (decl, reloc))
4831 case SECCAT_DATA_REL:
4832 case SECCAT_DATA_REL_LOCAL:
4833 case SECCAT_DATA_REL_RO:
4834 case SECCAT_DATA_REL_RO_LOCAL:
4835 prefix = one_only ? ".ld" : ".ldata";
4838 prefix = one_only ? ".lb" : ".lbss";
4841 case SECCAT_RODATA_MERGE_STR:
4842 case SECCAT_RODATA_MERGE_STR_INIT:
4843 case SECCAT_RODATA_MERGE_CONST:
4844 prefix = one_only ? ".lr" : ".lrodata";
4846 case SECCAT_SRODATA:
4853 /* We don't split these for the medium model. Place them into
4854 default sections and hope for the best. */
4859 const char *name, *linkonce;
4862 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4863 name = targetm.strip_name_encoding (name);
4865 /* If we're using one_only, then there needs to be a .gnu.linkonce
4866 prefix to the section name. */
4867 linkonce = one_only ? ".gnu.linkonce" : "";
4869 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4871 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4875 default_unique_section (decl, reloc);
4878 #ifdef COMMON_ASM_OP
4879 /* This says how to output assembler code to declare an
4880 uninitialized external linkage data object.
4882 For medium model x86-64 we need to use the .largecomm directive for large objects. */
4885 x86_elf_aligned_common (FILE *file,
4886 const char *name, unsigned HOST_WIDE_INT size,
4889 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4890 && size > (unsigned int)ix86_section_threshold)
4891 fputs (".largecomm\t", file);
4893 fputs (COMMON_ASM_OP, file);
4894 assemble_name (file, name);
4895 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4896 size, align / BITS_PER_UNIT);
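/* Example of the output produced above (added for illustration): for a
   16-byte common object "buf" with 8-byte alignment under
   -mcmodel=medium and a large-data threshold below 16, this emits

       .largecomm	buf,16,8

   and otherwise falls back to the ordinary COMMON_ASM_OP directive.  */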
4900 /* Utility function for targets to use in implementing
4901 ASM_OUTPUT_ALIGNED_BSS. */
4904 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4905 const char *name, unsigned HOST_WIDE_INT size,
4908 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4909 && size > (unsigned int)ix86_section_threshold)
4910 switch_to_section (get_named_section (decl, ".lbss", 0));
4912 switch_to_section (bss_section);
4913 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4914 #ifdef ASM_DECLARE_OBJECT_NAME
4915 last_assemble_variable_decl = decl;
4916 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4918 /* The standard thing is just to output a label for the object. */
4919 ASM_OUTPUT_LABEL (file, name);
4920 #endif /* ASM_DECLARE_OBJECT_NAME */
4921 ASM_OUTPUT_SKIP (file, size ? size : 1);
4924 static const struct default_options ix86_option_optimization_table[] =
4926 /* Turn off -fschedule-insns by default. It tends to make the
4927 problem with not enough registers even worse. */
4928 #ifdef INSN_SCHEDULING
4929 { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
4932 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4933 SUBTARGET_OPTIMIZATION_OPTIONS,
4935 { OPT_LEVELS_NONE, 0, NULL, 0 }
4938 /* Implement TARGET_OPTION_INIT_STRUCT. */
4941 ix86_option_init_struct (struct gcc_options *opts)
4944 /* The Darwin libraries never set errno, so we might as well
4945 avoid calling them when that's the only reason we would. */
4946 opts->x_flag_errno_math = 0;
4948 opts->x_flag_pcc_struct_return = 2;
4949 opts->x_flag_asynchronous_unwind_tables = 2;
4950 opts->x_flag_vect_cost_model = 1;
4953 /* Decide whether we must probe the stack before any space allocation
4954 on this target. It's essentially TARGET_STACK_PROBE except when
4955 -fstack-check causes the stack to be already probed differently. */
4958 ix86_target_stack_probe (void)
4960 /* Do not probe the stack twice if static stack checking is enabled. */
4961 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
4964 return TARGET_STACK_PROBE;
4967 /* Decide whether we can make a sibling call to a function. DECL is the
4968 declaration of the function being targeted by the call and EXP is the
4969 CALL_EXPR representing the call. */
4972 ix86_function_ok_for_sibcall (tree decl, tree exp)
4974 tree type, decl_or_type;
4977 /* If we are generating position-independent code, we cannot sibcall
4978 optimize any indirect call, or a direct call to a global function,
4979 as the PLT requires %ebx be live. (Darwin does not have a PLT.) */
4983 && (!decl || !targetm.binds_local_p (decl)))
4986 /* If we need to align the outgoing stack, then sibcalling would
4987 unalign the stack, which may break the called function. */
4988 if (ix86_minimum_incoming_stack_boundary (true)
4989 < PREFERRED_STACK_BOUNDARY)
4994 decl_or_type = decl;
4995 type = TREE_TYPE (decl);
4999 /* We're looking at the CALL_EXPR, we need the type of the function. */
5000 type = CALL_EXPR_FN (exp); /* pointer expression */
5001 type = TREE_TYPE (type); /* pointer type */
5002 type = TREE_TYPE (type); /* function type */
5003 decl_or_type = type;
5006 /* Check that the return value locations are the same. Like
5007 if we are returning floats on the 80387 register stack, we cannot
5008 make a sibcall from a function that doesn't return a float to a
5009 function that does or, conversely, from a function that does return
5010 a float to a function that doesn't; the necessary stack adjustment
5011 would not be executed. This is also the place we notice
5012 differences in the return value ABI. Note that it is ok for one
5013 of the functions to have void return type as long as the return
5014 value of the other is passed in a register. */
5015 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
5016 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
5018 if (STACK_REG_P (a) || STACK_REG_P (b))
5020 if (!rtx_equal_p (a, b))
5023 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
5025 /* Disable sibcall if we need to generate vzeroupper after
5027 if (TARGET_VZEROUPPER
5028 && cfun->machine->callee_return_avx256_p
5029 && !cfun->machine->caller_return_avx256_p)
5032 else if (!rtx_equal_p (a, b))
5037 /* The SYSV ABI has more call-clobbered registers;
5038 disallow sibcalls from MS to SYSV. */
5039 if (cfun->machine->call_abi == MS_ABI
5040 && ix86_function_type_abi (type) == SYSV_ABI)
5045 /* If this call is indirect, we'll need to be able to use a
5046 call-clobbered register for the address of the target function.
5047 Make sure that all such registers are not used for passing
5048 parameters. Note that DLLIMPORT functions are indirect. */
5050 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
5052 if (ix86_function_regparm (type, NULL) >= 3)
5054 /* ??? Need to count the actual number of registers to be used,
5055 not the possible number of registers. Fix later. */
5061 /* Otherwise okay. That also includes certain types of indirect calls. */
5065 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
5066 and "sseregparm" calling convention attributes;
5067 arguments as in struct attribute_spec.handler. */
5070 ix86_handle_cconv_attribute (tree *node, tree name,
5072 int flags ATTRIBUTE_UNUSED,
5075 if (TREE_CODE (*node) != FUNCTION_TYPE
5076 && TREE_CODE (*node) != METHOD_TYPE
5077 && TREE_CODE (*node) != FIELD_DECL
5078 && TREE_CODE (*node) != TYPE_DECL)
5080 warning (OPT_Wattributes, "%qE attribute only applies to functions",
5082 *no_add_attrs = true;
5086 /* Can combine regparm with all attributes but fastcall. */
5087 if (is_attribute_p ("regparm", name))
5091 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5093 error ("fastcall and regparm attributes are not compatible");
5096 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5098 error ("regparam and thiscall attributes are not compatible");
5101 cst = TREE_VALUE (args);
5102 if (TREE_CODE (cst) != INTEGER_CST)
5104 warning (OPT_Wattributes,
5105 "%qE attribute requires an integer constant argument",
5107 *no_add_attrs = true;
5109 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
5111 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
5113 *no_add_attrs = true;
5121 /* Do not warn when emulating the MS ABI. */
5122 if ((TREE_CODE (*node) != FUNCTION_TYPE
5123 && TREE_CODE (*node) != METHOD_TYPE)
5124 || ix86_function_type_abi (*node) != MS_ABI)
5125 warning (OPT_Wattributes, "%qE attribute ignored",
5127 *no_add_attrs = true;
5131 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
5132 if (is_attribute_p ("fastcall", name))
5134 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5136 error ("fastcall and cdecl attributes are not compatible");
5138 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5140 error ("fastcall and stdcall attributes are not compatible");
5142 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
5144 error ("fastcall and regparm attributes are not compatible");
5146 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5148 error ("fastcall and thiscall attributes are not compatible");
5152 /* Can combine stdcall with fastcall (redundant), regparm and sseregparm. */
5154 else if (is_attribute_p ("stdcall", name))
5156 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5158 error ("stdcall and cdecl attributes are not compatible");
5160 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5162 error ("stdcall and fastcall attributes are not compatible");
5164 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5166 error ("stdcall and thiscall attributes are not compatible");
5170 /* Can combine cdecl with regparm and sseregparm. */
5171 else if (is_attribute_p ("cdecl", name))
5173 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5175 error ("stdcall and cdecl attributes are not compatible");
5177 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5179 error ("fastcall and cdecl attributes are not compatible");
5181 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5183 error ("cdecl and thiscall attributes are not compatible");
5186 else if (is_attribute_p ("thiscall", name))
5188 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
5189 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
5191 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5193 error ("stdcall and thiscall attributes are not compatible");
5195 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5197 error ("fastcall and thiscall attributes are not compatible");
5199 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5201 error ("cdecl and thiscall attributes are not compatible");
5205 /* Can combine sseregparm with all attributes. */
5210 /* Return 0 if the attributes for two types are incompatible, 1 if they
5211 are compatible, and 2 if they are nearly compatible (which causes a
5212 warning to be generated). */
5215 ix86_comp_type_attributes (const_tree type1, const_tree type2)
5217 /* Check for mismatch of non-default calling convention. */
5218 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
5220 if (TREE_CODE (type1) != FUNCTION_TYPE
5221 && TREE_CODE (type1) != METHOD_TYPE)
5224 /* Check for mismatched fastcall/regparm types. */
5225 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
5226 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
5227 || (ix86_function_regparm (type1, NULL)
5228 != ix86_function_regparm (type2, NULL)))
5231 /* Check for mismatched sseregparm types. */
5232 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
5233 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
5236 /* Check for mismatched thiscall types. */
5237 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
5238 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
5241 /* Check for mismatched return types (cdecl vs stdcall). */
5242 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
5243 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
5249 /* Return the regparm value for a function with the indicated TYPE and DECL.
5250 DECL may be NULL when calling function indirectly
5251 or considering a libcall. */
5254 ix86_function_regparm (const_tree type, const_tree decl)
5260 return (ix86_function_type_abi (type) == SYSV_ABI
5261 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5263 regparm = ix86_regparm;
5264 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
5267 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
5271 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
5274 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
5277 /* Use register calling convention for local functions when possible. */
5279 && TREE_CODE (decl) == FUNCTION_DECL
5281 && !(profile_flag && !flag_fentry))
5283 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5284 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
5287 int local_regparm, globals = 0, regno;
5289 /* Make sure no regparm register is taken by a
5290 fixed register variable. */
5291 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
5292 if (fixed_regs[local_regparm])
5295 /* We don't want to use regparm(3) for nested functions as
5296 these use a static chain pointer in the third argument. */
5297 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
5300 /* In 32-bit mode save a register for the split stack. */
5301 if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
5304 /* Each fixed register usage increases register pressure,
5305 so fewer registers should be used for argument passing.
5306 This functionality can be overridden by an explicit regparm value. */
5308 for (regno = 0; regno <= DI_REG; regno++)
5309 if (fixed_regs[regno])
5313 = globals < local_regparm ? local_regparm - globals : 0;
5315 if (local_regparm > regparm)
5316 regparm = local_regparm;
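/* Usage sketch (hypothetical user code, added for illustration): given

       int __attribute__ ((regparm (3))) add3 (int a, int b, int c);

   the first three integer arguments arrive in EAX, EDX and ECX rather
   than on the stack; the code above may choose such a convention
   automatically for local functions when enough registers are free.  */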
5323 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
5324 DFmode (2) arguments in SSE registers for a function with the
5325 indicated TYPE and DECL. DECL may be NULL when calling function
5326 indirectly or considering a libcall. Otherwise return 0. */
5329 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
5331 gcc_assert (!TARGET_64BIT);
5333 /* Use SSE registers to pass SFmode and DFmode arguments if requested
5334 by the sseregparm attribute. */
5335 if (TARGET_SSEREGPARM
5336 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
5343 error ("calling %qD with attribute sseregparm without "
5344 "SSE/SSE2 enabled", decl);
5346 error ("calling %qT with attribute sseregparm without "
5347 "SSE/SSE2 enabled", type);
5355 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
5356 (and DFmode for SSE2) arguments in SSE registers. */
5357 if (decl && TARGET_SSE_MATH && optimize
5358 && !(profile_flag && !flag_fentry))
5360 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5361 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
5363 return TARGET_SSE2 ? 2 : 1;
5369 /* Return true if EAX is live at the start of the function. Used by
5370 ix86_expand_prologue to determine if we need special help before
5371 calling allocate_stack_worker. */
5374 ix86_eax_live_at_start_p (void)
5376 /* Cheat. Don't bother working forward from ix86_function_regparm
5377 to the function type to whether an actual argument is located in
5378 eax. Instead just look at cfg info, which is still close enough
5379 to correct at this point. This gives false positives for broken
5380 functions that might use uninitialized data that happens to be
5381 allocated in eax, but who cares? */
5382 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
5385 /* Value is the number of bytes of arguments automatically
5386 popped when returning from a subroutine call.
5387 FUNDECL is the declaration node of the function (as a tree),
5388 FUNTYPE is the data type of the function (as a tree),
5389 or for a library call it is an identifier node for the subroutine name.
5390 SIZE is the number of bytes of arguments passed on the stack.
5392 On the 80386, the RTD insn may be used to pop them if the number
5393 of args is fixed, but if the number is variable then the caller
5394 must pop them all. RTD can't be used for library calls now
5395 because the library is compiled with the Unix compiler.
5396 Use of RTD is a selectable option, since it is incompatible with
5397 standard Unix calling sequences. If the option is not selected,
5398 the caller must always pop the args.
5400 The attribute stdcall is equivalent to RTD on a per module basis. */
5403 ix86_return_pops_args (tree fundecl, tree funtype, int size)
5407 /* None of the 64-bit ABIs pop arguments. */
5411 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
5413 /* Cdecl functions override -mrtd, and never pop the stack. */
5414 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
5417 /* Stdcall and fastcall functions will pop the stack if not variable args. */
5418 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
5419 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
5420 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
5423 if (rtd && ! stdarg_p (funtype))
5427 /* Lose any fake structure return argument if it is passed on the stack. */
5428 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
5429 && !KEEP_AGGREGATE_RETURN_POINTER)
5431 int nregs = ix86_function_regparm (funtype, fundecl);
5433 return GET_MODE_SIZE (Pmode);
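/* Worked example (added for illustration): for the 32-bit declaration

       void __attribute__ ((stdcall)) f (int a, int b);

   SIZE is 8, so this function returns 8 and the callee pops its own
   arguments with "ret $8"; a plain cdecl function returns 0 here and
   leaves the eight bytes for the caller to remove.  */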
5439 /* Argument support functions. */
5441 /* Return true when a register may be used to pass function parameters. */
5443 ix86_function_arg_regno_p (int regno)
5446 const int *parm_regs;
5451 return (regno < REGPARM_MAX
5452 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
5454 return (regno < REGPARM_MAX
5455 || (TARGET_MMX && MMX_REGNO_P (regno)
5456 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
5457 || (TARGET_SSE && SSE_REGNO_P (regno)
5458 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
5463 if (SSE_REGNO_P (regno) && TARGET_SSE)
5468 if (TARGET_SSE && SSE_REGNO_P (regno)
5469 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
5473 /* TODO: The function should depend on current function ABI but
5474 builtins.c would need updating then. Therefore we use the default ABI. */
5477 /* RAX is used as hidden argument to va_arg functions. */
5478 if (ix86_abi == SYSV_ABI && regno == AX_REG)
5481 if (ix86_abi == MS_ABI)
5482 parm_regs = x86_64_ms_abi_int_parameter_registers;
5484 parm_regs = x86_64_int_parameter_registers;
5485 for (i = 0; i < (ix86_abi == MS_ABI
5486 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
5487 if (regno == parm_regs[i])
5492 /* Return true if we do not know how to pass TYPE solely in registers. */
5495 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
5497 if (must_pass_in_stack_var_size_or_pad (mode, type))
5500 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
5501 The layout_type routine is crafty and tries to trick us into passing
5502 currently unsupported vector types on the stack by using TImode. */
5503 return (!TARGET_64BIT && mode == TImode
5504 && type && TREE_CODE (type) != VECTOR_TYPE);
5507 /* Return the size, in bytes, of the area reserved for arguments passed
5508 in registers for the function represented by FNDECL, depending on the ABI used. */
5511 ix86_reg_parm_stack_space (const_tree fndecl)
5513 enum calling_abi call_abi = SYSV_ABI;
5514 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
5515 call_abi = ix86_function_abi (fndecl);
5517 call_abi = ix86_function_type_abi (fndecl);
5518 if (call_abi == MS_ABI)
5523 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
5526 ix86_function_type_abi (const_tree fntype)
5528 if (TARGET_64BIT && fntype != NULL)
5530 enum calling_abi abi = ix86_abi;
5531 if (abi == SYSV_ABI)
5533 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5536 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5544 ix86_function_ms_hook_prologue (const_tree fn)
5546 if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
5548 if (decl_function_context (fn) != NULL_TREE)
5549 error_at (DECL_SOURCE_LOCATION (fn),
5550 "ms_hook_prologue is not compatible with nested function");
5557 static enum calling_abi
5558 ix86_function_abi (const_tree fndecl)
5562 return ix86_function_type_abi (TREE_TYPE (fndecl));
5565 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
5568 ix86_cfun_abi (void)
5570 if (! cfun || ! TARGET_64BIT)
5572 return cfun->machine->call_abi;
5575 /* Write the extra assembler code needed to declare a function properly. */
5578 ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
5581 bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
5585 int i, filler_count = (TARGET_64BIT ? 32 : 16);
5586 unsigned int filler_cc = 0xcccccccc;
5588 for (i = 0; i < filler_count; i += 4)
5589 fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
5592 #ifdef SUBTARGET_ASM_UNWIND_INIT
5593 SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
5596 ASM_OUTPUT_LABEL (asm_out_file, fname);
5598 /* Output magic byte marker, if hot-patch attribute is set. */
5603 /* leaq [%rsp + 0], %rsp */
5604 asm_fprintf (asm_out_file, ASM_BYTE
5605 "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
/* movl.s %edi, %edi
   push   %ebp
   movl.s %esp, %ebp */
5612 asm_fprintf (asm_out_file, ASM_BYTE
5613 "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
5619 extern void init_regs (void);
/* Implementation of the call ABI switching target hook.  The call
   register sets specific to FNDECL are established here.  See also
   CONDITIONAL_REGISTER_USAGE for more details.  */
5625 ix86_call_abi_override (const_tree fndecl)
5627 if (fndecl == NULL_TREE)
5628 cfun->machine->call_abi = ix86_abi;
5630 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
/* The MS and SYSV ABIs have different sets of call-used registers.
   Avoid the expensive re-initialization done by init_regs each time we
   switch function context, since it is needed only during RTL
   expansion.  */
5637 ix86_maybe_switch_abi (void)
5640 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5644 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5645 for a call to a function whose data type is FNTYPE.
5646 For a library call, FNTYPE is 0. */
5649 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5650 tree fntype, /* tree ptr for function decl */
5651 rtx libname, /* SYMBOL_REF of library name or 0 */
5655 struct cgraph_local_info *i;
5658 memset (cum, 0, sizeof (*cum));
5660 /* Initialize for the current callee. */
5663 cfun->machine->callee_pass_avx256_p = false;
5664 cfun->machine->callee_return_avx256_p = false;
5669 i = cgraph_local_info (fndecl);
5670 cum->call_abi = ix86_function_abi (fndecl);
5671 fnret_type = TREE_TYPE (TREE_TYPE (fndecl));
5676 cum->call_abi = ix86_function_type_abi (fntype);
5678 fnret_type = TREE_TYPE (fntype);
5683 if (TARGET_VZEROUPPER && fnret_type)
5685 rtx fnret_value = ix86_function_value (fnret_type, fntype,
5687 if (function_pass_avx256_p (fnret_value))
5689 /* The return value of this function uses 256bit AVX modes. */
5690 cfun->machine->use_avx256_p = true;
5692 cfun->machine->callee_return_avx256_p = true;
5694 cfun->machine->caller_return_avx256_p = true;
5698 cum->caller = caller;
5700 /* Set up the number of registers to use for passing arguments. */
5702 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5703 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5704 "or subtarget optimization implying it");
5705 cum->nregs = ix86_regparm;
5708 cum->nregs = (cum->call_abi == SYSV_ABI
5709 ? X86_64_REGPARM_MAX
5710 : X86_64_MS_REGPARM_MAX);
5714 cum->sse_nregs = SSE_REGPARM_MAX;
5717 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5718 ? X86_64_SSE_REGPARM_MAX
5719 : X86_64_MS_SSE_REGPARM_MAX);
5723 cum->mmx_nregs = MMX_REGPARM_MAX;
5724 cum->warn_avx = true;
5725 cum->warn_sse = true;
5726 cum->warn_mmx = true;
/* Because the type might mismatch between caller and callee, we need
   to use the actual type of the function for local calls.
   FIXME: cgraph_analyze can be told to record whether a function uses
   va_start, so for local functions maybe_vaarg can be made more
   aggressive.
   FIXME: once the type system is fixed, we won't need this code
   anymore.  */
5735 fntype = TREE_TYPE (fndecl);
5736 cum->maybe_vaarg = (fntype
5737 ? (!prototype_p (fntype) || stdarg_p (fntype))
5742 /* If there are variable arguments, then we won't pass anything
5743 in registers in 32-bit mode. */
5744 if (stdarg_p (fntype))
5755 /* Use ecx and edx registers if function has fastcall attribute,
5756 else look for regparm information. */
5759 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5762 cum->fastcall = 1; /* Same first register as in fastcall. */
5764 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5770 cum->nregs = ix86_function_regparm (fntype, fndecl);
5773 /* Set up the number of SSE registers used for passing SFmode
5774 and DFmode arguments. Warn for mismatching ABI. */
5775 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
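/* Worked example (editorial, assuming a 32-bit target): for

       void __attribute__ ((fastcall)) f (int a, int b, int c);

   the attribute handling above leaves two parameter registers starting
   at ECX, so a goes in %ecx, b in %edx, and c is pushed on the stack.
   A "thiscall" method differs only in that %ecx is effectively
   reserved for the "this" pointer.  */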
5779 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5780 But in the case of vector types, it is some vector mode.
5782 When we have only some of our vector isa extensions enabled, then there
5783 are some modes for which vector_mode_supported_p is false. For these
5784 modes, the generic vector support in gcc will choose some non-vector mode
5785 in order to implement the type. By computing the natural mode, we'll
5786 select the proper ABI location for the operand and not depend on whatever
5787 the middle-end decides to do with these vector types.
   The middle-end can't deal with vector types larger than 16 bytes.
   In that case, we return the original mode and warn about the ABI
   change if CUM isn't NULL.  */
5793 static enum machine_mode
5794 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
5796 enum machine_mode mode = TYPE_MODE (type);
5798 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5800 HOST_WIDE_INT size = int_size_in_bytes (type);
5801 if ((size == 8 || size == 16 || size == 32)
5802 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5803 && TYPE_VECTOR_SUBPARTS (type) > 1)
5805 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5807 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5808 mode = MIN_MODE_VECTOR_FLOAT;
5810 mode = MIN_MODE_VECTOR_INT;
5812 /* Get the mode which has this inner mode and number of units. */
5813 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5814 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5815 && GET_MODE_INNER (mode) == innermode)
5817 if (size == 32 && !TARGET_AVX)
5819 static bool warnedavx;
5826 warning (0, "AVX vector argument without AVX "
5827 "enabled changes the ABI");
5829 return TYPE_MODE (type);
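/* Editorial sketch of the warning path above (hypothetical user code):

       typedef float v8sf __attribute__ ((vector_size (32)));
       void f (v8sf x);

   Compiled without -mavx there is no supported 32-byte vector mode, so
   type_natural_mode returns the original TYPE_MODE and warns once that
   the ABI differs from an AVX-enabled compilation.  */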
5842 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5843 this may not agree with the mode that the type system has chosen for the
5844 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5845 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5848 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5853 if (orig_mode != BLKmode)
5854 tmp = gen_rtx_REG (orig_mode, regno);
5857 tmp = gen_rtx_REG (mode, regno);
5858 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5859 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
/* x86-64 register passing implementation.  See the x86-64 psABI for
   details.  The goal of this code is to classify each eightbyte of an
   incoming argument by register class and assign registers
   accordingly.  */
5869 /* Return the union class of CLASS1 and CLASS2.
5870 See the x86-64 PS ABI for details. */
5872 static enum x86_64_reg_class
5873 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5875 /* Rule #1: If both classes are equal, this is the resulting class. */
5876 if (class1 == class2)
/* Rule #2: If one of the classes is NO_CLASS, the resulting class is
   the other class.  */
5881 if (class1 == X86_64_NO_CLASS)
5883 if (class2 == X86_64_NO_CLASS)
5886 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5887 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5888 return X86_64_MEMORY_CLASS;
5890 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5891 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5892 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5893 return X86_64_INTEGERSI_CLASS;
5894 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5895 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5896 return X86_64_INTEGER_CLASS;
/* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
   MEMORY is used.  */
5900 if (class1 == X86_64_X87_CLASS
5901 || class1 == X86_64_X87UP_CLASS
5902 || class1 == X86_64_COMPLEX_X87_CLASS
5903 || class2 == X86_64_X87_CLASS
5904 || class2 == X86_64_X87UP_CLASS
5905 || class2 == X86_64_COMPLEX_X87_CLASS)
5906 return X86_64_MEMORY_CLASS;
5908 /* Rule #6: Otherwise class SSE is used. */
5909 return X86_64_SSE_CLASS;
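/* Worked example for the rules above (editorial): classifying

       union u { int i; float f; };

   merges INTEGERSI (from i) with SSESF (from f), which rule #4
   resolves to INTEGERSI, so the union travels in the low 32 bits of a
   GPR.  Any merge involving X87, X87UP or COMPLEX_X87 falls into rule
   #5 and forces the whole object into memory.  */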
5912 /* Classify the argument of type TYPE and mode MODE.
5913 CLASSES will be filled by the register class used to pass each word
5914 of the operand. The number of words is returned. In case the parameter
5915 should be passed in memory, 0 is returned. As a special case for zero
5916 sized containers, classes[0] will be NO_CLASS and 1 is returned.
   BIT_OFFSET is used internally for handling records; it specifies the
   offset in bits, modulo 256, to avoid overflow cases.

   See the x86-64 PS ABI for details.  */
5925 classify_argument (enum machine_mode mode, const_tree type,
5926 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5928 HOST_WIDE_INT bytes =
5929 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5930 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5932 /* Variable sized entities are always passed/returned in memory. */
5936 if (mode != VOIDmode
5937 && targetm.calls.must_pass_in_stack (mode, type))
5940 if (type && AGGREGATE_TYPE_P (type))
5944 enum x86_64_reg_class subclasses[MAX_CLASSES];
5946 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5950 for (i = 0; i < words; i++)
5951 classes[i] = X86_64_NO_CLASS;
/* Zero sized arrays or structures are NO_CLASS.  We return 0 to
   signal the memory class, so handle this as a special case.  */
5957 classes[0] = X86_64_NO_CLASS;
5961 /* Classify each field of record and merge classes. */
5962 switch (TREE_CODE (type))
/* And now merge the fields of the structure.  */
5966 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5968 if (TREE_CODE (field) == FIELD_DECL)
5972 if (TREE_TYPE (field) == error_mark_node)
5975 /* Bitfields are always classified as integer. Handle them
5976 early, since later code would consider them to be
5977 misaligned integers. */
5978 if (DECL_BIT_FIELD (field))
5980 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5981 i < ((int_bit_position (field) + (bit_offset % 64))
5982 + tree_low_cst (DECL_SIZE (field), 0)
5985 merge_classes (X86_64_INTEGER_CLASS,
5992 type = TREE_TYPE (field);
5994 /* Flexible array member is ignored. */
5995 if (TYPE_MODE (type) == BLKmode
5996 && TREE_CODE (type) == ARRAY_TYPE
5997 && TYPE_SIZE (type) == NULL_TREE
5998 && TYPE_DOMAIN (type) != NULL_TREE
5999 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
6004 if (!warned && warn_psabi)
6007 inform (input_location,
6008 "the ABI of passing struct with"
6009 " a flexible array member has"
6010 " changed in GCC 4.4");
6014 num = classify_argument (TYPE_MODE (type), type,
6016 (int_bit_position (field)
6017 + bit_offset) % 256);
6020 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6021 for (i = 0; i < num && (i + pos) < words; i++)
6023 merge_classes (subclasses[i], classes[i + pos]);
6030 /* Arrays are handled as small records. */
6033 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
6034 TREE_TYPE (type), subclasses, bit_offset);
6038 /* The partial classes are now full classes. */
6039 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
6040 subclasses[0] = X86_64_SSE_CLASS;
6041 if (subclasses[0] == X86_64_INTEGERSI_CLASS
6042 && !((bit_offset % 64) == 0 && bytes == 4))
6043 subclasses[0] = X86_64_INTEGER_CLASS;
6045 for (i = 0; i < words; i++)
6046 classes[i] = subclasses[i % num];
6051 case QUAL_UNION_TYPE:
/* Unions are similar to RECORD_TYPE, but the offset is always 0.  */
6054 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6056 if (TREE_CODE (field) == FIELD_DECL)
6060 if (TREE_TYPE (field) == error_mark_node)
6063 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
6064 TREE_TYPE (field), subclasses,
6068 for (i = 0; i < num; i++)
6069 classes[i] = merge_classes (subclasses[i], classes[i]);
/* When the size is > 16 bytes, if the first class isn't
   X86_64_SSE_CLASS or any of the others aren't
   X86_64_SSEUP_CLASS, everything should be passed in memory.  */
6084 if (classes[0] != X86_64_SSE_CLASS)
6087 for (i = 1; i < words; i++)
6088 if (classes[i] != X86_64_SSEUP_CLASS)
6092 /* Final merger cleanup. */
6093 for (i = 0; i < words; i++)
/* If one class is MEMORY, everything should be passed in memory.  */
6097 if (classes[i] == X86_64_MEMORY_CLASS)
/* X86_64_SSEUP_CLASS should always be preceded by
   X86_64_SSE_CLASS or X86_64_SSEUP_CLASS.  */
6102 if (classes[i] == X86_64_SSEUP_CLASS
6103 && classes[i - 1] != X86_64_SSE_CLASS
6104 && classes[i - 1] != X86_64_SSEUP_CLASS)
6106 /* The first one should never be X86_64_SSEUP_CLASS. */
6107 gcc_assert (i != 0);
6108 classes[i] = X86_64_SSE_CLASS;
6111 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
6112 everything should be passed in memory. */
6113 if (classes[i] == X86_64_X87UP_CLASS
6114 && (classes[i - 1] != X86_64_X87_CLASS))
6118 /* The first one should never be X86_64_X87UP_CLASS. */
6119 gcc_assert (i != 0);
6120 if (!warned && warn_psabi)
6123 inform (input_location,
6124 "the ABI of passing union with long double"
6125 " has changed in GCC 4.4");
/* Compute the alignment needed.  We align all types to their natural
   boundaries, with the exception of XFmode, which is aligned to
   64 bits.  */
6135 if (mode != VOIDmode && mode != BLKmode)
6137 int mode_alignment = GET_MODE_BITSIZE (mode);
6140 mode_alignment = 128;
6141 else if (mode == XCmode)
6142 mode_alignment = 256;
6143 if (COMPLEX_MODE_P (mode))
6144 mode_alignment /= 2;
6145 /* Misaligned fields are always returned in memory. */
6146 if (bit_offset % mode_alignment)
/* For V1xx modes, just use the base mode.  */
6151 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
6152 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
6153 mode = GET_MODE_INNER (mode);
6155 /* Classification of atomic types. */
6160 classes[0] = X86_64_SSE_CLASS;
6163 classes[0] = X86_64_SSE_CLASS;
6164 classes[1] = X86_64_SSEUP_CLASS;
int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
6178 classes[0] = X86_64_INTEGERSI_CLASS;
6181 else if (size <= 64)
6183 classes[0] = X86_64_INTEGER_CLASS;
6186 else if (size <= 64+32)
6188 classes[0] = X86_64_INTEGER_CLASS;
6189 classes[1] = X86_64_INTEGERSI_CLASS;
6192 else if (size <= 64+64)
6194 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6202 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6206 /* OImode shouldn't be used directly. */
6211 if (!(bit_offset % 64))
6212 classes[0] = X86_64_SSESF_CLASS;
6214 classes[0] = X86_64_SSE_CLASS;
6217 classes[0] = X86_64_SSEDF_CLASS;
6220 classes[0] = X86_64_X87_CLASS;
6221 classes[1] = X86_64_X87UP_CLASS;
6224 classes[0] = X86_64_SSE_CLASS;
6225 classes[1] = X86_64_SSEUP_CLASS;
6228 classes[0] = X86_64_SSE_CLASS;
6229 if (!(bit_offset % 64))
6235 if (!warned && warn_psabi)
6238 inform (input_location,
6239 "the ABI of passing structure with complex float"
6240 " member has changed in GCC 4.4");
6242 classes[1] = X86_64_SSESF_CLASS;
6246 classes[0] = X86_64_SSEDF_CLASS;
6247 classes[1] = X86_64_SSEDF_CLASS;
6250 classes[0] = X86_64_COMPLEX_X87_CLASS;
/* This mode is larger than 16 bytes.  */
6261 classes[0] = X86_64_SSE_CLASS;
6262 classes[1] = X86_64_SSEUP_CLASS;
6263 classes[2] = X86_64_SSEUP_CLASS;
6264 classes[3] = X86_64_SSEUP_CLASS;
6272 classes[0] = X86_64_SSE_CLASS;
6273 classes[1] = X86_64_SSEUP_CLASS;
6281 classes[0] = X86_64_SSE_CLASS;
6287 gcc_assert (VECTOR_MODE_P (mode));
6292 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
6294 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
6295 classes[0] = X86_64_INTEGERSI_CLASS;
6297 classes[0] = X86_64_INTEGER_CLASS;
6298 classes[1] = X86_64_INTEGER_CLASS;
6299 return 1 + (bytes > 8);
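/* Editorial summary of common classifications produced above (see the
   SysV psABI; illustrative, not exhaustive):

       int, long, pointers       INTEGER / INTEGERSI     one GPR
       float                     SSESF                   one XMM
       double                    SSEDF                   one XMM
       long double (XFmode)      X87 + X87UP             x87 stack
       __int128 (TImode)         INTEGER + INTEGER       two GPRs
       __m128                    SSE + SSEUP             one XMM
       __m256 (with AVX)         SSE + 3 x SSEUP         one YMM  */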
/* Examine the argument and return the number of registers required in
   each class.  Return 0 iff the parameter should be passed in
   memory.  */
6306 examine_argument (enum machine_mode mode, const_tree type, int in_return,
6307 int *int_nregs, int *sse_nregs)
6309 enum x86_64_reg_class regclass[MAX_CLASSES];
6310 int n = classify_argument (mode, type, regclass, 0);
6316 for (n--; n >= 0; n--)
6317 switch (regclass[n])
6319 case X86_64_INTEGER_CLASS:
6320 case X86_64_INTEGERSI_CLASS:
6323 case X86_64_SSE_CLASS:
6324 case X86_64_SSESF_CLASS:
6325 case X86_64_SSEDF_CLASS:
6328 case X86_64_NO_CLASS:
6329 case X86_64_SSEUP_CLASS:
6331 case X86_64_X87_CLASS:
6332 case X86_64_X87UP_CLASS:
6336 case X86_64_COMPLEX_X87_CLASS:
6337 return in_return ? 2 : 0;
6338 case X86_64_MEMORY_CLASS:
6344 /* Construct container for the argument used by GCC interface. See
6345 FUNCTION_ARG for the detailed description. */
6348 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
6349 const_tree type, int in_return, int nintregs, int nsseregs,
6350 const int *intreg, int sse_regno)
6352 /* The following variables hold the static issued_error state. */
6353 static bool issued_sse_arg_error;
6354 static bool issued_sse_ret_error;
6355 static bool issued_x87_ret_error;
6357 enum machine_mode tmpmode;
6359 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6360 enum x86_64_reg_class regclass[MAX_CLASSES];
6364 int needed_sseregs, needed_intregs;
6365 rtx exp[MAX_CLASSES];
6368 n = classify_argument (mode, type, regclass, 0);
6371 if (!examine_argument (mode, type, in_return, &needed_intregs,
6374 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
6377 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
6378 some less clueful developer tries to use floating-point anyway. */
6379 if (needed_sseregs && !TARGET_SSE)
6383 if (!issued_sse_ret_error)
6385 error ("SSE register return with SSE disabled");
6386 issued_sse_ret_error = true;
6389 else if (!issued_sse_arg_error)
6391 error ("SSE register argument with SSE disabled");
6392 issued_sse_arg_error = true;
6397 /* Likewise, error if the ABI requires us to return values in the
6398 x87 registers and the user specified -mno-80387. */
6399 if (!TARGET_80387 && in_return)
6400 for (i = 0; i < n; i++)
6401 if (regclass[i] == X86_64_X87_CLASS
6402 || regclass[i] == X86_64_X87UP_CLASS
6403 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
6405 if (!issued_x87_ret_error)
6407 error ("x87 register return with x87 disabled");
6408 issued_x87_ret_error = true;
/* First construct the simple cases.  Avoid SCmode, since we want to
   use a single register to pass this type.  */
6415 if (n == 1 && mode != SCmode)
6416 switch (regclass[0])
6418 case X86_64_INTEGER_CLASS:
6419 case X86_64_INTEGERSI_CLASS:
6420 return gen_rtx_REG (mode, intreg[0]);
6421 case X86_64_SSE_CLASS:
6422 case X86_64_SSESF_CLASS:
6423 case X86_64_SSEDF_CLASS:
6424 if (mode != BLKmode)
6425 return gen_reg_or_parallel (mode, orig_mode,
6426 SSE_REGNO (sse_regno));
6428 case X86_64_X87_CLASS:
6429 case X86_64_COMPLEX_X87_CLASS:
6430 return gen_rtx_REG (mode, FIRST_STACK_REG);
6431 case X86_64_NO_CLASS:
6432 /* Zero sized array, struct or class. */
6437 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
6438 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
6439 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6441 && regclass[0] == X86_64_SSE_CLASS
6442 && regclass[1] == X86_64_SSEUP_CLASS
6443 && regclass[2] == X86_64_SSEUP_CLASS
6444 && regclass[3] == X86_64_SSEUP_CLASS
6446 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6449 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
6450 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
6451 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
6452 && regclass[1] == X86_64_INTEGER_CLASS
6453 && (mode == CDImode || mode == TImode || mode == TFmode)
6454 && intreg[0] + 1 == intreg[1])
6455 return gen_rtx_REG (mode, intreg[0]);
6457 /* Otherwise figure out the entries of the PARALLEL. */
6458 for (i = 0; i < n; i++)
6462 switch (regclass[i])
6464 case X86_64_NO_CLASS:
6466 case X86_64_INTEGER_CLASS:
6467 case X86_64_INTEGERSI_CLASS:
6468 /* Merge TImodes on aligned occasions here too. */
6469 if (i * 8 + 8 > bytes)
6470 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
6471 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
/* We've requested 24 bytes that we don't have a mode for.  Use DImode.  */
6476 if (tmpmode == BLKmode)
6478 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6479 gen_rtx_REG (tmpmode, *intreg),
6483 case X86_64_SSESF_CLASS:
6484 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6485 gen_rtx_REG (SFmode,
6486 SSE_REGNO (sse_regno)),
6490 case X86_64_SSEDF_CLASS:
6491 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6492 gen_rtx_REG (DFmode,
6493 SSE_REGNO (sse_regno)),
6497 case X86_64_SSE_CLASS:
6505 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
6515 && regclass[1] == X86_64_SSEUP_CLASS
6516 && regclass[2] == X86_64_SSEUP_CLASS
6517 && regclass[3] == X86_64_SSEUP_CLASS);
6524 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6525 gen_rtx_REG (tmpmode,
6526 SSE_REGNO (sse_regno)),
6535 /* Empty aligned struct, union or class. */
6539 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
6540 for (i = 0; i < nexps; i++)
6541 XVECEXP (ret, 0, i) = exp [i];
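/* Editorial example of the PARALLEL built above: for

       struct s { double d; long l; };

   classification yields { SSEDF, INTEGER }, so the container is roughly

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI rdi) (const_int 8))])

   with each eightbyte tagged by its byte offset within the struct; the
   register numbers shown assume the first free SSE and integer slots.  */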
6545 /* Update the data in CUM to advance over an argument of mode MODE
6546 and data type TYPE. (TYPE is null for libcalls where that information
6547 may not be available.) */
6550 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6551 const_tree type, HOST_WIDE_INT bytes,
6552 HOST_WIDE_INT words)
6568 cum->words += words;
6569 cum->nregs -= words;
6570 cum->regno += words;
6572 if (cum->nregs <= 0)
6580 /* OImode shouldn't be used directly. */
6584 if (cum->float_in_sse < 2)
6587 if (cum->float_in_sse < 1)
6604 if (!type || !AGGREGATE_TYPE_P (type))
6606 cum->sse_words += words;
6607 cum->sse_nregs -= 1;
6608 cum->sse_regno += 1;
6609 if (cum->sse_nregs <= 0)
6623 if (!type || !AGGREGATE_TYPE_P (type))
6625 cum->mmx_words += words;
6626 cum->mmx_nregs -= 1;
6627 cum->mmx_regno += 1;
6628 if (cum->mmx_nregs <= 0)
6639 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6640 const_tree type, HOST_WIDE_INT words, bool named)
6642 int int_nregs, sse_nregs;
/* Unnamed 256bit vector mode parameters are passed on the stack.  */
6645 if (!named && VALID_AVX256_REG_MODE (mode))
6648 if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
6649 && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6651 cum->nregs -= int_nregs;
6652 cum->sse_nregs -= sse_nregs;
6653 cum->regno += int_nregs;
6654 cum->sse_regno += sse_nregs;
6658 int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
6659 cum->words = (cum->words + align - 1) & ~(align - 1);
6660 cum->words += words;
6665 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6666 HOST_WIDE_INT words)
/* Otherwise, this should be passed indirectly.  */
6669 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6671 cum->words += words;
6679 /* Update the data in CUM to advance over an argument of mode MODE and
6680 data type TYPE. (TYPE is null for libcalls where that information
6681 may not be available.) */
6684 ix86_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6685 const_tree type, bool named)
6687 HOST_WIDE_INT bytes, words;
6689 if (mode == BLKmode)
6690 bytes = int_size_in_bytes (type);
6692 bytes = GET_MODE_SIZE (mode);
6693 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6696 mode = type_natural_mode (type, NULL);
6698 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6699 function_arg_advance_ms_64 (cum, bytes, words);
6700 else if (TARGET_64BIT)
6701 function_arg_advance_64 (cum, mode, type, words, named);
6703 function_arg_advance_32 (cum, mode, type, bytes, words);
6706 /* Define where to put the arguments to a function.
6707 Value is zero to push the argument on the stack,
6708 or a hard register in which to store the argument.
6710 MODE is the argument's machine mode.
6711 TYPE is the data type of the argument (as a tree).
6712 This is null for libcalls where that information may
6714 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6715 the preceding args and about the function being called.
6716 NAMED is nonzero if this argument is a named parameter
6717 (otherwise it is an extra parameter matching an ellipsis). */
6720 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6721 enum machine_mode orig_mode, const_tree type,
6722 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6724 static bool warnedsse, warnedmmx;
6726 /* Avoid the AL settings for the Unix64 ABI. */
6727 if (mode == VOIDmode)
6743 if (words <= cum->nregs)
6745 int regno = cum->regno;
/* Fastcall allocates the first two DWORD (SImode) or
   smaller arguments to ECX and EDX if it isn't an
   aggregate type.  */
6754 || (type && AGGREGATE_TYPE_P (type)))
/* ECX, not EAX, is the first allocated register.  */
6758 if (regno == AX_REG)
6761 return gen_rtx_REG (mode, regno);
6766 if (cum->float_in_sse < 2)
6769 if (cum->float_in_sse < 1)
6773 /* In 32bit, we pass TImode in xmm registers. */
6780 if (!type || !AGGREGATE_TYPE_P (type))
6782 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6785 warning (0, "SSE vector argument without SSE enabled "
6789 return gen_reg_or_parallel (mode, orig_mode,
6790 cum->sse_regno + FIRST_SSE_REG);
6795 /* OImode shouldn't be used directly. */
6804 if (!type || !AGGREGATE_TYPE_P (type))
6807 return gen_reg_or_parallel (mode, orig_mode,
6808 cum->sse_regno + FIRST_SSE_REG);
6818 if (!type || !AGGREGATE_TYPE_P (type))
6820 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6823 warning (0, "MMX vector argument without MMX enabled "
6827 return gen_reg_or_parallel (mode, orig_mode,
6828 cum->mmx_regno + FIRST_MMX_REG);
6837 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6838 enum machine_mode orig_mode, const_tree type, bool named)
/* Handle a hidden AL argument containing the number of registers
   for varargs x86-64 functions.  */
6842 if (mode == VOIDmode)
6843 return GEN_INT (cum->maybe_vaarg
6844 ? (cum->sse_nregs < 0
6845 ? X86_64_SSE_REGPARM_MAX
/* Unnamed 256bit vector mode parameters are passed on the stack.  */
6866 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6868 &x86_64_int_parameter_registers [cum->regno],
6873 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6874 enum machine_mode orig_mode, bool named,
6875 HOST_WIDE_INT bytes)
/* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
   We use the value -2 to specify that the current function call is
   MS_ABI.  */
6881 if (mode == VOIDmode)
6882 return GEN_INT (-2);
6884 /* If we've run out of registers, it goes on the stack. */
6885 if (cum->nregs == 0)
6888 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6890 /* Only floating point modes are passed in anything but integer regs. */
6891 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6894 regno = cum->regno + FIRST_SSE_REG;
6899 /* Unnamed floating parameters are passed in both the
6900 SSE and integer registers. */
6901 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6902 t2 = gen_rtx_REG (mode, regno);
6903 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6904 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6905 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
/* Handle aggregate types passed in registers.  */
6909 if (orig_mode == BLKmode)
6911 if (bytes > 0 && bytes <= 8)
6912 mode = (bytes > 4 ? DImode : SImode);
6913 if (mode == BLKmode)
6917 return gen_reg_or_parallel (mode, orig_mode, regno);
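/* Editorial example of the Win64 rules implemented above: in a call to
   a variadic function such as printf, a double passed in argument slot
   1 is unnamed, so the PARALLEL above loads it into both %xmm1 and
   %rdx, letting the callee spill from whichever register file it
   expects.  BLKmode aggregates of 1-8 bytes are squeezed into SImode
   or DImode; everything larger is passed by reference elsewhere.  */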
/* Return where to put the arguments to a function.
   Return zero to push the argument on the stack, or a hard register in
   which to store the argument.
6923 MODE is the argument's machine mode. TYPE is the data type of the
6924 argument. It is null for libcalls where that information may not be
6925 available. CUM gives information about the preceding args and about
6926 the function being called. NAMED is nonzero if this argument is a
   named parameter (otherwise it is an extra parameter matching an
   ellipsis).  */
6931 ix86_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6932 const_tree type, bool named)
6934 enum machine_mode mode = omode;
6935 HOST_WIDE_INT bytes, words;
6938 if (mode == BLKmode)
6939 bytes = int_size_in_bytes (type);
6941 bytes = GET_MODE_SIZE (mode);
6942 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6944 /* To simplify the code below, represent vector types with a vector mode
6945 even if MMX/SSE are not active. */
6946 if (type && TREE_CODE (type) == VECTOR_TYPE)
6947 mode = type_natural_mode (type, cum);
6949 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6950 arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
6951 else if (TARGET_64BIT)
6952 arg = function_arg_64 (cum, mode, omode, type, named);
6954 arg = function_arg_32 (cum, mode, omode, type, bytes, words);
6956 if (TARGET_VZEROUPPER && function_pass_avx256_p (arg))
6958 /* This argument uses 256bit AVX modes. */
6959 cfun->machine->use_avx256_p = true;
6961 cfun->machine->callee_pass_avx256_p = true;
6963 cfun->machine->caller_pass_avx256_p = true;
6969 /* A C expression that indicates when an argument must be passed by
6970 reference. If nonzero for an argument, a copy of that argument is
6971 made in memory and a pointer to the argument is passed instead of
6972 the argument itself. The pointer is passed in whatever way is
6973 appropriate for passing a pointer to that type. */
6976 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6977 enum machine_mode mode ATTRIBUTE_UNUSED,
6978 const_tree type, bool named ATTRIBUTE_UNUSED)
6980 /* See Windows x64 Software Convention. */
6981 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6983 int msize = (int) GET_MODE_SIZE (mode);
6986 /* Arrays are passed by reference. */
6987 if (TREE_CODE (type) == ARRAY_TYPE)
6990 if (AGGREGATE_TYPE_P (type))
6992 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6993 are passed by reference. */
6994 msize = int_size_in_bytes (type);
6998 /* __m128 is passed by reference. */
7000 case 1: case 2: case 4: case 8:
7006 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
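/* Illustrative consequence of the checks above (editorial): on Win64

       struct a { char c[8]; };     size 8,  passed in a register
       struct b { char c[12]; };    size 12, passed by reference

   and any by-value array type or variable-sized type likewise degrades
   to a hidden pointer, per the Windows x64 software convention.  */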
7012 /* Return true when TYPE should be 128bit aligned for 32bit argument
7013 passing ABI. XXX: This function is obsolete and is only used for
7014 checking psABI compatibility with previous versions of GCC. */
7017 ix86_compat_aligned_value_p (const_tree type)
7019 enum machine_mode mode = TYPE_MODE (type);
7020 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
7024 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
7026 if (TYPE_ALIGN (type) < 128)
7029 if (AGGREGATE_TYPE_P (type))
7031 /* Walk the aggregates recursively. */
7032 switch (TREE_CODE (type))
7036 case QUAL_UNION_TYPE:
7040 /* Walk all the structure fields. */
7041 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
7043 if (TREE_CODE (field) == FIELD_DECL
7044 && ix86_compat_aligned_value_p (TREE_TYPE (field)))
/* Just in case some language passes arrays by value.  */
7052 if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
7063 /* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
7064 XXX: This function is obsolete and is only used for checking psABI
7065 compatibility with previous versions of GCC. */
7068 ix86_compat_function_arg_boundary (enum machine_mode mode,
7069 const_tree type, unsigned int align)
7071 /* In 32bit, only _Decimal128 and __float128 are aligned to their
7072 natural boundaries. */
7073 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
/* The i386 ABI defines all arguments to be 4 byte aligned.  We have to
   make an exception for SSE modes since these require 128bit
   alignment.
7079 The handling here differs from field_alignment. ICC aligns MMX
7080 arguments to 4 byte boundaries, while structure fields are aligned
7081 to 8 byte boundaries. */
7084 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
7085 align = PARM_BOUNDARY;
7089 if (!ix86_compat_aligned_value_p (type))
7090 align = PARM_BOUNDARY;
7093 if (align > BIGGEST_ALIGNMENT)
7094 align = BIGGEST_ALIGNMENT;
/* Return true when TYPE should be 128bit aligned for 32bit argument
   passing.  */
7102 ix86_contains_aligned_value_p (const_tree type)
7104 enum machine_mode mode = TYPE_MODE (type);
7106 if (mode == XFmode || mode == XCmode)
7109 if (TYPE_ALIGN (type) < 128)
7112 if (AGGREGATE_TYPE_P (type))
7114 /* Walk the aggregates recursively. */
7115 switch (TREE_CODE (type))
7119 case QUAL_UNION_TYPE:
7123 /* Walk all the structure fields. */
7124 for (field = TYPE_FIELDS (type);
7126 field = DECL_CHAIN (field))
7128 if (TREE_CODE (field) == FIELD_DECL
7129 && ix86_contains_aligned_value_p (TREE_TYPE (field)))
/* Just in case some language passes arrays by value.  */
7137 if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
7146 return TYPE_ALIGN (type) >= 128;
7151 /* Gives the alignment boundary, in bits, of an argument with the
7152 specified mode and type. */
7155 ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
/* Since the main variant type is used for the call, convert TYPE to
   its main variant.  */
7162 type = TYPE_MAIN_VARIANT (type);
7163 align = TYPE_ALIGN (type);
7166 align = GET_MODE_ALIGNMENT (mode);
7167 if (align < PARM_BOUNDARY)
7168 align = PARM_BOUNDARY;
7172 unsigned int saved_align = align;
7176 /* i386 ABI defines XFmode arguments to be 4 byte aligned. */
7179 if (mode == XFmode || mode == XCmode)
7180 align = PARM_BOUNDARY;
7182 else if (!ix86_contains_aligned_value_p (type))
7183 align = PARM_BOUNDARY;
7186 align = PARM_BOUNDARY;
7191 && align != ix86_compat_function_arg_boundary (mode, type,
7195 inform (input_location,
7196 "The ABI for passing parameters with %d-byte"
7197 " alignment has changed in GCC 4.6",
7198 align / BITS_PER_UNIT);
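/* Editorial example for the boundary computation above: in 32-bit mode
   an SSE argument such as __m128 keeps its 16-byte alignment, while

       struct s { double d; };

   is aligned only to PARM_BOUNDARY (32 bits) on the stack.  When the
   result differs from what the GCC 4.5 logic would have computed, the
   inform call above flags the silent ABI change.  */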
/* Return true if N is a possible register number for a function
   value.  */
7208 ix86_function_value_regno_p (const unsigned int regno)
7215 case FIRST_FLOAT_REG:
/* TODO: The function should depend on the current function's ABI, but
   builtins.c would need updating then.  Therefore we use the
   default ABI.  */
7219 if (TARGET_64BIT && ix86_abi == MS_ABI)
7221 return TARGET_FLOAT_RETURNS_IN_80387;
7227 if (TARGET_MACHO || TARGET_64BIT)
7235 /* Define how to find the value returned by a function.
7236 VALTYPE is the data type of the value (as a tree).
7237 If the precise function being called is known, FUNC is its FUNCTION_DECL;
7238 otherwise, FUNC is 0. */
7241 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
7242 const_tree fntype, const_tree fn)
7246 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
7247 we normally prevent this case when mmx is not available. However
7248 some ABIs may require the result to be returned like DImode. */
7249 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7250 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
7252 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
7253 we prevent this case when sse is not available. However some ABIs
7254 may require the result to be returned like integer TImode. */
7255 else if (mode == TImode
7256 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7257 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
7259 /* 32-byte vector modes in %ymm0. */
7260 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
7261 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
7263 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
7264 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
7265 regno = FIRST_FLOAT_REG;
7267 /* Most things go in %eax. */
7270 /* Override FP return register with %xmm0 for local functions when
7271 SSE math is enabled or for functions with sseregparm attribute. */
7272 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
7274 int sse_level = ix86_function_sseregparm (fntype, fn, false);
7275 if ((sse_level >= 1 && mode == SFmode)
7276 || (sse_level == 2 && mode == DFmode))
7277 regno = FIRST_SSE_REG;
7280 /* OImode shouldn't be used directly. */
7281 gcc_assert (mode != OImode);
7283 return gen_rtx_REG (orig_mode, regno);
7287 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
7292 /* Handle libcalls, which don't provide a type node. */
7293 if (valtype == NULL)
7305 return gen_rtx_REG (mode, FIRST_SSE_REG);
7308 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
7312 return gen_rtx_REG (mode, AX_REG);
7316 ret = construct_container (mode, orig_mode, valtype, 1,
7317 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
7318 x86_64_int_return_registers, 0);
/* For zero sized structures, construct_container returns NULL, but we
   need to keep the rest of the compiler happy by returning a
   meaningful value.  */
7323 ret = gen_rtx_REG (orig_mode, AX_REG);
7329 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
7331 unsigned int regno = AX_REG;
7335 switch (GET_MODE_SIZE (mode))
if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7339 && !COMPLEX_MODE_P (mode))
7340 regno = FIRST_SSE_REG;
7344 if (mode == SFmode || mode == DFmode)
7345 regno = FIRST_SSE_REG;
7351 return gen_rtx_REG (orig_mode, regno);
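/* Editorial summary of the Win64 return rules above:

       1/2/4/8-byte integers             %rax
       float, double                     %xmm0
       __m128 and other 16-byte
       non-complex int/vector modes      %xmm0
       anything else                     memory, via hidden pointer

   The 16-byte case is exactly the SCALAR_INT/VECTOR_MODE test in the
   switch above.  */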
7355 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
7356 enum machine_mode orig_mode, enum machine_mode mode)
7358 const_tree fn, fntype;
7361 if (fntype_or_decl && DECL_P (fntype_or_decl))
7362 fn = fntype_or_decl;
7363 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
7365 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
7366 return function_value_ms_64 (orig_mode, mode);
7367 else if (TARGET_64BIT)
7368 return function_value_64 (orig_mode, mode, valtype);
7370 return function_value_32 (orig_mode, mode, fntype, fn);
7374 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
7375 bool outgoing ATTRIBUTE_UNUSED)
7377 enum machine_mode mode, orig_mode;
7379 orig_mode = TYPE_MODE (valtype);
7380 mode = type_natural_mode (valtype, NULL);
7381 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
7385 ix86_libcall_value (enum machine_mode mode)
7387 return ix86_function_value_1 (NULL, NULL, mode, mode);
7390 /* Return true iff type is returned in memory. */
7392 static bool ATTRIBUTE_UNUSED
7393 return_in_memory_32 (const_tree type, enum machine_mode mode)
7397 if (mode == BLKmode)
7400 size = int_size_in_bytes (type);
7402 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
7405 if (VECTOR_MODE_P (mode) || mode == TImode)
7407 /* User-created vectors small enough to fit in EAX. */
/* MMX/3dNow values are returned in MM0,
   except when it doesn't exist or the ABI prescribes otherwise.  */
7414 return !TARGET_MMX || TARGET_VECT8_RETURNS;
7416 /* SSE values are returned in XMM0, except when it doesn't exist. */
7420 /* AVX values are returned in YMM0, except when it doesn't exist. */
7431 /* OImode shouldn't be used directly. */
7432 gcc_assert (mode != OImode);
7437 static bool ATTRIBUTE_UNUSED
7438 return_in_memory_64 (const_tree type, enum machine_mode mode)
7440 int needed_intregs, needed_sseregs;
7441 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
7444 static bool ATTRIBUTE_UNUSED
7445 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
7447 HOST_WIDE_INT size = int_size_in_bytes (type);
7449 /* __m128 is returned in xmm0. */
7450 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7451 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
/* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes.  */
7455 return size != 1 && size != 2 && size != 4 && size != 8;
7459 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7461 #ifdef SUBTARGET_RETURN_IN_MEMORY
7462 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
7464 const enum machine_mode mode = type_natural_mode (type, NULL);
7468 if (ix86_function_type_abi (fntype) == MS_ABI)
7469 return return_in_memory_ms_64 (type, mode);
7471 return return_in_memory_64 (type, mode);
7474 return return_in_memory_32 (type, mode);
7478 /* When returning SSE vector types, we have a choice of either
7479 (1) being abi incompatible with a -march switch, or
7480 (2) generating an error.
7481 Given no good solution, I think the safest thing is one warning.
7482 The user won't be able to use -Werror, but....
7484 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
7485 called in response to actually generating a caller or callee that
7486 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
7487 via aggregate_value_p for general type probing from tree-ssa. */
7490 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
7492 static bool warnedsse, warnedmmx;
7494 if (!TARGET_64BIT && type)
7496 /* Look at the return type of the function, not the function type. */
7497 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
7499 if (!TARGET_SSE && !warnedsse)
7502 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7505 warning (0, "SSE vector return without SSE enabled "
7510 if (!TARGET_MMX && !warnedmmx)
7512 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7515 warning (0, "MMX vector return without MMX enabled "
7525 /* Create the va_list data type. */
/* Return the calling convention specific va_list data type.
   The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI.  */
7531 ix86_build_builtin_va_list_abi (enum calling_abi abi)
7533 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
/* For i386 we use a plain pointer to the argument area.  */
7536 if (!TARGET_64BIT || abi == MS_ABI)
7537 return build_pointer_type (char_type_node);
7539 record = lang_hooks.types.make_type (RECORD_TYPE);
7540 type_decl = build_decl (BUILTINS_LOCATION,
7541 TYPE_DECL, get_identifier ("__va_list_tag"), record);
7543 f_gpr = build_decl (BUILTINS_LOCATION,
7544 FIELD_DECL, get_identifier ("gp_offset"),
7545 unsigned_type_node);
7546 f_fpr = build_decl (BUILTINS_LOCATION,
7547 FIELD_DECL, get_identifier ("fp_offset"),
7548 unsigned_type_node);
7549 f_ovf = build_decl (BUILTINS_LOCATION,
7550 FIELD_DECL, get_identifier ("overflow_arg_area"),
7552 f_sav = build_decl (BUILTINS_LOCATION,
7553 FIELD_DECL, get_identifier ("reg_save_area"),
7556 va_list_gpr_counter_field = f_gpr;
7557 va_list_fpr_counter_field = f_fpr;
7559 DECL_FIELD_CONTEXT (f_gpr) = record;
7560 DECL_FIELD_CONTEXT (f_fpr) = record;
7561 DECL_FIELD_CONTEXT (f_ovf) = record;
7562 DECL_FIELD_CONTEXT (f_sav) = record;
7564 TYPE_STUB_DECL (record) = type_decl;
7565 TYPE_NAME (record) = type_decl;
7566 TYPE_FIELDS (record) = f_gpr;
7567 DECL_CHAIN (f_gpr) = f_fpr;
7568 DECL_CHAIN (f_fpr) = f_ovf;
7569 DECL_CHAIN (f_ovf) = f_sav;
7571 layout_type (record);
7573 /* The correct type is an array type of one element. */
7574 return build_array_type (record, build_index_type (size_zero_node));
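/* The record built above corresponds to the familiar SysV x86-64
   va_list, shown here for reference (the field names match the
   FIELD_DECLs created above):

       typedef struct __va_list_tag
       {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } __va_list_tag;
       typedef __va_list_tag va_list[1];

   The array-of-one-element trick makes va_list decay to a pointer when
   passed to a function, as the psABI requires.  */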
/* Set up the builtin va_list data type and, for 64-bit, the additional
   calling convention specific va_list data types.  */
7581 ix86_build_builtin_va_list (void)
7583 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
7585 /* Initialize abi specific va_list builtin types. */
7589 if (ix86_abi == MS_ABI)
7591 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
7592 if (TREE_CODE (t) != RECORD_TYPE)
7593 t = build_variant_type_copy (t);
7594 sysv_va_list_type_node = t;
7599 if (TREE_CODE (t) != RECORD_TYPE)
7600 t = build_variant_type_copy (t);
7601 sysv_va_list_type_node = t;
7603 if (ix86_abi != MS_ABI)
7605 t = ix86_build_builtin_va_list_abi (MS_ABI);
7606 if (TREE_CODE (t) != RECORD_TYPE)
7607 t = build_variant_type_copy (t);
7608 ms_va_list_type_node = t;
7613 if (TREE_CODE (t) != RECORD_TYPE)
7614 t = build_variant_type_copy (t);
7615 ms_va_list_type_node = t;
7622 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
7625 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
7631 /* GPR size of varargs save area. */
7632 if (cfun->va_list_gpr_size)
7633 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
7635 ix86_varargs_gpr_size = 0;
7637 /* FPR size of varargs save area. We don't need it if we don't pass
7638 anything in SSE registers. */
7639 if (TARGET_SSE && cfun->va_list_fpr_size)
7640 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
7642 ix86_varargs_fpr_size = 0;
7644 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
7647 save_area = frame_pointer_rtx;
7648 set = get_varargs_alias_set ();
7650 max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
7651 if (max > X86_64_REGPARM_MAX)
7652 max = X86_64_REGPARM_MAX;
7654 for (i = cum->regno; i < max; i++)
7656 mem = gen_rtx_MEM (Pmode,
7657 plus_constant (save_area, i * UNITS_PER_WORD));
7658 MEM_NOTRAP_P (mem) = 1;
7659 set_mem_alias_set (mem, set);
7660 emit_move_insn (mem, gen_rtx_REG (Pmode,
7661 x86_64_int_parameter_registers[i]));
7664 if (ix86_varargs_fpr_size)
7666 enum machine_mode smode;
/* Now emit code to save the SSE registers.  The AX parameter contains
   the number of SSE parameter registers used to call this function,
   though all we actually check here is the zero/non-zero status.  */
7673 label = gen_label_rtx ();
7674 test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
7675 emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
7678 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
7679 we used movdqa (i.e. TImode) instead? Perhaps even better would
7680 be if we could determine the real mode of the data, via a hook
7681 into pass_stdarg. Ignore all that for now. */
7683 if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
7684 crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
7686 max = cum->sse_regno + cfun->va_list_fpr_size / 16;
7687 if (max > X86_64_SSE_REGPARM_MAX)
7688 max = X86_64_SSE_REGPARM_MAX;
7690 for (i = cum->sse_regno; i < max; ++i)
7692 mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
7693 mem = gen_rtx_MEM (smode, mem);
7694 MEM_NOTRAP_P (mem) = 1;
7695 set_mem_alias_set (mem, set);
7696 set_mem_align (mem, GET_MODE_ALIGNMENT (smode));
7698 emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
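/* Layout of the register save area emitted above (editorial sketch,
   byte offsets relative to reg_save_area):

       0 ..  47    six GPRs (rdi rsi rdx rcx r8 r9), 8 bytes each
      48 .. 175    eight SSE registers (xmm0-xmm7), 16 bytes each

   so ix86_varargs_gpr_size is at most 48 and the FP area starts right
   after it, matching the 8 * X86_64_REGPARM_MAX bias that va_start
   applies to fp_offset below.  */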
7706 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7708 alias_set_type set = get_varargs_alias_set ();
7711 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7715 mem = gen_rtx_MEM (Pmode,
7716 plus_constant (virtual_incoming_args_rtx,
7717 i * UNITS_PER_WORD));
7718 MEM_NOTRAP_P (mem) = 1;
7719 set_mem_alias_set (mem, set);
7721 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7722 emit_move_insn (mem, reg);
7727 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7728 tree type, int *pretend_size ATTRIBUTE_UNUSED,
7731 CUMULATIVE_ARGS next_cum;
7734 /* This argument doesn't appear to be used anymore. Which is good,
7735 because the old code here didn't suppress rtl generation. */
7736 gcc_assert (!no_rtl);
7741 fntype = TREE_TYPE (current_function_decl);
7743 /* For varargs, we do not want to skip the dummy va_dcl argument.
7744 For stdargs, we do want to skip the last named argument. */
7746 if (stdarg_p (fntype))
7747 ix86_function_arg_advance (&next_cum, mode, type, true);
7749 if (cum->call_abi == MS_ABI)
7750 setup_incoming_varargs_ms_64 (&next_cum);
7752 setup_incoming_varargs_64 (&next_cum);
/* Return true if TYPE is a va_list of kind char *.  */
7758 is_va_list_char_pointer (tree type)
7762 /* For 32-bit it is always true. */
7765 canonic = ix86_canonical_va_list_type (type);
7766 return (canonic == ms_va_list_type_node
7767 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7770 /* Implement va_start. */
7773 ix86_va_start (tree valist, rtx nextarg)
7775 HOST_WIDE_INT words, n_gpr, n_fpr;
7776 tree f_gpr, f_fpr, f_ovf, f_sav;
7777 tree gpr, fpr, ovf, sav, t;
7781 if (flag_split_stack
7782 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7784 unsigned int scratch_regno;
7786 /* When we are splitting the stack, we can't refer to the stack
7787 arguments using internal_arg_pointer, because they may be on
7788 the old stack. The split stack prologue will arrange to
7789 leave a pointer to the old stack arguments in a scratch
7790 register, which we here copy to a pseudo-register. The split
7791 stack prologue can't set the pseudo-register directly because
7792 it (the prologue) runs before any registers have been saved. */
7794 scratch_regno = split_stack_prologue_scratch_regno ();
7795 if (scratch_regno != INVALID_REGNUM)
7799 reg = gen_reg_rtx (Pmode);
7800 cfun->machine->split_stack_varargs_pointer = reg;
7803 emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
7807 push_topmost_sequence ();
7808 emit_insn_after (seq, entry_of_function ());
7809 pop_topmost_sequence ();
/* Only the 64-bit target needs something special.  */
7814 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7816 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7817 std_expand_builtin_va_start (valist, nextarg);
7822 va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
7823 next = expand_binop (ptr_mode, add_optab,
7824 cfun->machine->split_stack_varargs_pointer,
7825 crtl->args.arg_offset_rtx,
7826 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7827 convert_move (va_r, next, 0);
7832 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7833 f_fpr = DECL_CHAIN (f_gpr);
7834 f_ovf = DECL_CHAIN (f_fpr);
7835 f_sav = DECL_CHAIN (f_ovf);
7837 valist = build_simple_mem_ref (valist);
7838 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
7839 /* The following should be folded into the MEM_REF offset. */
7840 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
7842 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
7844 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
7846 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
7849 /* Count number of gp and fp argument registers used. */
7850 words = crtl->args.info.words;
7851 n_gpr = crtl->args.info.regno;
7852 n_fpr = crtl->args.info.sse_regno;
7854 if (cfun->va_list_gpr_size)
7856 type = TREE_TYPE (gpr);
7857 t = build2 (MODIFY_EXPR, type,
7858 gpr, build_int_cst (type, n_gpr * 8));
7859 TREE_SIDE_EFFECTS (t) = 1;
7860 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7863 if (TARGET_SSE && cfun->va_list_fpr_size)
7865 type = TREE_TYPE (fpr);
7866 t = build2 (MODIFY_EXPR, type, fpr,
7867 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7868 TREE_SIDE_EFFECTS (t) = 1;
7869 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7872 /* Find the overflow area. */
7873 type = TREE_TYPE (ovf);
7874 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7875 ovf_rtx = crtl->args.internal_arg_pointer;
7877 ovf_rtx = cfun->machine->split_stack_varargs_pointer;
7878 t = make_tree (type, ovf_rtx);
7880 t = build2 (POINTER_PLUS_EXPR, type, t,
7881 size_int (words * UNITS_PER_WORD));
7882 t = build2 (MODIFY_EXPR, type, ovf, t);
7883 TREE_SIDE_EFFECTS (t) = 1;
7884 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7886 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
/* Find the register save area.
   The function prologue saves it right above the stack frame.  */
7890 type = TREE_TYPE (sav);
7891 t = make_tree (type, frame_pointer_rtx);
7892 if (!ix86_varargs_gpr_size)
7893 t = build2 (POINTER_PLUS_EXPR, type, t,
7894 size_int (-8 * X86_64_REGPARM_MAX));
7895 t = build2 (MODIFY_EXPR, type, sav, t);
7896 TREE_SIDE_EFFECTS (t) = 1;
7897 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
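/* Editorial example of the initialization above: in a function
   declared as f (const char *fmt, ...) with no named FP arguments,
   va_start leaves

       gp_offset         = 8     (one GPR consumed by fmt)
       fp_offset         = 48    (8 * X86_64_REGPARM_MAX, none used)
       overflow_arg_area = address of the first stack argument
       reg_save_area     = the block saved by the prologue.  */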
7901 /* Implement va_arg. */
7904 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7907 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7908 tree f_gpr, f_fpr, f_ovf, f_sav;
7909 tree gpr, fpr, ovf, sav, t;
7911 tree lab_false, lab_over = NULL_TREE;
7916 enum machine_mode nat_mode;
7917 unsigned int arg_boundary;
/* Only the 64-bit target needs something special.  */
7920 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7921 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7923 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7924 f_fpr = DECL_CHAIN (f_gpr);
7925 f_ovf = DECL_CHAIN (f_fpr);
7926 f_sav = DECL_CHAIN (f_ovf);
7928 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7929 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7930 valist = build_va_arg_indirect_ref (valist);
7931 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7932 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7933 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7935 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7937 type = build_pointer_type (type);
7938 size = int_size_in_bytes (type);
7939 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7941 nat_mode = type_natural_mode (type, NULL);
/* Unnamed 256bit vector mode parameters are passed on the stack.  */
7951 if (ix86_cfun_abi () == SYSV_ABI)
7958 container = construct_container (nat_mode, TYPE_MODE (type),
7959 type, 0, X86_64_REGPARM_MAX,
7960 X86_64_SSE_REGPARM_MAX, intreg,
7965 /* Pull the value out of the saved registers. */
7967 addr = create_tmp_var (ptr_type_node, "addr");
7971 int needed_intregs, needed_sseregs;
7973 tree int_addr, sse_addr;
7975 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7976 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7978 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7980 need_temp = (!REG_P (container)
7981 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7982 || TYPE_ALIGN (type) > 128));
/* In case we are passing a structure, verify that it is a consecutive
   block in the register save area.  If not, we need to do moves.  */
7986 if (!need_temp && !REG_P (container))
/* Verify that all registers are strictly consecutive.  */
7989 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7993 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7995 rtx slot = XVECEXP (container, 0, i);
7996 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7997 || INTVAL (XEXP (slot, 1)) != i * 16)
8005 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8007 rtx slot = XVECEXP (container, 0, i);
8008 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
8009 || INTVAL (XEXP (slot, 1)) != i * 8)
8021 int_addr = create_tmp_var (ptr_type_node, "int_addr");
8022 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
8025 /* First ensure that we fit completely in registers. */
8028 t = build_int_cst (TREE_TYPE (gpr),
8029 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
8030 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
8031 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8032 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8033 gimplify_and_add (t, pre_p);
8037 t = build_int_cst (TREE_TYPE (fpr),
8038 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
8039 + X86_64_REGPARM_MAX * 8);
8040 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
8041 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8042 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8043 gimplify_and_add (t, pre_p);
8046 /* Compute index to start of area used for integer regs. */
8049 /* int_addr = gpr + sav; */
8050 t = fold_convert (sizetype, gpr);
8051 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8052 gimplify_assign (int_addr, t, pre_p);
8056 /* sse_addr = fpr + sav; */
8057 t = fold_convert (sizetype, fpr);
8058 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8059 gimplify_assign (sse_addr, t, pre_p);
8063 int i, prev_size = 0;
8064 tree temp = create_tmp_var (type, "va_arg_tmp");
8067 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
8068 gimplify_assign (addr, t, pre_p);
8070 for (i = 0; i < XVECLEN (container, 0); i++)
8072 rtx slot = XVECEXP (container, 0, i);
8073 rtx reg = XEXP (slot, 0);
8074 enum machine_mode mode = GET_MODE (reg);
8080 tree dest_addr, dest;
8081 int cur_size = GET_MODE_SIZE (mode);
8083 gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
8084 prev_size = INTVAL (XEXP (slot, 1));
8085 if (prev_size + cur_size > size)
8087 cur_size = size - prev_size;
8088 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
8089 if (mode == BLKmode)
8092 piece_type = lang_hooks.types.type_for_mode (mode, 1);
8093 if (mode == GET_MODE (reg))
8094 addr_type = build_pointer_type (piece_type);
8096 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8098 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8101 if (SSE_REGNO_P (REGNO (reg)))
8103 src_addr = sse_addr;
8104 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
8108 src_addr = int_addr;
8109 src_offset = REGNO (reg) * 8;
8111 src_addr = fold_convert (addr_type, src_addr);
8112 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
8113 size_int (src_offset));
8115 dest_addr = fold_convert (daddr_type, addr);
8116 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
8117 size_int (prev_size));
8118 if (cur_size == GET_MODE_SIZE (mode))
8120 src = build_va_arg_indirect_ref (src_addr);
8121 dest = build_va_arg_indirect_ref (dest_addr);
8123 gimplify_assign (dest, src, pre_p);
8128 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
8129 3, dest_addr, src_addr,
8130 size_int (cur_size));
8131 gimplify_and_add (copy, pre_p);
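		  /* A 3-byte tail, for instance, has no integer mode of
		     exactly that size, so cur_size != GET_MODE_SIZE (mode)
		     and the remainder is copied with a 3-byte memcpy
		     rather than a direct assignment.  */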
8133 prev_size += cur_size;
8139 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
8140 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
8141 gimplify_assign (gpr, t, pre_p);
8146 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
8147 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
8148 gimplify_assign (fpr, t, pre_p);
8151 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8153 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8156 /* ... otherwise out of the overflow area. */
8158 /* When we align a parameter on the stack for the caller, if the parameter
8159 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
8160 aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We match the callee
8161 here with the caller.  */
8162 arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
8163 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
8164 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
8166 /* Care for on-stack alignment if needed. */
8167 if (arg_boundary <= 64 || size == 0)
8171 HOST_WIDE_INT align = arg_boundary / 8;
8172 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
8173 size_int (align - 1));
8174 t = fold_convert (sizetype, t);
8175 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
8177 t = fold_convert (TREE_TYPE (ovf), t);
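	  /* E.g. for a 32-byte-aligned argument, align == 32 and the
	     statements above compute t as (ovf + 31) & -32, rounding the
	     overflow-area pointer up to the next 32-byte boundary.  */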
8180 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8181 gimplify_assign (addr, t, pre_p);
8183 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
8184 size_int (rsize * UNITS_PER_WORD));
8185 gimplify_assign (unshare_expr (ovf), t, pre_p);
8188 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8190 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
8191 addr = fold_convert (ptrtype, addr);
8194 addr = build_va_arg_indirect_ref (addr);
8195 return build_va_arg_indirect_ref (addr);
8198 /* Return true if OPNUM's MEM should be matched
8199 in movabs* patterns. */
8202 ix86_check_movabs (rtx insn, int opnum)
8206 set = PATTERN (insn);
8207 if (GET_CODE (set) == PARALLEL)
8208 set = XVECEXP (set, 0, 0);
8209 gcc_assert (GET_CODE (set) == SET);
8210 mem = XEXP (set, opnum);
8211 while (GET_CODE (mem) == SUBREG)
8212 mem = SUBREG_REG (mem);
8213 gcc_assert (MEM_P (mem));
8214 return volatile_ok || !MEM_VOLATILE_P (mem);
8217 /* Initialize the table of extra 80387 mathematical constants. */
8220 init_ext_80387_constants (void)
8222 static const char * cst[5] =
8224 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
8225 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
8226 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
8227 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
8228 "3.1415926535897932385128089594061862044", /* 4: fldpi */
8232 for (i = 0; i < 5; i++)
8234 real_from_string (&ext_80387_constants_table[i], cst[i]);
8235 /* Ensure each constant is rounded to XFmode precision. */
8236 real_convert (&ext_80387_constants_table[i],
8237 XFmode, &ext_80387_constants_table[i]);
8240 ext_80387_constants_init = 1;
8243 /* Return non-zero if the constant is something that
8244 can be loaded with a special instruction. */
8247 standard_80387_constant_p (rtx x)
8249 enum machine_mode mode = GET_MODE (x);
8253 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
8256 if (x == CONST0_RTX (mode))
8258 if (x == CONST1_RTX (mode))
8261 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8263 /* For XFmode constants, try to find a special 80387 instruction when
8264 optimizing for size or on those CPUs that benefit from them. */
8266 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
8270 if (! ext_80387_constants_init)
8271 init_ext_80387_constants ();
8273 for (i = 0; i < 5; i++)
8274 if (real_identical (&r, &ext_80387_constants_table[i]))
8278 /* Load of the constant -0.0 or -1.0 will be split as
8279 fldz;fchs or fld1;fchs sequence. */
8280 if (real_isnegzero (&r))
8282 if (real_identical (&r, &dconstm1))
8288 /* Return the opcode of the special instruction to be used to load the constant X.  */
8292 standard_80387_constant_opcode (rtx x)
8294 switch (standard_80387_constant_p (x))
8318 /* Return the CONST_DOUBLE representing the 80387 constant that is
8319 loaded by the specified special instruction. The argument IDX
8320 matches the return value from standard_80387_constant_p. */
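/* For reference, the return-value convention used by
   standard_80387_constant_p above is: 1 for 0.0 (fldz), 2 for 1.0
   (fld1), 3..7 for the five table constants (table index plus 3),
   8 for -0.0 and 9 for -1.0 (each loaded as fldz/fld1 plus fchs).  */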
8323 standard_80387_constant_rtx (int idx)
8327 if (! ext_80387_constants_init)
8328 init_ext_80387_constants ();
8344 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
8348 /* Return 1 if X is all 0s and 2 if X is all 1s
8349 in a supported SSE vector mode.  */
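/* For example, (const_vector:V4SI [0 0 0 0]) yields 1 and an all-ones
   (const_vector:V4SI [-1 -1 -1 -1]) yields 2; anything else yields 0
   (not a special constant).  */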
8352 standard_sse_constant_p (rtx x)
8354 enum machine_mode mode = GET_MODE (x);
8356 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
8358 if (vector_all_ones_operand (x, mode))
8374 /* Return the opcode of the special instruction to be used to load the constant X.  */
8378 standard_sse_constant_opcode (rtx insn, rtx x)
8380 switch (standard_sse_constant_p (x))
8383 switch (get_attr_mode (insn))
8386 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8388 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8389 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8391 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
8393 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8394 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8396 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
8398 return "vxorps\t%x0, %x0, %x0";
8400 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8401 return "vxorps\t%x0, %x0, %x0";
8403 return "vxorpd\t%x0, %x0, %x0";
8405 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8406 return "vxorps\t%x0, %x0, %x0";
8408 return "vpxor\t%x0, %x0, %x0";
8413 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
8420 /* Return true if OP contains a symbol reference.  */
8423 symbolic_reference_mentioned_p (rtx op)
8428 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
8431 fmt = GET_RTX_FORMAT (GET_CODE (op));
8432 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
8438 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
8439 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
8443 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
8450 /* Return true if it is appropriate to emit `ret' instructions in the
8451 body of a function. Do this only if the epilogue is simple, needing a
8452 couple of insns. Prior to reloading, we can't tell how many registers
8453 must be saved, so return false then. Return false if there is no frame
8454 marker to de-allocate. */
8457 ix86_can_use_return_insn_p (void)
8459 struct ix86_frame frame;
8461 if (! reload_completed || frame_pointer_needed)
8464 /* Don't allow more than 32k pop, since that's all we can do
8465 with one instruction. */
8466 if (crtl->args.pops_args && crtl->args.size >= 32768)
8469 ix86_compute_frame_layout (&frame);
8470 return (frame.stack_pointer_offset == UNITS_PER_WORD
8471 && (frame.nregs + frame.nsseregs) == 0);
8474 /* Value should be nonzero if functions must have frame pointers.
8475 Zero means the frame pointer need not be set up (and parms may
8476 be accessed via the stack pointer) in functions that seem suitable. */
8479 ix86_frame_pointer_required (void)
8481 /* If we accessed previous frames, then the generated code expects
8482 to be able to access the saved ebp value in our frame. */
8483 if (cfun->machine->accesses_prev_frame)
8486 /* Several x86 OSes need a frame pointer for other reasons,
8487 usually pertaining to setjmp.  */
8488 if (SUBTARGET_FRAME_POINTER_REQUIRED)
8491 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
8492 turns off the frame pointer by default. Turn it back on now if
8493 we've not got a leaf function. */
8494 if (TARGET_OMIT_LEAF_FRAME_POINTER
8495 && (!current_function_is_leaf
8496 || ix86_current_function_calls_tls_descriptor))
8499 if (crtl->profile && !flag_fentry)
8505 /* Record that the current function accesses previous call frames. */
8508 ix86_setup_frame_addresses (void)
8510 cfun->machine->accesses_prev_frame = 1;
8513 #ifndef USE_HIDDEN_LINKONCE
8514 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
8515 # define USE_HIDDEN_LINKONCE 1
8517 # define USE_HIDDEN_LINKONCE 0
8521 static int pic_labels_used;
8523 /* Fills in the label name that should be used for a pc thunk for
8524 the given register. */
8527 get_pc_thunk_name (char name[32], unsigned int regno)
8529 gcc_assert (!TARGET_64BIT);
8531 if (USE_HIDDEN_LINKONCE)
8532 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
8534 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
8538 /* This function generates code for -fpic that loads %ebx with
8539 the return address of the caller and then returns. */
8542 ix86_code_end (void)
8547 for (regno = AX_REG; regno <= SP_REG; regno++)
8552 if (!(pic_labels_used & (1 << regno)))
8555 get_pc_thunk_name (name, regno);
8557 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
8558 get_identifier (name),
8559 build_function_type (void_type_node, void_list_node));
8560 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
8561 NULL_TREE, void_type_node);
8562 TREE_PUBLIC (decl) = 1;
8563 TREE_STATIC (decl) = 1;
8568 switch_to_section (darwin_sections[text_coal_section]);
8569 fputs ("\t.weak_definition\t", asm_out_file);
8570 assemble_name (asm_out_file, name);
8571 fputs ("\n\t.private_extern\t", asm_out_file);
8572 assemble_name (asm_out_file, name);
8573 putc ('\n', asm_out_file);
8574 ASM_OUTPUT_LABEL (asm_out_file, name);
8575 DECL_WEAK (decl) = 1;
8579 if (USE_HIDDEN_LINKONCE)
8581 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
8583 targetm.asm_out.unique_section (decl, 0);
8584 switch_to_section (get_named_section (decl, NULL, 0));
8586 targetm.asm_out.globalize_label (asm_out_file, name);
8587 fputs ("\t.hidden\t", asm_out_file);
8588 assemble_name (asm_out_file, name);
8589 putc ('\n', asm_out_file);
8590 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
8594 switch_to_section (text_section);
8595 ASM_OUTPUT_LABEL (asm_out_file, name);
8598 DECL_INITIAL (decl) = make_node (BLOCK);
8599 current_function_decl = decl;
8600 init_function_start (decl);
8601 first_function_block_is_cold = false;
8602 /* Make sure unwind info is emitted for the thunk if needed. */
8603 final_start_function (emit_barrier (), asm_out_file, 1);
8605 /* Pad stack IP move with 4 instructions (two NOPs count
8606 as one instruction). */
8607 if (TARGET_PAD_SHORT_FUNCTION)
8612 fputs ("\tnop\n", asm_out_file);
8615 xops[0] = gen_rtx_REG (Pmode, regno);
8616 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8617 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
8618 fputs ("\tret\n", asm_out_file);
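      /* For regno == BX_REG the two lines above emit, in AT&T syntax:
	     movl	(%esp), %ebx
	     ret
	 i.e. the thunk copies its own return address into %ebx, giving
	 the caller a PC-relative base for GOT addressing.  */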
8619 final_end_function ();
8620 init_insn_lengths ();
8621 free_after_compilation (cfun);
8623 current_function_decl = NULL;
8626 if (flag_split_stack)
8627 file_end_indicate_split_stack ();
8630 /* Emit code for the SET_GOT patterns. */
8633 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
8639 if (TARGET_VXWORKS_RTP && flag_pic)
8641 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
8642 xops[2] = gen_rtx_MEM (Pmode,
8643 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
8644 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
8646 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
8647 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
8648 an unadorned address. */
8649 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
8650 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
8651 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
8655 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
8657 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
8659 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
8662 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
8665 output_asm_insn ("call\t%a2", xops);
8666 #ifdef DWARF2_UNWIND_INFO
8667 /* The call to the next label acts as a push.  */
8668 if (dwarf2out_do_frame ())
8672 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8673 gen_rtx_PLUS (Pmode,
8676 RTX_FRAME_RELATED_P (insn) = 1;
8677 dwarf2out_frame_debug (insn, true);
8684 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8685 is what will be referenced by the Mach-O PIC subsystem. */
8687 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8690 targetm.asm_out.internal_label (asm_out_file, "L",
8691 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
8695 output_asm_insn ("pop%z0\t%0", xops);
8696 #ifdef DWARF2_UNWIND_INFO
8697 /* The pop is a pop and clobbers dest, but doesn't restore it
8698 for unwind info purposes. */
8699 if (dwarf2out_do_frame ())
8703 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
8704 dwarf2out_frame_debug (insn, true);
8705 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8706 gen_rtx_PLUS (Pmode,
8709 RTX_FRAME_RELATED_P (insn) = 1;
8710 dwarf2out_frame_debug (insn, true);
8719 get_pc_thunk_name (name, REGNO (dest));
8720 pic_labels_used |= 1 << REGNO (dest);
8722 #ifdef DWARF2_UNWIND_INFO
8723 /* Ensure all queued register saves are flushed before the call.  */
8725 if (dwarf2out_do_frame ())
8726 dwarf2out_flush_queued_reg_saves ();
8728 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
8729 xops[2] = gen_rtx_MEM (QImode, xops[2]);
8730 output_asm_insn ("call\t%X2", xops);
8731 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8732 is what will be referenced by the Mach-O PIC subsystem. */
8735 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8737 targetm.asm_out.internal_label (asm_out_file, "L",
8738 CODE_LABEL_NUMBER (label));
8745 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
8746 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
8748 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
8753 /* Generate a "push" pattern for input ARG.  */
8758 struct machine_function *m = cfun->machine;
8760 if (m->fs.cfa_reg == stack_pointer_rtx)
8761 m->fs.cfa_offset += UNITS_PER_WORD;
8762 m->fs.sp_offset += UNITS_PER_WORD;
8764 return gen_rtx_SET (VOIDmode,
8766 gen_rtx_PRE_DEC (Pmode,
8767 stack_pointer_rtx)),
8771 /* Generate a "pop" pattern for input ARG.  */
8776 return gen_rtx_SET (VOIDmode,
8779 gen_rtx_POST_INC (Pmode,
8780 stack_pointer_rtx)));
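/* As a sketch of the RTL produced: in 32-bit mode gen_push of %eax
   yields (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI ax)), which
   prints as "pushl %eax", and gen_pop yields the mirror image with
   post_inc, printing as "popl %eax".  */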
8783 /* Return >= 0 if there is an unused call-clobbered register available
8784 for the entire function. */
8787 ix86_select_alt_pic_regnum (void)
8789 if (current_function_is_leaf
8791 && !ix86_current_function_calls_tls_descriptor)
8794 /* Can't use the same register for both PIC and DRAP. */
8796 drap = REGNO (crtl->drap_reg);
8799 for (i = 2; i >= 0; --i)
8800 if (i != drap && !df_regs_ever_live_p (i))
8804 return INVALID_REGNUM;
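/* (Hard registers 0..2 scanned above are %eax, %edx and %ecx, the
   call-clobbered integer registers usable as an alternate PIC base.)  */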
8807 /* Return 1 if we need to save REGNO. */
8809 ix86_save_reg (unsigned int regno, int maybe_eh_return)
8811 if (pic_offset_table_rtx
8812 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
8813 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8815 || crtl->calls_eh_return
8816 || crtl->uses_const_pool))
8818 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
8823 if (crtl->calls_eh_return && maybe_eh_return)
8828 unsigned test = EH_RETURN_DATA_REGNO (i);
8829 if (test == INVALID_REGNUM)
8836 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
8839 return (df_regs_ever_live_p (regno)
8840 && !call_used_regs[regno]
8841 && !fixed_regs[regno]
8842 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
8845 /* Return the number of saved general-purpose registers.  */
8848 ix86_nsaved_regs (void)
8853 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8854 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8859 /* Return the number of saved SSE registers.  */
8862 ix86_nsaved_sseregs (void)
8867 if (ix86_cfun_abi () != MS_ABI)
8869 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8870 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8875 /* Given FROM and TO register numbers, say whether this elimination is
8876 allowed. If stack alignment is needed, we can only replace argument
8877 pointer with hard frame pointer, or replace frame pointer with stack
8878 pointer. Otherwise, frame pointer elimination is automatically
8879 handled and all other eliminations are valid. */
8882 ix86_can_eliminate (const int from, const int to)
8884 if (stack_realign_fp)
8885 return ((from == ARG_POINTER_REGNUM
8886 && to == HARD_FRAME_POINTER_REGNUM)
8887 || (from == FRAME_POINTER_REGNUM
8888 && to == STACK_POINTER_REGNUM));
8890 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8893 /* Return the offset between two registers, one to be eliminated, and the other
8894 its replacement, at the start of a routine. */
8897 ix86_initial_elimination_offset (int from, int to)
8899 struct ix86_frame frame;
8900 ix86_compute_frame_layout (&frame);
8902 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8903 return frame.hard_frame_pointer_offset;
8904 else if (from == FRAME_POINTER_REGNUM
8905 && to == HARD_FRAME_POINTER_REGNUM)
8906 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8909 gcc_assert (to == STACK_POINTER_REGNUM);
8911 if (from == ARG_POINTER_REGNUM)
8912 return frame.stack_pointer_offset;
8914 gcc_assert (from == FRAME_POINTER_REGNUM);
8915 return frame.stack_pointer_offset - frame.frame_pointer_offset;
8919 /* In a dynamically-aligned function, we can't know the offset from
8920 stack pointer to frame pointer, so we must ensure that setjmp
8921 eliminates fp against the hard fp (%ebp) rather than trying to
8922 index from %esp up to the top of the frame across a gap that is
8923 of unknown (at compile-time) size. */
8925 ix86_builtin_setjmp_frame_value (void)
8927 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
8930 /* On the x86 -fsplit-stack and -fstack-protector both use the same
8931 field in the TCB, so they cannot be used together.  */
8934 ix86_supports_split_stack (bool report ATTRIBUTE_UNUSED)
8938 #ifndef TARGET_THREAD_SPLIT_STACK_OFFSET
8940 error ("%<-fsplit-stack%> currently only supported on GNU/Linux");
8943 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
8946 error ("%<-fsplit-stack%> requires "
8947 "assembler support for CFI directives");
8955 /* When using -fsplit-stack, the allocation routines set a field in
8956 the TCB to the bottom of the stack plus this much space, measured in bytes.  */
8959 #define SPLIT_STACK_AVAILABLE 256
8961 /* Fill structure ix86_frame about frame of currently computed function. */
8964 ix86_compute_frame_layout (struct ix86_frame *frame)
8966 unsigned int stack_alignment_needed;
8967 HOST_WIDE_INT offset;
8968 unsigned int preferred_alignment;
8969 HOST_WIDE_INT size = get_frame_size ();
8970 HOST_WIDE_INT to_allocate;
8972 frame->nregs = ix86_nsaved_regs ();
8973 frame->nsseregs = ix86_nsaved_sseregs ();
8975 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8976 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8978 /* The MS ABI seems to require stack alignment to always be 16, except for
8979 function prologues and leaf functions.  */
8980 if ((ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8981 && (!current_function_is_leaf || cfun->calls_alloca != 0
8982 || ix86_current_function_calls_tls_descriptor))
8984 preferred_alignment = 16;
8985 stack_alignment_needed = 16;
8986 crtl->preferred_stack_boundary = 128;
8987 crtl->stack_alignment_needed = 128;
8990 gcc_assert (!size || stack_alignment_needed);
8991 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8992 gcc_assert (preferred_alignment <= stack_alignment_needed);
8994 /* For SEH we have to limit the amount of code movement into the prologue.
8995 At present we do this via a BLOCKAGE, at which point there's very little
8996 scheduling that can be done, which means that there's very little point
8997 in doing anything except PUSHs. */
8999 cfun->machine->use_fast_prologue_epilogue = false;
9001 /* During a reload iteration the number of registers saved can change.
9002 Recompute the value as needed.  Do not recompute when the number of
9003 registers didn't change, as reload makes multiple calls to this function
9004 and does not expect the decision to change within a single iteration.  */
9005 else if (!optimize_function_for_size_p (cfun)
9006 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
9008 int count = frame->nregs;
9009 struct cgraph_node *node = cgraph_node (current_function_decl);
9011 cfun->machine->use_fast_prologue_epilogue_nregs = count;
9013 /* The fast prologue uses move instead of push to save registers. This
9014 is significantly longer, but also executes faster as modern hardware
9015 can execute the moves in parallel, but can't do that for push/pop.
9017 Be careful about choosing which prologue to emit: when the function takes
9018 many instructions to execute, we may use the slow version, as well as when
9019 the function is known to be outside a hot spot (known with profile
9020 feedback only).  Weight the size of the function by the number of registers
9021 to save, as it is cheap to use one or two push instructions but very
9022 slow to use many of them.  */
9024 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
9025 if (node->frequency < NODE_FREQUENCY_NORMAL
9026 || (flag_branch_probabilities
9027 && node->frequency < NODE_FREQUENCY_HOT))
9028 cfun->machine->use_fast_prologue_epilogue = false;
9030 cfun->machine->use_fast_prologue_epilogue
9031 = !expensive_function_p (count);
9033 if (TARGET_PROLOGUE_USING_MOVE
9034 && cfun->machine->use_fast_prologue_epilogue)
9035 frame->save_regs_using_mov = true;
9037 frame->save_regs_using_mov = false;
9039 /* If static stack checking is enabled and done with probes, the registers
9040 need to be saved before allocating the frame. */
9041 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
9042 frame->save_regs_using_mov = false;
9044 /* Skip return address. */
9045 offset = UNITS_PER_WORD;
9047 /* Skip pushed static chain. */
9048 if (ix86_static_chain_on_stack)
9049 offset += UNITS_PER_WORD;
9051 /* Skip saved base pointer. */
9052 if (frame_pointer_needed)
9053 offset += UNITS_PER_WORD;
9054 frame->hfp_save_offset = offset;
9056 /* The traditional frame pointer location is at the top of the frame. */
9057 frame->hard_frame_pointer_offset = offset;
9059 /* Register save area */
9060 offset += frame->nregs * UNITS_PER_WORD;
9061 frame->reg_save_offset = offset;
9063 /* Align and set SSE register save area. */
9064 if (frame->nsseregs)
9066 /* The only ABI that has saved SSE registers (Win64) also has a
9067 16-byte aligned default stack, and thus we don't need to be
9068 within the re-aligned local stack frame to save them. */
9069 gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
9070 offset = (offset + 16 - 1) & -16;
9071 offset += frame->nsseregs * 16;
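      /* E.g. an offset of 40 rounds up to 48 above, since
	 (40 + 15) & -16 == 48, before the 16-byte SSE save slots are
	 added.  */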
9073 frame->sse_reg_save_offset = offset;
9075 /* The re-aligned stack starts here. Values before this point are not
9076 directly comparable with values below this point. In order to make
9077 sure that no value happens to be the same before and after, force
9078 the alignment computation below to add a non-zero value. */
9079 if (stack_realign_fp)
9080 offset = (offset + stack_alignment_needed) & -stack_alignment_needed;
9083 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
9084 offset += frame->va_arg_size;
9086 /* Align start of frame for local function. */
9087 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
9089 /* Frame pointer points here. */
9090 frame->frame_pointer_offset = offset;
9094 /* Add outgoing arguments area. Can be skipped if we eliminated
9095 all the function calls as dead code.
9096 Skipping is, however, impossible when the function calls alloca: the
9097 alloca expander assumes that the last crtl->outgoing_args_size bytes
9098 of the stack frame are unused.  */
9099 if (ACCUMULATE_OUTGOING_ARGS
9100 && (!current_function_is_leaf || cfun->calls_alloca
9101 || ix86_current_function_calls_tls_descriptor))
9103 offset += crtl->outgoing_args_size;
9104 frame->outgoing_arguments_size = crtl->outgoing_args_size;
9107 frame->outgoing_arguments_size = 0;
9109 /* Align the stack boundary.  Only needed if we're calling another function or using alloca.  */
9111 if (!current_function_is_leaf || cfun->calls_alloca
9112 || ix86_current_function_calls_tls_descriptor)
9113 offset = (offset + preferred_alignment - 1) & -preferred_alignment;
9115 /* We've reached end of stack frame. */
9116 frame->stack_pointer_offset = offset;
9118 /* Size prologue needs to allocate. */
9119 to_allocate = offset - frame->sse_reg_save_offset;
9121 if ((!to_allocate && frame->nregs <= 1)
9122 || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
9123 frame->save_regs_using_mov = false;
9125 if (ix86_using_red_zone ()
9126 && current_function_sp_is_unchanging
9127 && current_function_is_leaf
9128 && !ix86_current_function_calls_tls_descriptor)
9130 frame->red_zone_size = to_allocate;
9131 if (frame->save_regs_using_mov)
9132 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
9133 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
9134 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
9137 frame->red_zone_size = 0;
9138 frame->stack_pointer_offset -= frame->red_zone_size;
9140 /* The SEH frame pointer location is near the bottom of the frame.
9141 This is enforced by the fact that the difference between the
9142 stack pointer and the frame pointer is limited to 240 bytes in
9143 the unwind data structure. */
9148 /* If we can leave the frame pointer where it is, do so. */
9149 diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
9150 if (diff > 240 || (diff & 15) != 0)
9152 /* Ideally we'd determine what portion of the local stack frame
9153 (within the constraint of the lowest 240) is most heavily used.
9154 But without that complication, simply bias the frame pointer
9155 by 128 bytes so as to maximize the amount of the local stack
9156 frame that is addressable with 8-bit offsets. */
9157 frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
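	  /* Offsets in [-128, 127] from a base register encode as
	     one-byte displacements, so parking the frame pointer 128
	     bytes above the stack pointer makes the bottom 256 bytes of
	     the frame reachable with disp8 addressing.  */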
9162 /* This is semi-inlined memory_address_length, but simplified
9163 since we know that we're always dealing with reg+offset, and
9164 to avoid having to create and discard all that rtl. */
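/* Illustrative results for the function below: length 0 for 0(%eax);
   1 for 0(%ebp), since EBP or R13 as a base always needs an explicit
   displacement byte; 2 for 8(%esp), a disp8 plus the SIB byte; and
   4 for 0x100(%eax), a disp32.  */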
9167 choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
9173 /* EBP and R13 cannot be encoded without an offset. */
9174 len = (regno == BP_REG || regno == R13_REG);
9176 else if (IN_RANGE (offset, -128, 127))
9179 /* ESP and R12 must be encoded with a SIB byte. */
9180 if (regno == SP_REG || regno == R12_REG)
9186 /* Return an RTX that points to CFA_OFFSET within the stack frame.
9187 The valid base registers are taken from CFUN->MACHINE->FS. */
9190 choose_baseaddr (HOST_WIDE_INT cfa_offset)
9192 const struct machine_function *m = cfun->machine;
9193 rtx base_reg = NULL;
9194 HOST_WIDE_INT base_offset = 0;
9196 if (m->use_fast_prologue_epilogue)
9198 /* Choose the base register most likely to allow the most scheduling
9199 opportunities.  Generally FP is valid throughout the function,
9200 while DRAP must be reloaded within the epilogue. But choose either
9201 over the SP due to increased encoding size. */
9205 base_reg = hard_frame_pointer_rtx;
9206 base_offset = m->fs.fp_offset - cfa_offset;
9208 else if (m->fs.drap_valid)
9210 base_reg = crtl->drap_reg;
9211 base_offset = 0 - cfa_offset;
9213 else if (m->fs.sp_valid)
9215 base_reg = stack_pointer_rtx;
9216 base_offset = m->fs.sp_offset - cfa_offset;
9221 HOST_WIDE_INT toffset;
9224 /* Choose the base register with the smallest address encoding.
9225 With a tie, choose FP > DRAP > SP. */
9228 base_reg = stack_pointer_rtx;
9229 base_offset = m->fs.sp_offset - cfa_offset;
9230 len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
9232 if (m->fs.drap_valid)
9234 toffset = 0 - cfa_offset;
9235 tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
9238 base_reg = crtl->drap_reg;
9239 base_offset = toffset;
9245 toffset = m->fs.fp_offset - cfa_offset;
9246 tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
9249 base_reg = hard_frame_pointer_rtx;
9250 base_offset = toffset;
9255 gcc_assert (base_reg != NULL);
9257 return plus_constant (base_reg, base_offset);
9260 /* Emit code to save registers in the prologue. */
9263 ix86_emit_save_regs (void)
9268 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
9269 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9271 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
9272 RTX_FRAME_RELATED_P (insn) = 1;
9276 /* Emit a single register save at CFA - CFA_OFFSET. */
9279 ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
9280 HOST_WIDE_INT cfa_offset)
9282 struct machine_function *m = cfun->machine;
9283 rtx reg = gen_rtx_REG (mode, regno);
9284 rtx mem, addr, base, insn;
9286 addr = choose_baseaddr (cfa_offset);
9287 mem = gen_frame_mem (mode, addr);
9289 /* For SSE saves, we need to indicate the 128-bit alignment. */
9290 set_mem_align (mem, GET_MODE_ALIGNMENT (mode));
9292 insn = emit_move_insn (mem, reg);
9293 RTX_FRAME_RELATED_P (insn) = 1;
9296 if (GET_CODE (base) == PLUS)
9297 base = XEXP (base, 0);
9298 gcc_checking_assert (REG_P (base));
9300 /* When saving registers into a re-aligned local stack frame, avoid
9301 any tricky guessing by dwarf2out. */
9302 if (m->fs.realigned)
9304 gcc_checking_assert (stack_realign_drap);
9306 if (regno == REGNO (crtl->drap_reg))
9308 /* A bit of a hack. We force the DRAP register to be saved in
9309 the re-aligned stack frame, which provides us with a copy
9310 of the CFA that will last past the prologue. Install it. */
9311 gcc_checking_assert (cfun->machine->fs.fp_valid);
9312 addr = plus_constant (hard_frame_pointer_rtx,
9313 cfun->machine->fs.fp_offset - cfa_offset);
9314 mem = gen_rtx_MEM (mode, addr);
9315 add_reg_note (insn, REG_CFA_DEF_CFA, mem);
9319 /* The frame pointer is a stable reference within the
9320 aligned frame. Use it. */
9321 gcc_checking_assert (cfun->machine->fs.fp_valid);
9322 addr = plus_constant (hard_frame_pointer_rtx,
9323 cfun->machine->fs.fp_offset - cfa_offset);
9324 mem = gen_rtx_MEM (mode, addr);
9325 add_reg_note (insn, REG_CFA_EXPRESSION,
9326 gen_rtx_SET (VOIDmode, mem, reg));
9330 /* The memory may not be relative to the current CFA register,
9331 which means that we may need to generate a new pattern for
9332 use by the unwind info. */
9333 else if (base != m->fs.cfa_reg)
9335 addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
9336 mem = gen_rtx_MEM (mode, addr);
9337 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
9341 /* Emit code to save registers using MOV insns.
9342 First register is stored at CFA - CFA_OFFSET. */
9344 ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
9348 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9349 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9351 ix86_emit_save_reg_using_mov (Pmode, regno, cfa_offset);
9352 cfa_offset -= UNITS_PER_WORD;
9356 /* Emit code to save SSE registers using MOV insns.
9357 First register is stored at CFA - CFA_OFFSET. */
9359 ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
9363 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9364 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9366 ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
9371 static GTY(()) rtx queued_cfa_restores;
9373 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next stack
9374 manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
9375 Don't add the note if the previously saved value will be left untouched
9376 within the stack red zone until return, as unwinders can find the same value
9377 in the register and on the stack.  */
9380 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
9382 if (cfa_offset <= cfun->machine->fs.red_zone_offset)
9387 add_reg_note (insn, REG_CFA_RESTORE, reg);
9388 RTX_FRAME_RELATED_P (insn) = 1;
9392 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
9395 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
9398 ix86_add_queued_cfa_restore_notes (rtx insn)
9401 if (!queued_cfa_restores)
9403 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
9405 XEXP (last, 1) = REG_NOTES (insn);
9406 REG_NOTES (insn) = queued_cfa_restores;
9407 queued_cfa_restores = NULL_RTX;
9408 RTX_FRAME_RELATED_P (insn) = 1;
9411 /* Expand prologue or epilogue stack adjustment.
9412 The pattern exists to put a dependency on all ebp-based memory accesses.
9413 STYLE should be negative if instructions should be marked as frame related,
9414 zero if the %r11 register is live and cannot be freely used, and positive otherwise.  */
9418 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
9419 int style, bool set_cfa)
9421 struct machine_function *m = cfun->machine;
9423 bool add_frame_related_expr = false;
9426 insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
9427 else if (x86_64_immediate_operand (offset, DImode))
9428 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
9432 /* r11 is used by indirect sibcall return as well, set before the
9433 epilogue and used after the epilogue. */
9435 tmp = gen_rtx_REG (DImode, R11_REG);
9438 gcc_assert (src != hard_frame_pointer_rtx
9439 && dest != hard_frame_pointer_rtx);
9440 tmp = hard_frame_pointer_rtx;
9442 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
9444 add_frame_related_expr = true;
9446 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
9449 insn = emit_insn (insn);
9451 ix86_add_queued_cfa_restore_notes (insn);
9457 gcc_assert (m->fs.cfa_reg == src);
9458 m->fs.cfa_offset += INTVAL (offset);
9459 m->fs.cfa_reg = dest;
9461 r = gen_rtx_PLUS (Pmode, src, offset);
9462 r = gen_rtx_SET (VOIDmode, dest, r);
9463 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9464 RTX_FRAME_RELATED_P (insn) = 1;
9468 RTX_FRAME_RELATED_P (insn) = 1;
9469 if (add_frame_related_expr)
9471 rtx r = gen_rtx_PLUS (Pmode, src, offset);
9472 r = gen_rtx_SET (VOIDmode, dest, r);
9473 add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
9477 if (dest == stack_pointer_rtx)
9479 HOST_WIDE_INT ooffset = m->fs.sp_offset;
9480 bool valid = m->fs.sp_valid;
9482 if (src == hard_frame_pointer_rtx)
9484 valid = m->fs.fp_valid;
9485 ooffset = m->fs.fp_offset;
9487 else if (src == crtl->drap_reg)
9489 valid = m->fs.drap_valid;
9494 /* Else there are two possibilities: SP itself, which we set
9495 up as the default above, or EH_RETURN_STACKADJ_RTX, which we
9496 have taken care of by hand along the eh_return path.  */
9497 gcc_checking_assert (src == stack_pointer_rtx
9498 || offset == const0_rtx);
9501 m->fs.sp_offset = ooffset - INTVAL (offset);
9502 m->fs.sp_valid = valid;
9506 /* Find an available register to be used as the dynamic realign argument
9507 pointer register.  Such a register will be written in the prologue and
9508 used at the beginning of the body, so it must not be
9509 1. a parameter passing register.
9511 We reuse the static-chain register if it is available.  Otherwise, we
9512 use DI for i386 and R13 for x86-64.  We chose R13 since it has a longer live range.
9515 Return: the regno of the chosen register.  */
9518 find_drap_reg (void)
9520 tree decl = cfun->decl;
9524 /* Use R13 for a nested function or a function that needs a static chain.
9525 Since a function with a tail call may use any caller-saved
9526 registers in the epilogue, DRAP must not use a caller-saved
9527 register in that case.  */
9528 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9535 /* Use DI for a nested function or a function that needs a static chain.
9536 Since a function with a tail call may use any caller-saved
9537 registers in the epilogue, DRAP must not use a caller-saved
9538 register in that case.  */
9539 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9542 /* Reuse the static chain register if it isn't used for parameter passing.  */
9544 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
9545 && !lookup_attribute ("fastcall",
9546 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
9547 && !lookup_attribute ("thiscall",
9548 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
9555 /* Return minimum incoming stack alignment. */
9558 ix86_minimum_incoming_stack_boundary (bool sibcall)
9560 unsigned int incoming_stack_boundary;
9562 /* Prefer the one specified at command line. */
9563 if (ix86_user_incoming_stack_boundary)
9564 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
9565 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
9566 if -mstackrealign is used, this isn't a sibcall check, and the
9567 estimated stack alignment is 128 bits.  */
9570 && ix86_force_align_arg_pointer
9571 && crtl->stack_alignment_estimated == 128)
9572 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9574 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
9576 /* Incoming stack alignment can be changed on individual functions
9577 via force_align_arg_pointer attribute. We use the smallest
9578 incoming stack boundary. */
9579 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
9580 && lookup_attribute (ix86_force_align_arg_pointer_string,
9581 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
9582 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9584 /* The incoming stack frame has to be aligned at least at
9585 parm_stack_boundary. */
9586 if (incoming_stack_boundary < crtl->parm_stack_boundary)
9587 incoming_stack_boundary = crtl->parm_stack_boundary;
9589 /* The stack at the entrance of main is aligned by the runtime.  We use
9590 the smallest incoming stack boundary.  */
9591 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
9592 && DECL_NAME (current_function_decl)
9593 && MAIN_NAME_P (DECL_NAME (current_function_decl))
9594 && DECL_FILE_SCOPE_P (current_function_decl))
9595 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
9597 return incoming_stack_boundary;
9600 /* Update incoming stack boundary and estimated stack alignment. */
9603 ix86_update_stack_boundary (void)
9605 ix86_incoming_stack_boundary
9606 = ix86_minimum_incoming_stack_boundary (false);
9608 /* x86_64 varargs need 16-byte stack alignment for the register save area.  */
9612 && crtl->stack_alignment_estimated < 128)
9613 crtl->stack_alignment_estimated = 128;
9616 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
9617 needed or an rtx for DRAP otherwise. */
9620 ix86_get_drap_rtx (void)
9622 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
9623 crtl->need_drap = true;
9625 if (stack_realign_drap)
9627 /* Assign DRAP to vDRAP and return vDRAP.  */
9628 unsigned int regno = find_drap_reg ();
9633 arg_ptr = gen_rtx_REG (Pmode, regno);
9634 crtl->drap_reg = arg_ptr;
9637 drap_vreg = copy_to_reg (arg_ptr);
9641 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
9644 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
9645 RTX_FRAME_RELATED_P (insn) = 1;
9653 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
9656 ix86_internal_arg_pointer (void)
9658 return virtual_incoming_args_rtx;
9661 struct scratch_reg {
9666 /* Return a short-lived scratch register for use on function entry.
9667 In 32-bit mode, it is valid only after the registers are saved
9668 in the prologue. This register must be released by means of
9669 release_scratch_register_on_entry once it is dead. */
9672 get_scratch_register_on_entry (struct scratch_reg *sr)
9680 /* We always use R11 in 64-bit mode. */
9685 tree decl = current_function_decl, fntype = TREE_TYPE (decl);
9687 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
9688 bool static_chain_p = DECL_STATIC_CHAIN (decl);
9689 int regparm = ix86_function_regparm (fntype, decl);
9691 = crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;
9693 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
9694 for the static chain register. */
9695 if ((regparm < 1 || (fastcall_p && !static_chain_p))
9696 && drap_regno != AX_REG)
9698 else if (regparm < 2 && drap_regno != DX_REG)
9700 /* ecx is the static chain register. */
9701 else if (regparm < 3 && !fastcall_p && !static_chain_p
9702 && drap_regno != CX_REG)
9704 else if (ix86_save_reg (BX_REG, true))
9706 /* esi is the static chain register. */
9707 else if (!(regparm == 3 && static_chain_p)
9708 && ix86_save_reg (SI_REG, true))
9710 else if (ix86_save_reg (DI_REG, true))
9714 regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
9719 sr->reg = gen_rtx_REG (Pmode, regno);
9722 rtx insn = emit_insn (gen_push (sr->reg));
9723 RTX_FRAME_RELATED_P (insn) = 1;
9727 /* Release a scratch register obtained from the preceding function. */
9730 release_scratch_register_on_entry (struct scratch_reg *sr)
9734 rtx x, insn = emit_insn (gen_pop (sr->reg));
9736 /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
9737 RTX_FRAME_RELATED_P (insn) = 1;
9738 x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
9739 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
9740 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
9744 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
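/* With the usual STACK_CHECK_PROBE_INTERVAL_EXP of 12, this is 4096
   bytes, i.e. one page: probing at least once per page guarantees the
   OS guard page is touched before the stack pointer can jump past it.  */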
9746 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
9749 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
9751 /* We skip the probe for the first interval + a small dope of 4 words and
9752 probe that many bytes past the specified size to maintain a protection
9753 area at the bottom of the stack.  */
9754 const int dope = 4 * UNITS_PER_WORD;
9755 rtx size_rtx = GEN_INT (size);
9757 /* See if we have a constant small number of probes to generate. If so,
9758 that's the easy case. The run-time loop is made up of 11 insns in the
9759 generic case while the compile-time loop is made up of 3+2*(n-1) insns
9760 for n intervals.  */
9761 if (size <= 5 * PROBE_INTERVAL)
9763 HOST_WIDE_INT i, adjust;
9764 bool first_probe = true;
9766 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
9767 values of N from 1 until it exceeds SIZE. If only one probe is
9768 needed, this will not generate any code. Then adjust and probe
9769 to PROBE_INTERVAL + SIZE. */
9770 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
9774 adjust = 2 * PROBE_INTERVAL + dope;
9775 first_probe = false;
9778 adjust = PROBE_INTERVAL;
9780 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9781 plus_constant (stack_pointer_rtx, -adjust)));
9782 emit_stack_probe (stack_pointer_rtx);
9786 adjust = size + PROBE_INTERVAL + dope;
9788 adjust = size + PROBE_INTERVAL - i;
9790 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9791 plus_constant (stack_pointer_rtx, -adjust)));
9792 emit_stack_probe (stack_pointer_rtx);
9794 /* Adjust back to account for the additional first interval. */
9795 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9796 plus_constant (stack_pointer_rtx,
9797 PROBE_INTERVAL + dope)));
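      /* A worked example of the unrolled case above, assuming a
	 4096-byte PROBE_INTERVAL and the 32-bit dope of 16 bytes: for
	 size == 10000 we emit sub $8208 / probe, sub $4096 / probe,
	 sub $1808 / probe, then add $4112, a net adjustment of exactly
	 -10000 with every new page touched along the way.  */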
9800 /* Otherwise, do the same as above, but in a loop. Note that we must be
9801 extra careful with variables wrapping around because we might be at
9802 the very top (or the very bottom) of the address space and we have
9803 to be able to handle this case properly; in particular, we use an
9804 equality test for the loop condition. */
9807 HOST_WIDE_INT rounded_size;
9808 struct scratch_reg sr;
9810 get_scratch_register_on_entry (&sr);
9813 /* Step 1: round SIZE to the previous multiple of the interval. */
9815 rounded_size = size & -PROBE_INTERVAL;
9818 /* Step 2: compute initial and final value of the loop counter. */
9820 /* SP = SP_0 + PROBE_INTERVAL. */
9821 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9822 plus_constant (stack_pointer_rtx,
9823 - (PROBE_INTERVAL + dope))));
9825 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
9826 emit_move_insn (sr.reg, GEN_INT (-rounded_size));
9827 emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
9828 gen_rtx_PLUS (Pmode, sr.reg,
9829 stack_pointer_rtx)));
9834 while (SP != LAST_ADDR)
9836 SP = SP + PROBE_INTERVAL
9840 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
9841 values of N from 1 until it is equal to ROUNDED_SIZE. */
9843 emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));
9846 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
9847 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
9849 if (size != rounded_size)
9851 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9852 plus_constant (stack_pointer_rtx,
9853 rounded_size - size)));
9854 emit_stack_probe (stack_pointer_rtx);
9857 /* Adjust back to account for the additional first interval. */
9858 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9859 plus_constant (stack_pointer_rtx,
9860 PROBE_INTERVAL + dope)));
9862 release_scratch_register_on_entry (&sr);
9865 gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
9866 cfun->machine->fs.sp_offset += size;
9868 /* Make sure nothing is scheduled before we are done. */
9869 emit_insn (gen_blockage ());
9872 /* Adjust the stack pointer up to REG while probing it. */
9875 output_adjust_stack_and_probe (rtx reg)
9877 static int labelno = 0;
9878 char loop_lab[32], end_lab[32];
9881 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
9882 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
9884 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
9886 /* Jump to END_LAB if SP == LAST_ADDR. */
9887 xops[0] = stack_pointer_rtx;
9889 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
9890 fputs ("\tje\t", asm_out_file);
9891 assemble_name_raw (asm_out_file, end_lab);
9892 fputc ('\n', asm_out_file);
9894 /* SP = SP + PROBE_INTERVAL. */
9895 xops[1] = GEN_INT (PROBE_INTERVAL);
9896 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
9899 xops[1] = const0_rtx;
9900 output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);
9902 fprintf (asm_out_file, "\tjmp\t");
9903 assemble_name_raw (asm_out_file, loop_lab);
9904 fputc ('\n', asm_out_file);
9906 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
9911 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
9912 inclusive. These are offsets from the current stack pointer. */
9915 ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
9917 /* See if we have a constant small number of probes to generate. If so,
9918 that's the easy case. The run-time loop is made up of 7 insns in the
9919 generic case while the compile-time loop is made up of n insns for n intervals.  */
9921 if (size <= 7 * PROBE_INTERVAL)
9925 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
9926 it exceeds SIZE. If only one probe is needed, this will not
9927 generate any code. Then probe at FIRST + SIZE. */
9928 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
9929 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
9931 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
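      /* E.g. with first == 0, size == 12000 and a 4096-byte interval,
	 the loop above probes at sp - 4096 and sp - 8192, and the final
	 probe lands at sp - 12000 to cover the partial last interval.  */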
9934 /* Otherwise, do the same as above, but in a loop. Note that we must be
9935 extra careful with variables wrapping around because we might be at
9936 the very top (or the very bottom) of the address space and we have
9937 to be able to handle this case properly; in particular, we use an
9938 equality test for the loop condition. */
9941 HOST_WIDE_INT rounded_size, last;
9942 struct scratch_reg sr;
9944 get_scratch_register_on_entry (&sr);
9947 /* Step 1: round SIZE to the previous multiple of the interval. */
9949 rounded_size = size & -PROBE_INTERVAL;
9952 /* Step 2: compute initial and final value of the loop counter. */
9954 /* TEST_OFFSET = FIRST. */
9955 emit_move_insn (sr.reg, GEN_INT (-first));
9957 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
9958 last = first + rounded_size;
9963 while (TEST_ADDR != LAST_ADDR)
9965 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
9969 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
9970 until it is equal to ROUNDED_SIZE. */
9972 emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));
9975 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
9976 that SIZE is equal to ROUNDED_SIZE. */
9978 if (size != rounded_size)
9979 emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
9982 rounded_size - size));
9984 release_scratch_register_on_entry (&sr);
9987 /* Make sure nothing is scheduled before we are done. */
9988 emit_insn (gen_blockage ());
9991 /* Probe a range of stack addresses from REG to END, inclusive. These are
9992 offsets from the current stack pointer. */
9995 output_probe_stack_range (rtx reg, rtx end)
9997 static int labelno = 0;
9998 char loop_lab[32], end_lab[32];
10001 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10002 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10004 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10006 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
10009 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
10010 fputs ("\tje\t", asm_out_file);
10011 assemble_name_raw (asm_out_file, end_lab);
10012 fputc ('\n', asm_out_file);
10014 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
10015 xops[1] = GEN_INT (PROBE_INTERVAL);
10016 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
10018 /* Probe at TEST_ADDR. */
10019 xops[0] = stack_pointer_rtx;
10021 xops[2] = const0_rtx;
10022 output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);
10024 fprintf (asm_out_file, "\tjmp\t");
10025 assemble_name_raw (asm_out_file, loop_lab);
10026 fputc ('\n', asm_out_file);
10028 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
10033 /* Finalize the stack_realign_needed flag, which will guide the prologue
10034 and epilogue to be generated in the correct form.  */
10036 ix86_finalize_stack_realign_flags (void)
10038 /* Check if stack realignment is really needed after reload, and
10039 store the result in cfun.  */
10040 unsigned int incoming_stack_boundary
10041 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
10042 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
10043 unsigned int stack_realign = (incoming_stack_boundary
10044 < (current_function_is_leaf
10045 ? crtl->max_used_stack_slot_alignment
10046 : crtl->stack_alignment_needed));
10048 if (crtl->stack_realign_finalized)
10050 /* After stack_realign_needed is finalized, we can no longer
10052 gcc_assert (crtl->stack_realign_needed == stack_realign);
10056 crtl->stack_realign_needed = stack_realign;
10057 crtl->stack_realign_finalized = true;
10061 /* Expand the prologue into a bunch of separate insns. */
10064 ix86_expand_prologue (void)
10066 struct machine_function *m = cfun->machine;
10069 struct ix86_frame frame;
10070 HOST_WIDE_INT allocate;
10071 bool int_registers_saved;
10073 ix86_finalize_stack_realign_flags ();
10075 /* DRAP should not coexist with stack_realign_fp */
10076 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
10078 memset (&m->fs, 0, sizeof (m->fs));
10080 /* Initialize CFA state for before the prologue. */
10081 m->fs.cfa_reg = stack_pointer_rtx;
10082 m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;
10084 /* Track SP offset to the CFA. We continue tracking this after we've
10085 swapped the CFA register away from SP. In the case of re-alignment
10086 this is fudged; we're interested in offsets within the local frame.  */
10087 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10088 m->fs.sp_valid = true;
10090 ix86_compute_frame_layout (&frame);
10092 if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
10094 /* We should have already generated an error for any use of
10095 ms_hook on a nested function. */
10096 gcc_checking_assert (!ix86_static_chain_on_stack);
10098 /* Check if profiling is active and we shall use the profiling-before-
10099 prologue variant.  If so, sorry.  */
10100 if (crtl->profile && flag_fentry != 0)
10101 sorry ("ms_hook_prologue attribute isn%'t compatible "
10102 "with -mfentry for 32-bit");
10104 /* In ix86_asm_output_function_label we emitted:
10105 8b ff movl.s %edi,%edi
10106 55 push %ebp
10107 8b ec movl.s %esp,%ebp
10109 This matches the hookable function prologue in Win32 API
10110 functions in Microsoft Windows XP Service Pack 2 and newer.
10111 Wine uses this to enable Windows apps to hook the Win32 API
10112 functions provided by Wine.
10114 What that means is that we've already set up the frame pointer. */
10116 if (frame_pointer_needed
10117 && !(crtl->drap_reg && crtl->stack_realign_needed))
10121 /* We've decided to use the frame pointer already set up.
10122 Describe this to the unwinder by pretending that both
10123 push and mov insns happen right here.
10125 Putting the unwind info here at the end of the ms_hook
10126 is done so that we can make absolutely certain we get
10127 the required byte sequence at the start of the function,
10128 rather than relying on an assembler that can produce
10129 the exact encoding required.
10131 However it does mean (in the unpatched case) that we have
10132 a 1 insn window where the asynchronous unwind info is
10133 incorrect. However, if we placed the unwind info at
10134 its correct location we would have incorrect unwind info
10135 in the patched case. Which is probably all moot since
10136 I don't expect Wine generates dwarf2 unwind info for the
10137 system libraries that use this feature. */
10139 insn = emit_insn (gen_blockage ());
10141 push = gen_push (hard_frame_pointer_rtx);
10142 mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
10143 stack_pointer_rtx);
10144 RTX_FRAME_RELATED_P (push) = 1;
10145 RTX_FRAME_RELATED_P (mov) = 1;
10147 RTX_FRAME_RELATED_P (insn) = 1;
10148 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10149 gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));
10151 /* Note that gen_push incremented m->fs.cfa_offset, even
10152 though we didn't emit the push insn here. */
10153 m->fs.cfa_reg = hard_frame_pointer_rtx;
10154 m->fs.fp_offset = m->fs.cfa_offset;
10155 m->fs.fp_valid = true;
10159 /* The frame pointer is not needed so pop %ebp again.
10160 This leaves us with a pristine state. */
10161 emit_insn (gen_pop (hard_frame_pointer_rtx));
10165 /* The first insn of a function that accepts its static chain on the
10166 stack is to push the register that would be filled in by a direct
10167 call. This insn will be skipped by the trampoline. */
10168 else if (ix86_static_chain_on_stack)
10170 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
10171 emit_insn (gen_blockage ());
10173 /* We don't want to interpret this push insn as a register save,
10174 only as a stack adjustment. The real copy of the register as
10175 a save will be done later, if needed. */
10176 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
10177 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
10178 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
10179 RTX_FRAME_RELATED_P (insn) = 1;
10182 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
10183 DRAP is needed and stack realignment is really needed after reload.  */
10184 if (stack_realign_drap)
10186 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10188 /* Only need to push parameter pointer reg if it is caller saved. */
10189 if (!call_used_regs[REGNO (crtl->drap_reg)])
10191 /* Push arg pointer reg */
10192 insn = emit_insn (gen_push (crtl->drap_reg));
10193 RTX_FRAME_RELATED_P (insn) = 1;
10196 /* Grab the argument pointer. */
10197 t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
10198 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10199 RTX_FRAME_RELATED_P (insn) = 1;
10200 m->fs.cfa_reg = crtl->drap_reg;
10201 m->fs.cfa_offset = 0;
10203 /* Align the stack. */
10204 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10206 GEN_INT (-align_bytes)));
10207 RTX_FRAME_RELATED_P (insn) = 1;
10209 /* Replicate the return address on the stack so that the return
10210 address can be reached via the (argp - 1) slot.  This is needed
10211 to implement the macro RETURN_ADDR_RTX and the intrinsic function
10212 expand_builtin_return_addr, etc.  */
10213 t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
10214 t = gen_frame_mem (Pmode, t);
10215 insn = emit_insn (gen_push (t));
10216 RTX_FRAME_RELATED_P (insn) = 1;
10218 /* For the purposes of frame and register save area addressing,
10219 we've started over with a new frame. */
10220 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10221 m->fs.realigned = true;
10224 if (frame_pointer_needed && !m->fs.fp_valid)
10226 /* Note: AT&T enter does NOT have reversed args. Enter is probably
10227 slower on all targets. Also sdb doesn't like it. */
10228 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
10229 RTX_FRAME_RELATED_P (insn) = 1;
10231 if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
10233 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10234 RTX_FRAME_RELATED_P (insn) = 1;
10236 if (m->fs.cfa_reg == stack_pointer_rtx)
10237 m->fs.cfa_reg = hard_frame_pointer_rtx;
10238 m->fs.fp_offset = m->fs.sp_offset;
10239 m->fs.fp_valid = true;
10243 int_registers_saved = (frame.nregs == 0);
10245 if (!int_registers_saved)
10247 /* If saving registers via PUSH, do so now. */
10248 if (!frame.save_regs_using_mov)
10250 ix86_emit_save_regs ();
10251 int_registers_saved = true;
10252 gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
10255 /* When using the red zone we may start register saving before allocating
10256 the stack frame, saving one cycle of the prologue.  However, avoid
10257 doing this if we have to probe the stack; at least on x86_64 the
10258 stack probe can turn into a call that clobbers a red zone location. */
10259 else if (ix86_using_red_zone ()
10260 && (! TARGET_STACK_PROBE
10261 || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
10263 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10264 int_registers_saved = true;
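/* For illustration only: on x86_64 the psABI guarantees a 128-byte
   red zone below %rsp, so the moves emitted above may legitimately
   store into memory the frame allocation has not yet claimed, e.g.
       movq %rbx, -8(%rsp)
       subq $24, %rsp
   rather than adjusting %rsp first (a sketch; offsets depend on the
   frame layout).  */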
10268 if (stack_realign_fp)
10270 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10271 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
10273 /* The computation of the size of the re-aligned stack frame means
10274 that we must allocate the size of the register save area before
10275 performing the actual alignment. Otherwise we cannot guarantee
10276 that there's enough storage above the realignment point. */
10277 if (m->fs.sp_offset != frame.sse_reg_save_offset)
10278 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10279 GEN_INT (m->fs.sp_offset
10280 - frame.sse_reg_save_offset),
10283 /* Align the stack. */
10284 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10286 GEN_INT (-align_bytes)));
10288 /* For the purposes of register save area addressing, the stack
10289 pointer is no longer valid. As for the value of sp_offset,
10290 see ix86_compute_frame_layout, which we need to match in order
10291 to pass verification of stack_pointer_offset at the end. */
10292 m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
10293 m->fs.sp_valid = false;
10296 allocate = frame.stack_pointer_offset - m->fs.sp_offset;
10298 if (flag_stack_usage)
10300 /* We start to count from ARG_POINTER. */
10301 HOST_WIDE_INT stack_size = frame.stack_pointer_offset;
10303 /* If it was realigned, take into account the fake frame. */
10304 if (stack_realign_drap)
10306 if (ix86_static_chain_on_stack)
10307 stack_size += UNITS_PER_WORD;
10309 if (!call_used_regs[REGNO (crtl->drap_reg)])
10310 stack_size += UNITS_PER_WORD;
10312 /* This over-estimates by 1 minimal-stack-alignment-unit but
10313 mitigates that by counting in the new return address slot. */
10314 current_function_dynamic_stack_size
10315 += crtl->stack_alignment_needed / BITS_PER_UNIT;
10318 current_function_static_stack_size = stack_size;
10321 /* The stack has already been decremented by the instruction calling us
10322 so we need to probe unconditionally to preserve the protection area. */
10323 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
10325 /* We expect the registers to be saved when probes are used. */
10326 gcc_assert (int_registers_saved);
10328 if (STACK_CHECK_MOVING_SP)
10330 ix86_adjust_stack_and_probe (allocate);
10335 HOST_WIDE_INT size = allocate;
10337 if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
10338 size = 0x80000000 - STACK_CHECK_PROTECT - 1;
10340 if (TARGET_STACK_PROBE)
10341 ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
10343 ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
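/* For illustration only: the emitted probes touch one word per
   probe interval (a page), conceptually
       orq $0, -4096(%rsp)
       orq $0, -8192(%rsp)
       ...
   so the guard page is always touched before the frame can skip
   past it.  The real interval and addressing come from
   ix86_emit_probe_stack_range.  */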
10349 else if (!ix86_target_stack_probe ()
10350 || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
10352 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10353 GEN_INT (-allocate), -1,
10354 m->fs.cfa_reg == stack_pointer_rtx);
10358 rtx eax = gen_rtx_REG (Pmode, AX_REG);
10360 rtx (*adjust_stack_insn)(rtx, rtx, rtx);
10362 bool eax_live = false;
10363 bool r10_live = false;
10366 r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
10367 if (!TARGET_64BIT_MS_ABI)
10368 eax_live = ix86_eax_live_at_start_p ();
10372 emit_insn (gen_push (eax));
10373 allocate -= UNITS_PER_WORD;
10377 r10 = gen_rtx_REG (Pmode, R10_REG);
10378 emit_insn (gen_push (r10));
10379 allocate -= UNITS_PER_WORD;
10382 emit_move_insn (eax, GEN_INT (allocate));
10383 emit_insn (ix86_gen_allocate_stack_worker (eax, eax));
10385 /* Use the fact that AX still contains ALLOCATE. */
10386 adjust_stack_insn = (TARGET_64BIT
10387 ? gen_pro_epilogue_adjust_stack_di_sub
10388 : gen_pro_epilogue_adjust_stack_si_sub);
10390 insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
10391 stack_pointer_rtx, eax));
10393 /* Note that SEH directives need to continue tracking the stack
10394 pointer even after the frame pointer has been set up. */
10395 if (m->fs.cfa_reg == stack_pointer_rtx || TARGET_SEH)
10397 if (m->fs.cfa_reg == stack_pointer_rtx)
10398 m->fs.cfa_offset += allocate;
10400 RTX_FRAME_RELATED_P (insn) = 1;
10401 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10402 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10403 plus_constant (stack_pointer_rtx,
10406 m->fs.sp_offset += allocate;
10408 if (r10_live && eax_live)
10410 t = choose_baseaddr (m->fs.sp_offset - allocate);
10411 emit_move_insn (r10, gen_frame_mem (Pmode, t));
10412 t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
10413 emit_move_insn (eax, gen_frame_mem (Pmode, t));
10415 else if (eax_live || r10_live)
10417 t = choose_baseaddr (m->fs.sp_offset - allocate);
10418 emit_move_insn ((eax_live ? eax : r10), gen_frame_mem (Pmode, t));
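/* For illustration only: on Windows targets the allocation worker
   used above is typically a call to a stack-probing helper (a
   __chkstk variant) that takes the size in %eax/%rax, which is why
   %eax -- and %r10 when it carries the static chain -- had to be
   spilled and reloaded around the allocation.  */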
10421 gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);
10423 /* If we haven't already set up the frame pointer, do so now.  */
10424 if (frame_pointer_needed && !m->fs.fp_valid)
10426 insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
10427 GEN_INT (frame.stack_pointer_offset
10428 - frame.hard_frame_pointer_offset));
10429 insn = emit_insn (insn);
10430 RTX_FRAME_RELATED_P (insn) = 1;
10431 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
10433 if (m->fs.cfa_reg == stack_pointer_rtx)
10434 m->fs.cfa_reg = hard_frame_pointer_rtx;
10435 m->fs.fp_offset = frame.hard_frame_pointer_offset;
10436 m->fs.fp_valid = true;
10439 if (!int_registers_saved)
10440 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10441 if (frame.nsseregs)
10442 ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
10444 pic_reg_used = false;
10445 if (pic_offset_table_rtx
10446 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
10449 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
10451 if (alt_pic_reg_used != INVALID_REGNUM)
10452 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
10454 pic_reg_used = true;
10461 if (ix86_cmodel == CM_LARGE_PIC)
10463 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
10464 rtx label = gen_label_rtx ();
10465 emit_label (label);
10466 LABEL_PRESERVE_P (label) = 1;
10467 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
10468 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
10469 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
10470 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
10471 pic_offset_table_rtx, tmp_reg));
10474 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
10477 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
10480 /* In the pic_reg_used case, make sure that the got load isn't deleted
10481 when mcount needs it. Blockage to avoid call movement across mcount
10482 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END note.  */
10484 if (crtl->profile && !flag_fentry && pic_reg_used)
10485 emit_insn (gen_prologue_use (pic_offset_table_rtx));
10487 if (crtl->drap_reg && !crtl->stack_realign_needed)
10489 /* vDRAP is set up, but after reload it turns out that stack
10490 realignment isn't necessary; emit the prologue to set up DRAP
10491 without the stack realignment adjustment.  */
10492 t = choose_baseaddr (0);
10493 emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10496 /* Prevent instructions from being scheduled into the register save push
10497 sequence when access to the red zone area is done through the frame pointer.
10498 The offset between the frame pointer and the stack pointer is calculated
10499 relative to the value of the stack pointer at the end of the function
10500 prologue, and moving instructions that access the red zone area via the
10501 frame pointer inside the push sequence violates this assumption.  */
10502 if (frame_pointer_needed && frame.red_zone_size)
10503 emit_insn (gen_memory_blockage ());
10505 /* Emit cld instruction if stringops are used in the function. */
10506 if (TARGET_CLD && ix86_current_function_needs_cld)
10507 emit_insn (gen_cld ());
10509 /* SEH requires that the prologue end within 256 bytes of the start of
10510 the function. Prevent instruction schedules that would extend that. */
10512 emit_insn (gen_blockage ());
10515 /* Emit code to restore REG using a POP insn. */
10518 ix86_emit_restore_reg_using_pop (rtx reg)
10520 struct machine_function *m = cfun->machine;
10521 rtx insn = emit_insn (gen_pop (reg));
10523 ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
10524 m->fs.sp_offset -= UNITS_PER_WORD;
10526 if (m->fs.cfa_reg == crtl->drap_reg
10527 && REGNO (reg) == REGNO (crtl->drap_reg))
10529 /* Previously we'd represented the CFA as an expression
10530 like *(%ebp - 8). We've just popped that value from
10531 the stack, which means we need to reset the CFA to
10532 the drap register. This will remain until we restore
10533 the stack pointer. */
10534 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10535 RTX_FRAME_RELATED_P (insn) = 1;
10537 /* This means that the DRAP register is valid for addressing too. */
10538 m->fs.drap_valid = true;
10542 if (m->fs.cfa_reg == stack_pointer_rtx)
10544 rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
10545 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10546 add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
10547 RTX_FRAME_RELATED_P (insn) = 1;
10549 m->fs.cfa_offset -= UNITS_PER_WORD;
10552 /* When the frame pointer is the CFA, and we pop it, we are
10553 swapping back to the stack pointer as the CFA. This happens
10554 for stack frames that don't allocate other data, so we assume
10555 the stack pointer is now pointing at the return address, i.e.
10556 the function entry state, which makes the offset be 1 word. */
10557 if (reg == hard_frame_pointer_rtx)
10559 m->fs.fp_valid = false;
10560 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10562 m->fs.cfa_reg = stack_pointer_rtx;
10563 m->fs.cfa_offset -= UNITS_PER_WORD;
10565 add_reg_note (insn, REG_CFA_DEF_CFA,
10566 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10567 GEN_INT (m->fs.cfa_offset)));
10568 RTX_FRAME_RELATED_P (insn) = 1;
10573 /* Emit code to restore saved registers using POP insns. */
10576 ix86_emit_restore_regs_using_pop (void)
10578 unsigned int regno;
10580 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10581 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
10582 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno));
10585 /* Emit code and notes for the LEAVE instruction. */
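/* For reference: "leave" is the one-instruction equivalent of
       movl %ebp, %esp
       popl %ebp
   which is why the bookkeeping below revalidates the stack pointer
   at fp_offset - UNITS_PER_WORD and invalidates the frame pointer.  */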
10588 ix86_emit_leave (void)
10590 struct machine_function *m = cfun->machine;
10591 rtx insn = emit_insn (ix86_gen_leave ());
10593 ix86_add_queued_cfa_restore_notes (insn);
10595 gcc_assert (m->fs.fp_valid);
10596 m->fs.sp_valid = true;
10597 m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
10598 m->fs.fp_valid = false;
10600 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10602 m->fs.cfa_reg = stack_pointer_rtx;
10603 m->fs.cfa_offset = m->fs.sp_offset;
10605 add_reg_note (insn, REG_CFA_DEF_CFA,
10606 plus_constant (stack_pointer_rtx, m->fs.sp_offset));
10607 RTX_FRAME_RELATED_P (insn) = 1;
10608 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
10613 /* Emit code to restore saved registers using MOV insns.
10614 First register is restored from CFA - CFA_OFFSET. */
10616 ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
10617 int maybe_eh_return)
10619 struct machine_function *m = cfun->machine;
10620 unsigned int regno;
10622 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10623 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10625 rtx reg = gen_rtx_REG (Pmode, regno);
10628 mem = choose_baseaddr (cfa_offset);
10629 mem = gen_frame_mem (Pmode, mem);
10630 insn = emit_move_insn (reg, mem);
10632 if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
10634 /* Previously we'd represented the CFA as an expression
10635 like *(%ebp - 8).  We've just loaded that value from
10636 the stack, which means we need to reset the CFA to
10637 the drap register. This will remain until we restore
10638 the stack pointer. */
10639 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10640 RTX_FRAME_RELATED_P (insn) = 1;
10642 /* This means that the DRAP register is valid for addressing. */
10643 m->fs.drap_valid = true;
10646 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10648 cfa_offset -= UNITS_PER_WORD;
10652 /* Emit code to restore saved SSE registers using MOV insns.
10653 First register is restored from CFA - CFA_OFFSET.  */
10655 ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
10656 int maybe_eh_return)
10658 unsigned int regno;
10660 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10661 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10663 rtx reg = gen_rtx_REG (V4SFmode, regno);
10666 mem = choose_baseaddr (cfa_offset);
10667 mem = gen_rtx_MEM (V4SFmode, mem);
10668 set_mem_align (mem, 128);
10669 emit_move_insn (reg, mem);
10671 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10677 /* Restore function stack, frame, and registers. */
10680 ix86_expand_epilogue (int style)
10682 struct machine_function *m = cfun->machine;
10683 struct machine_frame_state frame_state_save = m->fs;
10684 struct ix86_frame frame;
10685 bool restore_regs_via_mov;
10688 ix86_finalize_stack_realign_flags ();
10689 ix86_compute_frame_layout (&frame);
10691 m->fs.sp_valid = (!frame_pointer_needed
10692 || (current_function_sp_is_unchanging
10693 && !stack_realign_fp));
10694 gcc_assert (!m->fs.sp_valid
10695 || m->fs.sp_offset == frame.stack_pointer_offset);
10697 /* The FP must be valid if the frame pointer is present. */
10698 gcc_assert (frame_pointer_needed == m->fs.fp_valid);
10699 gcc_assert (!m->fs.fp_valid
10700 || m->fs.fp_offset == frame.hard_frame_pointer_offset);
10702 /* We must have *some* valid pointer to the stack frame. */
10703 gcc_assert (m->fs.sp_valid || m->fs.fp_valid);
10705 /* The DRAP is never valid at this point. */
10706 gcc_assert (!m->fs.drap_valid);
10708 /* See the comment about red zone and frame
10709 pointer usage in ix86_expand_prologue. */
10710 if (frame_pointer_needed && frame.red_zone_size)
10711 emit_insn (gen_memory_blockage ());
10713 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
10714 gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);
10716 /* Determine the CFA offset of the end of the red-zone. */
10717 m->fs.red_zone_offset = 0;
10718 if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
10720 /* The red-zone begins below the return address. */
10721 m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;
10723 /* When the register save area is in the aligned portion of
10724 the stack, determine the maximum runtime displacement that
10725 matches up with the aligned frame. */
10726 if (stack_realign_drap)
10727 m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
10731 /* Special care must be taken for the normal return case of a function
10732 using eh_return: the eax and edx registers are marked as saved, but
10733 not restored along this path. Adjust the save location to match. */
10734 if (crtl->calls_eh_return && style != 2)
10735 frame.reg_save_offset -= 2 * UNITS_PER_WORD;
10737 /* EH_RETURN requires the use of moves to function properly. */
10738 if (crtl->calls_eh_return)
10739 restore_regs_via_mov = true;
10740 /* SEH requires the use of pops to identify the epilogue. */
10741 else if (TARGET_SEH)
10742 restore_regs_via_mov = false;
10743 /* If we're only restoring one register and sp is not valid then
10744 use a move instruction to restore the register, since it's
10745 less work than reloading sp and popping the register.  */
10746 else if (!m->fs.sp_valid && frame.nregs <= 1)
10747 restore_regs_via_mov = true;
10748 else if (TARGET_EPILOGUE_USING_MOVE
10749 && cfun->machine->use_fast_prologue_epilogue
10750 && (frame.nregs > 1
10751 || m->fs.sp_offset != frame.reg_save_offset))
10752 restore_regs_via_mov = true;
10753 else if (frame_pointer_needed
10755 && m->fs.sp_offset != frame.reg_save_offset)
10756 restore_regs_via_mov = true;
10757 else if (frame_pointer_needed
10758 && TARGET_USE_LEAVE
10759 && cfun->machine->use_fast_prologue_epilogue
10760 && frame.nregs == 1)
10761 restore_regs_via_mov = true;
10763 restore_regs_via_mov = false;
10765 if (restore_regs_via_mov || frame.nsseregs)
10767 /* Ensure that the entire register save area is addressable via
10768 the stack pointer, if we will restore via sp. */
10770 && m->fs.sp_offset > 0x7fffffff
10771 && !(m->fs.fp_valid || m->fs.drap_valid)
10772 && (frame.nsseregs + frame.nregs) != 0)
10774 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10775 GEN_INT (m->fs.sp_offset
10776 - frame.sse_reg_save_offset),
10778 m->fs.cfa_reg == stack_pointer_rtx);
10782 /* If there are any SSE registers to restore, then we have to do it
10783 via moves, since there's obviously no pop for SSE regs. */
10784 if (frame.nsseregs)
10785 ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
10788 if (restore_regs_via_mov)
10793 ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);
10795 /* eh_return epilogues need %ecx added to the stack pointer. */
10798 rtx insn, sa = EH_RETURN_STACKADJ_RTX;
10800 /* Stack align doesn't work with eh_return. */
10801 gcc_assert (!stack_realign_drap);
10802 /* Neither do regparm nested functions.  */
10803 gcc_assert (!ix86_static_chain_on_stack);
10805 if (frame_pointer_needed)
10807 t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
10808 t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
10809 emit_insn (gen_rtx_SET (VOIDmode, sa, t));
10811 t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
10812 insn = emit_move_insn (hard_frame_pointer_rtx, t);
10814 /* Note that we use SA as a temporary CFA, as the return
10815 address is at the proper place relative to it. We
10816 pretend this happens at the FP restore insn because
10817 prior to this insn the FP would be stored at the wrong
10818 offset relative to SA, and after this insn we have no
10819 other reasonable register to use for the CFA. We don't
10820 bother resetting the CFA to the SP for the duration of
10821 the return insn. */
10822 add_reg_note (insn, REG_CFA_DEF_CFA,
10823 plus_constant (sa, UNITS_PER_WORD));
10824 ix86_add_queued_cfa_restore_notes (insn);
10825 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
10826 RTX_FRAME_RELATED_P (insn) = 1;
10828 m->fs.cfa_reg = sa;
10829 m->fs.cfa_offset = UNITS_PER_WORD;
10830 m->fs.fp_valid = false;
10832 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
10833 const0_rtx, style, false);
10837 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
10838 t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
10839 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
10840 ix86_add_queued_cfa_restore_notes (insn);
10842 gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
10843 if (m->fs.cfa_offset != UNITS_PER_WORD)
10845 m->fs.cfa_offset = UNITS_PER_WORD;
10846 add_reg_note (insn, REG_CFA_DEF_CFA,
10847 plus_constant (stack_pointer_rtx,
10849 RTX_FRAME_RELATED_P (insn) = 1;
10852 m->fs.sp_offset = UNITS_PER_WORD;
10853 m->fs.sp_valid = true;
10858 /* SEH requires that the function end with (1) a stack adjustment
10859 if necessary, (2) a sequence of pops, and (3) a return or
10860 jump instruction. Prevent insns from the function body from
10861 being scheduled into this sequence. */
10864 /* Prevent a catch region from being adjacent to the standard
10865 epilogue sequence.  Unfortunately, neither crtl->uses_eh_lsda nor
10866 several other flags that would be interesting to test are set up yet.  */
10868 if (flag_non_call_exceptions)
10869 emit_insn (gen_nops (const1_rtx));
10871 emit_insn (gen_blockage ());
10874 /* First step is to deallocate the stack frame so that we can
10875 pop the registers. */
10876 if (!m->fs.sp_valid)
10878 pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
10879 GEN_INT (m->fs.fp_offset
10880 - frame.reg_save_offset),
10883 else if (m->fs.sp_offset != frame.reg_save_offset)
10885 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10886 GEN_INT (m->fs.sp_offset
10887 - frame.reg_save_offset),
10889 m->fs.cfa_reg == stack_pointer_rtx);
10892 ix86_emit_restore_regs_using_pop ();
10895 /* If we used a frame pointer and haven't already got rid of it, then pop it.  */
10897 if (m->fs.fp_valid)
10899 /* If the stack pointer is valid and pointing at the frame
10900 pointer store address, then we only need a pop. */
10901 if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
10902 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
10903 /* Leave results in shorter dependency chains on CPUs that are
10904 able to grok it fast. */
10905 else if (TARGET_USE_LEAVE
10906 || optimize_function_for_size_p (cfun)
10907 || !cfun->machine->use_fast_prologue_epilogue)
10908 ix86_emit_leave ();
10911 pro_epilogue_adjust_stack (stack_pointer_rtx,
10912 hard_frame_pointer_rtx,
10913 const0_rtx, style, !using_drap);
10914 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
10920 int param_ptr_offset = UNITS_PER_WORD;
10923 gcc_assert (stack_realign_drap);
10925 if (ix86_static_chain_on_stack)
10926 param_ptr_offset += UNITS_PER_WORD;
10927 if (!call_used_regs[REGNO (crtl->drap_reg)])
10928 param_ptr_offset += UNITS_PER_WORD;
10930 insn = emit_insn (gen_rtx_SET
10931 (VOIDmode, stack_pointer_rtx,
10932 gen_rtx_PLUS (Pmode,
10934 GEN_INT (-param_ptr_offset))));
10935 m->fs.cfa_reg = stack_pointer_rtx;
10936 m->fs.cfa_offset = param_ptr_offset;
10937 m->fs.sp_offset = param_ptr_offset;
10938 m->fs.realigned = false;
10940 add_reg_note (insn, REG_CFA_DEF_CFA,
10941 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10942 GEN_INT (param_ptr_offset)));
10943 RTX_FRAME_RELATED_P (insn) = 1;
10945 if (!call_used_regs[REGNO (crtl->drap_reg)])
10946 ix86_emit_restore_reg_using_pop (crtl->drap_reg);
10949 /* At this point the stack pointer must be valid, and we must have
10950 restored all of the registers. We may not have deallocated the
10951 entire stack frame. We've delayed this until now because it may
10952 be possible to merge the local stack deallocation with the
10953 deallocation forced by ix86_static_chain_on_stack. */
10954 gcc_assert (m->fs.sp_valid);
10955 gcc_assert (!m->fs.fp_valid);
10956 gcc_assert (!m->fs.realigned);
10957 if (m->fs.sp_offset != UNITS_PER_WORD)
10959 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10960 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
10964 /* Sibcall epilogues don't want a return instruction. */
10967 m->fs = frame_state_save;
10971 /* Emit vzeroupper if needed. */
10972 if (TARGET_VZEROUPPER
10973 && cfun->machine->use_avx256_p
10974 && !cfun->machine->caller_return_avx256_p)
10976 cfun->machine->use_vzeroupper_p = 1;
10977 emit_insn (gen_avx_vzeroupper (GEN_INT (call_no_avx256)));
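/* (vzeroupper clears the upper 128 bits of every YMM register; it
   is emitted on this return path so callers running legacy SSE code
   do not pay the AVX-to-SSE transition penalty.)  */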
10980 if (crtl->args.pops_args && crtl->args.size)
10982 rtx popc = GEN_INT (crtl->args.pops_args);
10984 /* i386 can only pop 64K bytes.  If asked to pop more, pop the return
10985 address, do an explicit add, and jump indirectly to the caller.  */
10987 if (crtl->args.pops_args >= 65536)
10989 rtx ecx = gen_rtx_REG (SImode, CX_REG);
10992 /* There is no "pascal" calling convention in any 64bit ABI. */
10993 gcc_assert (!TARGET_64BIT);
10995 insn = emit_insn (gen_pop (ecx));
10996 m->fs.cfa_offset -= UNITS_PER_WORD;
10997 m->fs.sp_offset -= UNITS_PER_WORD;
10999 add_reg_note (insn, REG_CFA_ADJUST_CFA,
11000 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
11001 add_reg_note (insn, REG_CFA_REGISTER,
11002 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
11003 RTX_FRAME_RELATED_P (insn) = 1;
11005 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11007 emit_jump_insn (gen_return_indirect_internal (ecx));
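/* For illustration only: instead of "ret $N", whose pop count is a
   16-bit immediate, the epilogue built above ends with a sequence
   like
       popl %ecx
       addl $N, %esp
       jmp  *%ecx
   where N is crtl->args.pops_args.  */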
11010 emit_jump_insn (gen_return_pop_internal (popc));
11013 emit_jump_insn (gen_return_internal ());
11015 /* Restore the state back to the state from the prologue,
11016 so that it's correct for the next epilogue. */
11017 m->fs = frame_state_save;
11020 /* Reset from the function's potential modifications. */
11023 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
11024 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
11026 if (pic_offset_table_rtx)
11027 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
11029 /* Mach-O doesn't support labels at the end of objects, so if
11030 it looks like we might want one, insert a NOP. */
11032 rtx insn = get_last_insn ();
11035 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
11036 insn = PREV_INSN (insn);
11040 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
11041 fputs ("\tnop\n", file);
11047 /* Return a scratch register to use in the split stack prologue. The
11048 split stack prologue is used for -fsplit-stack.  It consists of the
11049 first instructions in the function, even before the regular prologue.
11050 The scratch register can be any caller-saved register which is not
11051 used for parameters or for the static chain. */
11053 static unsigned int
11054 split_stack_prologue_scratch_regno (void)
11063 is_fastcall = (lookup_attribute ("fastcall",
11064 TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
11066 regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);
11070 if (DECL_STATIC_CHAIN (cfun->decl))
11072 sorry ("-fsplit-stack does not support fastcall with "
11073 "nested function");
11074 return INVALID_REGNUM;
11078 else if (regparm < 3)
11080 if (!DECL_STATIC_CHAIN (cfun->decl))
11086 sorry ("-fsplit-stack does not support 2 register "
11087 " parameters for a nested function");
11088 return INVALID_REGNUM;
11095 /* FIXME: We could make this work by pushing a register
11096 around the addition and comparison. */
11097 sorry ("-fsplit-stack does not support 3 register parameters");
11098 return INVALID_REGNUM;
11103 /* A SYMBOL_REF for the function which allocates new stack space for -fsplit-stack.  */
11106 static GTY(()) rtx split_stack_fn;
11108 /* A SYMBOL_REF for the more-stack function when using the large model.  */
11111 static GTY(()) rtx split_stack_fn_large;
11113 /* Handle -fsplit-stack. These are the first instructions in the
11114 function, even before the regular prologue. */
11117 ix86_expand_split_stack_prologue (void)
11119 struct ix86_frame frame;
11120 HOST_WIDE_INT allocate;
11121 unsigned HOST_WIDE_INT args_size;
11122 rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
11123 rtx scratch_reg = NULL_RTX;
11124 rtx varargs_label = NULL_RTX;
11127 gcc_assert (flag_split_stack && reload_completed);
11129 ix86_finalize_stack_realign_flags ();
11130 ix86_compute_frame_layout (&frame);
11131 allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;
11133 /* This is the label we will branch to if we have enough stack
11134 space. We expect the basic block reordering pass to reverse this
11135 branch if optimizing, so that we branch in the unlikely case. */
11136 label = gen_label_rtx ();
11138 /* We need to compare the stack pointer minus the frame size with
11139 the stack boundary in the TCB. The stack boundary always gives
11140 us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
11141 can compare directly. Otherwise we need to do an addition. */
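/* For illustration only: on GNU/Linux the UNSPEC_STACK_CHECK
   address built below resolves to the TCB's split-stack boundary
   slot, so the comparison typically assembles to something like
       cmpq %fs:0x70, %rsp    # 64-bit
       cmpl %gs:0x30, %esp    # 32-bit
   The offsets are libc-specific; see libgcc/config/i386/morestack.S.  */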
11143 limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
11144 UNSPEC_STACK_CHECK);
11145 limit = gen_rtx_CONST (Pmode, limit);
11146 limit = gen_rtx_MEM (Pmode, limit);
11147 if (allocate < SPLIT_STACK_AVAILABLE)
11148 current = stack_pointer_rtx;
11151 unsigned int scratch_regno;
11154 /* We need a scratch register to hold the stack pointer minus
11155 the required frame size. Since this is the very start of the
11156 function, the scratch register can be any caller-saved
11157 register which is not used for parameters. */
11158 offset = GEN_INT (- allocate);
11159 scratch_regno = split_stack_prologue_scratch_regno ();
11160 if (scratch_regno == INVALID_REGNUM)
11162 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11163 if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
11165 /* We don't use ix86_gen_add3 in this case because it will
11166 want to split to lea, but when not optimizing the insn
11167 will not be split after this point. */
11168 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11169 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11174 emit_move_insn (scratch_reg, offset);
11175 emit_insn (gen_adddi3 (scratch_reg, scratch_reg,
11176 stack_pointer_rtx));
11178 current = scratch_reg;
11181 ix86_expand_branch (GEU, current, limit, label);
11182 jump_insn = get_last_insn ();
11183 JUMP_LABEL (jump_insn) = label;
11185 /* Mark the jump as very likely to be taken. */
11186 add_reg_note (jump_insn, REG_BR_PROB,
11187 GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));
11189 if (split_stack_fn == NULL_RTX)
11190 split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11191 fn = split_stack_fn;
11193 /* Get more stack space. We pass in the desired stack space and the
11194 size of the arguments to copy to the new stack. In 32-bit mode
11195 we push the parameters; __morestack will return on a new stack
11196 anyhow.  In 64-bit mode we pass the parameters in r10 and r11.  */
11198 allocate_rtx = GEN_INT (allocate);
11199 args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
11200 call_fusage = NULL_RTX;
11205 reg10 = gen_rtx_REG (Pmode, R10_REG);
11206 reg11 = gen_rtx_REG (Pmode, R11_REG);
11208 /* If this function uses a static chain, it will be in %r10.
11209 Preserve it across the call to __morestack. */
11210 if (DECL_STATIC_CHAIN (cfun->decl))
11214 rax = gen_rtx_REG (Pmode, AX_REG);
11215 emit_move_insn (rax, reg10);
11216 use_reg (&call_fusage, rax);
11219 if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
11221 HOST_WIDE_INT argval;
11223 /* When using the large model we need to load the address
11224 into a register, and we've run out of registers. So we
11225 switch to a different calling convention, and we call a
11226 different function: __morestack_large_model.  We pass the
11227 argument size in the upper 32 bits of r10 and pass the
11228 frame size in the lower 32 bits. */
11229 gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
11230 gcc_assert ((args_size & 0xffffffff) == args_size);
11232 if (split_stack_fn_large == NULL_RTX)
11233 split_stack_fn_large =
11234 gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");
11236 if (ix86_cmodel == CM_LARGE_PIC)
11240 label = gen_label_rtx ();
11241 emit_label (label);
11242 LABEL_PRESERVE_P (label) = 1;
11243 emit_insn (gen_set_rip_rex64 (reg10, label));
11244 emit_insn (gen_set_got_offset_rex64 (reg11, label));
11245 emit_insn (gen_adddi3 (reg10, reg10, reg11));
11246 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
11248 x = gen_rtx_CONST (Pmode, x);
11249 emit_move_insn (reg11, x);
11250 x = gen_rtx_PLUS (Pmode, reg10, reg11);
11251 x = gen_const_mem (Pmode, x);
11252 emit_move_insn (reg11, x);
11255 emit_move_insn (reg11, split_stack_fn_large);
11259 argval = ((args_size << 16) << 16) + allocate;
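/* The double 16-bit shift above is presumably written that way so
   the operation stays well-defined (and warning-free) on hosts
   where HOST_WIDE_INT is only 32 bits wide, since shifting by the
   full width of a type is undefined in C.  */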
11260 emit_move_insn (reg10, GEN_INT (argval));
11264 emit_move_insn (reg10, allocate_rtx);
11265 emit_move_insn (reg11, GEN_INT (args_size));
11266 use_reg (&call_fusage, reg11);
11269 use_reg (&call_fusage, reg10);
11273 emit_insn (gen_push (GEN_INT (args_size)));
11274 emit_insn (gen_push (allocate_rtx));
11276 call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
11277 GEN_INT (UNITS_PER_WORD), constm1_rtx,
11279 add_function_usage_to (call_insn, call_fusage);
11281 /* In order to make call/return prediction work right, we now need
11282 to execute a return instruction. See
11283 libgcc/config/i386/morestack.S for the details on how this works.
11285 For flow purposes gcc must not see this as a return
11286 instruction--we need control flow to continue at the subsequent
11287 label. Therefore, we use an unspec. */
11288 gcc_assert (crtl->args.pops_args < 65536);
11289 emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));
11291 /* If we are in 64-bit mode and this function uses a static chain,
11292 we saved %r10 in %rax before calling __morestack.  */
11293 if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
11294 emit_move_insn (gen_rtx_REG (Pmode, R10_REG),
11295 gen_rtx_REG (Pmode, AX_REG));
11297 /* If this function calls va_start, we need to store a pointer to
11298 the arguments on the old stack, because they may not have been
11299 all copied to the new stack. At this point the old stack can be
11300 found at the frame pointer value used by __morestack, because
11301 __morestack has set that up before calling back to us. Here we
11302 store that pointer in a scratch register, and in
11303 ix86_expand_prologue we store the scratch register in a stack slot.  */
11305 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11307 unsigned int scratch_regno;
11311 scratch_regno = split_stack_prologue_scratch_regno ();
11312 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11313 frame_reg = gen_rtx_REG (Pmode, BP_REG);
11315 /* 64-bit:
11316 fp -> old fp value
11317       return address within this function
11318       return address of caller of this function
11319       stack arguments
11320 So we add three words to get to the stack arguments.
11322 32-bit:
11323 fp -> old fp value
11324       return address within this function
11325       first argument to __morestack
11326       second argument to __morestack
11327       return address of caller of this function
11328       stack arguments
11329 So we add five words to get to the stack arguments.  */
11331 words = TARGET_64BIT ? 3 : 5;
11332 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11333 gen_rtx_PLUS (Pmode, frame_reg,
11334 GEN_INT (words * UNITS_PER_WORD))));
11336 varargs_label = gen_label_rtx ();
11337 emit_jump_insn (gen_jump (varargs_label));
11338 JUMP_LABEL (get_last_insn ()) = varargs_label;
11343 emit_label (label);
11344 LABEL_NUSES (label) = 1;
11346 /* If this function calls va_start, we now have to set the scratch
11347 register for the case where we do not call __morestack. In this
11348 case we need to set it based on the stack pointer. */
11349 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11351 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11352 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11353 GEN_INT (UNITS_PER_WORD))));
11355 emit_label (varargs_label);
11356 LABEL_NUSES (varargs_label) = 1;
11360 /* We may have to tell the dataflow pass that the split stack prologue
11361 is initializing a scratch register. */
11364 ix86_live_on_entry (bitmap regs)
11366 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11368 gcc_assert (flag_split_stack);
11369 bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
11373 /* Extract the parts of an RTL expression that is a valid memory address
11374 for an instruction. Return 0 if the structure of the address is
11375 grossly off. Return -1 if the address contains ASHIFT, so it is not
11376 strictly valid, but still used for computing the length of an lea instruction.  */
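/* For example, the canonical Pmode address
       (plus (plus (mult (reg B) (const_int 4)) (reg A)) (const_int 8))
   decomposes into base A, index B, scale 4 and disp 8 -- the
   operand written as "8(%eax,%ebx,4)" in AT&T syntax (register
   choice illustrative).  */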
11379 ix86_decompose_address (rtx addr, struct ix86_address *out)
11381 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
11382 rtx base_reg, index_reg;
11383 HOST_WIDE_INT scale = 1;
11384 rtx scale_rtx = NULL_RTX;
11387 enum ix86_address_seg seg = SEG_DEFAULT;
11389 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
11391 else if (GET_CODE (addr) == PLUS)
11393 rtx addends[4], op;
11401 addends[n++] = XEXP (op, 1);
11404 while (GET_CODE (op) == PLUS);
11409 for (i = n; i >= 0; --i)
11412 switch (GET_CODE (op))
11417 index = XEXP (op, 0);
11418 scale_rtx = XEXP (op, 1);
11424 index = XEXP (op, 0);
11425 tmp = XEXP (op, 1);
11426 if (!CONST_INT_P (tmp))
11428 scale = INTVAL (tmp);
11429 if ((unsigned HOST_WIDE_INT) scale > 3)
11431 scale = 1 << scale;
11435 if (XINT (op, 1) == UNSPEC_TP
11436 && TARGET_TLS_DIRECT_SEG_REFS
11437 && seg == SEG_DEFAULT)
11438 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
11467 else if (GET_CODE (addr) == MULT)
11469 index = XEXP (addr, 0); /* index*scale */
11470 scale_rtx = XEXP (addr, 1);
11472 else if (GET_CODE (addr) == ASHIFT)
11474 /* We're called for lea too, which implements ashift on occasion. */
11475 index = XEXP (addr, 0);
11476 tmp = XEXP (addr, 1);
11477 if (!CONST_INT_P (tmp))
11479 scale = INTVAL (tmp);
11480 if ((unsigned HOST_WIDE_INT) scale > 3)
11482 scale = 1 << scale;
11486 disp = addr; /* displacement */
11488 /* Extract the integral value of scale. */
11491 if (!CONST_INT_P (scale_rtx))
11493 scale = INTVAL (scale_rtx);
11496 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
11497 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
11499 /* Avoid useless 0 displacement. */
11500 if (disp == const0_rtx && (base || index))
11503 /* Allow arg pointer and stack pointer as index if there is no scaling.  */
11504 if (base_reg && index_reg && scale == 1
11505 && (index_reg == arg_pointer_rtx
11506 || index_reg == frame_pointer_rtx
11507 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
11510 tmp = base, base = index, index = tmp;
11511 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
11514 /* Special case: %ebp cannot be encoded as a base without a displacement.  Similarly %r13.  */
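/* (Encoding note: a ModR/M base field of %ebp/%r13 with mod == 00
   is the escape used for disp32 addressing, so "[%ebp]" has to be
   assembled as "[%ebp+0]" with an explicit zero displacement byte.)  */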
11518 && (base_reg == hard_frame_pointer_rtx
11519 || base_reg == frame_pointer_rtx
11520 || base_reg == arg_pointer_rtx
11521 || (REG_P (base_reg)
11522 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
11523 || REGNO (base_reg) == R13_REG))))
11526 /* Special case: on K6, [%esi] makes the instruction vector decoded.
11527 Avoid this by transforming to [%esi+0].
11528 Reload calls address legitimization without cfun defined, so we need
11529 to test cfun for being non-NULL. */
11530 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
11531 && base_reg && !index_reg && !disp
11532 && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
11535 /* Special case: encode reg+reg instead of reg*2. */
11536 if (!base && index && scale == 2)
11537 base = index, base_reg = index_reg, scale = 1;
11539 /* Special case: scaling cannot be encoded without base or displacement. */
11540 if (!base && !disp && index && scale != 1)
11544 out->index = index;
11546 out->scale = scale;
11552 /* Return cost of the memory address x.
11553 For i386, it is better to use a complex address than let gcc copy
11554 the address into a reg and make a new pseudo. But not if the address
11555 requires two regs - that would mean more pseudos with longer lifetimes.  */
11558 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
11560 struct ix86_address parts;
11562 int ok = ix86_decompose_address (x, &parts);
11566 if (parts.base && GET_CODE (parts.base) == SUBREG)
11567 parts.base = SUBREG_REG (parts.base);
11568 if (parts.index && GET_CODE (parts.index) == SUBREG)
11569 parts.index = SUBREG_REG (parts.index);
11571 /* Attempt to minimize the number of registers in the address.  */
11573 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
11575 && (!REG_P (parts.index)
11576 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
11580 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
11582 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
11583 && parts.base != parts.index)
11586 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
11587 since its predecode logic can't detect the length of instructions
11588 and it degenerates to vector decoded.  Increase cost of such
11589 addresses here.  The penalty is minimally 2 cycles.  It may be worthwhile
11590 to split such addresses or even refuse such addresses at all.
11592 Following addressing modes are affected:
11593 [base+scale*index]
11594 [scale*index+disp]
11595 [base+index]
11597 The first and last case may be avoidable by explicitly coding the zero in
11598 the memory address, but I don't have an AMD-K6 machine handy to check this
11599 theory.  */
11602 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
11603 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
11604 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
11610 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
11611 this is used to form addresses to local data when -fPIC is in effect.  */
11615 darwin_local_data_pic (rtx disp)
11617 return (GET_CODE (disp) == UNSPEC
11618 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
11621 /* Determine if a given RTX is a valid constant. We already know this
11622 satisfies CONSTANT_P. */
11625 legitimate_constant_p (rtx x)
11627 switch (GET_CODE (x))
11632 if (GET_CODE (x) == PLUS)
11634 if (!CONST_INT_P (XEXP (x, 1)))
11639 if (TARGET_MACHO && darwin_local_data_pic (x))
11642 /* Only some unspecs are valid as "constants". */
11643 if (GET_CODE (x) == UNSPEC)
11644 switch (XINT (x, 1))
11647 case UNSPEC_GOTOFF:
11648 case UNSPEC_PLTOFF:
11649 return TARGET_64BIT;
11651 case UNSPEC_NTPOFF:
11652 x = XVECEXP (x, 0, 0);
11653 return (GET_CODE (x) == SYMBOL_REF
11654 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
11655 case UNSPEC_DTPOFF:
11656 x = XVECEXP (x, 0, 0);
11657 return (GET_CODE (x) == SYMBOL_REF
11658 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
11663 /* We must have drilled down to a symbol. */
11664 if (GET_CODE (x) == LABEL_REF)
11666 if (GET_CODE (x) != SYMBOL_REF)
11671 /* TLS symbols are never valid. */
11672 if (SYMBOL_REF_TLS_MODEL (x))
11675 /* DLLIMPORT symbols are never valid. */
11676 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11677 && SYMBOL_REF_DLLIMPORT_P (x))
11681 /* mdynamic-no-pic */
11682 if (MACHO_DYNAMIC_NO_PIC_P)
11683 return machopic_symbol_defined_p (x);
11688 if (GET_MODE (x) == TImode
11689 && x != CONST0_RTX (TImode)
11695 if (!standard_sse_constant_p (x))
11702 /* Otherwise we handle everything else in the move patterns. */
11706 /* Determine if it's legal to put X into the constant pool. This
11707 is not possible for the address of thread-local symbols, which
11708 is checked above. */
11711 ix86_cannot_force_const_mem (rtx x)
11713 /* We can always put integral constants and vectors in memory. */
11714 switch (GET_CODE (x))
11724 return !legitimate_constant_p (x);
11728 /* Nonzero if the constant value X is a legitimate general operand
11729 when generating PIC code. It is given that flag_pic is on and
11730 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
11733 legitimate_pic_operand_p (rtx x)
11737 switch (GET_CODE (x))
11740 inner = XEXP (x, 0);
11741 if (GET_CODE (inner) == PLUS
11742 && CONST_INT_P (XEXP (inner, 1)))
11743 inner = XEXP (inner, 0);
11745 /* Only some unspecs are valid as "constants". */
11746 if (GET_CODE (inner) == UNSPEC)
11747 switch (XINT (inner, 1))
11750 case UNSPEC_GOTOFF:
11751 case UNSPEC_PLTOFF:
11752 return TARGET_64BIT;
11754 x = XVECEXP (inner, 0, 0);
11755 return (GET_CODE (x) == SYMBOL_REF
11756 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
11757 case UNSPEC_MACHOPIC_OFFSET:
11758 return legitimate_pic_address_disp_p (x);
11766 return legitimate_pic_address_disp_p (x);
11773 /* Determine if a given CONST RTX is a valid memory displacement in PIC mode.  */
11777 legitimate_pic_address_disp_p (rtx disp)
11781 /* In 64bit mode we can allow direct addresses of symbols and labels
11782 when they are not dynamic symbols. */
11785 rtx op0 = disp, op1;
11787 switch (GET_CODE (disp))
11793 if (GET_CODE (XEXP (disp, 0)) != PLUS)
11795 op0 = XEXP (XEXP (disp, 0), 0);
11796 op1 = XEXP (XEXP (disp, 0), 1);
11797 if (!CONST_INT_P (op1)
11798 || INTVAL (op1) >= 16*1024*1024
11799 || INTVAL (op1) < -16*1024*1024)
11801 if (GET_CODE (op0) == LABEL_REF)
11803 if (GET_CODE (op0) != SYMBOL_REF)
11808 /* TLS references should always be enclosed in UNSPEC. */
11809 if (SYMBOL_REF_TLS_MODEL (op0))
11811 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
11812 && ix86_cmodel != CM_LARGE_PIC)
11820 if (GET_CODE (disp) != CONST)
11822 disp = XEXP (disp, 0);
11826 /* It is unsafe to allow PLUS expressions; they could exceed the limited
11827 displacement range guaranteed for GOT table references.  We should not need them anyway.  */
11828 if (GET_CODE (disp) != UNSPEC
11829 || (XINT (disp, 1) != UNSPEC_GOTPCREL
11830 && XINT (disp, 1) != UNSPEC_GOTOFF
11831 && XINT (disp, 1) != UNSPEC_PCREL
11832 && XINT (disp, 1) != UNSPEC_PLTOFF))
11835 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
11836 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
11842 if (GET_CODE (disp) == PLUS)
11844 if (!CONST_INT_P (XEXP (disp, 1)))
11846 disp = XEXP (disp, 0);
11850 if (TARGET_MACHO && darwin_local_data_pic (disp))
11853 if (GET_CODE (disp) != UNSPEC)
11856 switch (XINT (disp, 1))
11861 /* We need to check for both symbols and labels because VxWorks loads
11862 text labels with @GOT rather than @GOTOFF.  See gotoff_operand for details.  */
11864 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
11865 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
11866 case UNSPEC_GOTOFF:
11867 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
11868 While the ABI also specifies a 32bit relocation, we don't produce it in the
11869 small PIC model at all.  */
11870 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
11871 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
11873 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
11875 case UNSPEC_GOTTPOFF:
11876 case UNSPEC_GOTNTPOFF:
11877 case UNSPEC_INDNTPOFF:
11880 disp = XVECEXP (disp, 0, 0);
11881 return (GET_CODE (disp) == SYMBOL_REF
11882 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
11883 case UNSPEC_NTPOFF:
11884 disp = XVECEXP (disp, 0, 0);
11885 return (GET_CODE (disp) == SYMBOL_REF
11886 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
11887 case UNSPEC_DTPOFF:
11888 disp = XVECEXP (disp, 0, 0);
11889 return (GET_CODE (disp) == SYMBOL_REF
11890 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
11896 /* Recognizes RTL expressions that are valid memory addresses for an
11897 instruction. The MODE argument is the machine mode for the MEM
11898 expression that wants to use this address.
11900 It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
11901 convert common non-canonical forms to canonical form so that they will be recognized.  */
11905 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
11906 rtx addr, bool strict)
11908 struct ix86_address parts;
11909 rtx base, index, disp;
11910 HOST_WIDE_INT scale;
11912 if (ix86_decompose_address (addr, &parts) <= 0)
11913 /* Decomposition failed. */
11917 index = parts.index;
11919 scale = parts.scale;
11921 /* Validate base register.
11923 Don't allow SUBREG's that span more than a word here. It can lead to spill
11924 failures when the base is one word out of a two word structure, which is
11925 represented internally as a DImode int. */
11933 else if (GET_CODE (base) == SUBREG
11934 && REG_P (SUBREG_REG (base))
11935 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
11937 reg = SUBREG_REG (base);
11939 /* Base is not a register. */
11942 if (GET_MODE (base) != Pmode)
11943 /* Base is not in Pmode. */
11946 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
11947 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
11948 /* Base is not valid. */
11952 /* Validate index register.
11954 Don't allow SUBREG's that span more than a word here -- same as above. */
11962 else if (GET_CODE (index) == SUBREG
11963 && REG_P (SUBREG_REG (index))
11964 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
11966 reg = SUBREG_REG (index);
11968 /* Index is not a register. */
11971 if (GET_MODE (index) != Pmode)
11972 /* Index is not in Pmode. */
11975 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
11976 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
11977 /* Index is not valid. */
11981 /* Validate scale factor. */
11985 /* Scale without index. */
11988 if (scale != 2 && scale != 4 && scale != 8)
11989 /* Scale is not a valid multiplier. */
11993 /* Validate displacement. */
11996 if (GET_CODE (disp) == CONST
11997 && GET_CODE (XEXP (disp, 0)) == UNSPEC
11998 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
11999 switch (XINT (XEXP (disp, 0), 1))
12001 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
12002 used.  While the ABI also specifies 32bit relocations, we don't produce
12003 them at all and use IP-relative addressing instead.  */
12005 case UNSPEC_GOTOFF:
12006 gcc_assert (flag_pic);
12008 goto is_legitimate_pic;
12010 /* 64bit address unspec. */
12013 case UNSPEC_GOTPCREL:
12015 gcc_assert (flag_pic);
12016 goto is_legitimate_pic;
12018 case UNSPEC_GOTTPOFF:
12019 case UNSPEC_GOTNTPOFF:
12020 case UNSPEC_INDNTPOFF:
12021 case UNSPEC_NTPOFF:
12022 case UNSPEC_DTPOFF:
12025 case UNSPEC_STACK_CHECK:
12026 gcc_assert (flag_split_stack);
12030 /* Invalid address unspec. */
12034 else if (SYMBOLIC_CONST (disp)
12038 && MACHOPIC_INDIRECT
12039 && !machopic_operand_p (disp)
12045 if (TARGET_64BIT && (index || base))
12047 /* foo@dtpoff(%rX) is ok. */
12048 if (GET_CODE (disp) != CONST
12049 || GET_CODE (XEXP (disp, 0)) != PLUS
12050 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
12051 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
12052 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
12053 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
12054 /* Non-constant pic memory reference. */
12057 else if ((!TARGET_MACHO || flag_pic)
12058 && ! legitimate_pic_address_disp_p (disp))
12059 /* Displacement is an invalid pic construct. */
12062 else if (MACHO_DYNAMIC_NO_PIC_P && !legitimate_constant_p (disp))
12063 /* Displacement must be referenced via non_lazy_pointer.  */
12067 /* This code used to verify that a symbolic pic displacement
12068 includes the pic_offset_table_rtx register.
12070 While this is a good idea, unfortunately these constructs may
12071 be created by the "adds using lea" optimization for incorrect
12072 code like:
12074 int a;
12075 int foo (int i)
12076 {
12077 return *(&a + i);
12078 }
12080 This code is nonsensical, but results in addressing the
12081 GOT table with a pic_offset_table_rtx base.  We can't
12082 just refuse it easily, since it gets matched by the
12083 "addsi3" pattern, which later gets split to lea in the
12084 case the output register differs from the input.  While this
12085 could be handled by a separate addsi pattern for this case
12086 that never results in lea, disabling this test seems to be the
12087 easier and correct fix for the crash.  */
12089 else if (GET_CODE (disp) != LABEL_REF
12090 && !CONST_INT_P (disp)
12091 && (GET_CODE (disp) != CONST
12092 || !legitimate_constant_p (disp))
12093 && (GET_CODE (disp) != SYMBOL_REF
12094 || !legitimate_constant_p (disp)))
12095 /* Displacement is not constant. */
12097 else if (TARGET_64BIT
12098 && !x86_64_immediate_operand (disp, VOIDmode))
12099 /* Displacement is out of range. */
12103 /* Everything looks valid. */
12107 /* Determine if a given RTX is a valid constant address. */
12110 constant_address_p (rtx x)
12112 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
12115 /* Return a unique alias set for the GOT. */
12117 static alias_set_type
12118 ix86_GOT_alias_set (void)
12120 static alias_set_type set = -1;
12122 set = new_alias_set ();
12126 /* Return a legitimate reference for ORIG (an address) using the
12127 register REG. If REG is 0, a new pseudo is generated.
12129 There are two types of references that must be handled:
12131 1. Global data references must load the address from the GOT, via
12132 the PIC reg. An insn is emitted to do this load, and the reg is
12135 2. Static data references, constant pool addresses, and code labels
12136 compute the address as an offset from the GOT, whose base is in
12137 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
12138 differentiate them from global data objects. The returned
12139 address is the PIC reg + an unspec constant.
12141 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
12142 reg also appears in the address. */
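/* For illustration only, the two cases above on 32-bit ELF with the
   PIC register in %ebx:
       movl x@GOT(%ebx), %eax     # case 1: address loaded from the GOT
       leal x@GOTOFF(%ebx), %eax  # case 2: PIC reg + unspec offset
   (register choice illustrative).  */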
12145 legitimize_pic_address (rtx orig, rtx reg)
12148 rtx new_rtx = orig;
12152 if (TARGET_MACHO && !TARGET_64BIT)
12155 reg = gen_reg_rtx (Pmode);
12156 /* Use the generic Mach-O PIC machinery. */
12157 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
12161 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
12163 else if (TARGET_64BIT
12164 && ix86_cmodel != CM_SMALL_PIC
12165 && gotoff_operand (addr, Pmode))
12168 /* This symbol may be referenced via a displacement from the PIC
12169 base address (@GOTOFF). */
12171 if (reload_in_progress)
12172 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12173 if (GET_CODE (addr) == CONST)
12174 addr = XEXP (addr, 0);
12175 if (GET_CODE (addr) == PLUS)
12177 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12179 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12182 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12183 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12185 tmpreg = gen_reg_rtx (Pmode);
12188 emit_move_insn (tmpreg, new_rtx);
12192 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
12193 tmpreg, 1, OPTAB_DIRECT);
12196 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
12198 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
12200 /* This symbol may be referenced via a displacement from the PIC
12201 base address (@GOTOFF). */
12203 if (reload_in_progress)
12204 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12205 if (GET_CODE (addr) == CONST)
12206 addr = XEXP (addr, 0);
12207 if (GET_CODE (addr) == PLUS)
12209 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12211 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12214 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12215 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12216 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12220 emit_move_insn (reg, new_rtx);
12224 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
12225 /* We can't use @GOTOFF for text labels on VxWorks;
12226 see gotoff_operand. */
12227 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
12229 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12231 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
12232 return legitimize_dllimport_symbol (addr, true);
12233 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
12234 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
12235 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
12237 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
12238 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
12242 /* For x64 PE-COFF there is no GOT table, so we use the address directly.  */
12244 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12246 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
12247 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12250 reg = gen_reg_rtx (Pmode);
12251 emit_move_insn (reg, new_rtx);
12254 else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
12256 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
12257 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12258 new_rtx = gen_const_mem (Pmode, new_rtx);
12259 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12262 reg = gen_reg_rtx (Pmode);
12263 /* Use gen_movsi directly; otherwise the address is loaded
12264 into a register for CSE.  We don't want to CSE these addresses;
12265 instead we CSE addresses from the GOT table, so skip this.  */
12266 emit_insn (gen_movsi (reg, new_rtx));
12271 /* This symbol must be referenced via a load from the
12272 Global Offset Table (@GOT). */
12274 if (reload_in_progress)
12275 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12276 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
12277 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12279 new_rtx = force_reg (Pmode, new_rtx);
12280 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12281 new_rtx = gen_const_mem (Pmode, new_rtx);
12282 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12285 reg = gen_reg_rtx (Pmode);
12286 emit_move_insn (reg, new_rtx);
12292 if (CONST_INT_P (addr)
12293 && !x86_64_immediate_operand (addr, VOIDmode))
12297 emit_move_insn (reg, addr);
12301 new_rtx = force_reg (Pmode, addr);
12303 else if (GET_CODE (addr) == CONST)
12305 addr = XEXP (addr, 0);
12307 /* We must match stuff we generate before. Assume the only
12308 unspecs that can get here are ours. Not that we could do
12309 anything with them anyway.... */
12310 if (GET_CODE (addr) == UNSPEC
12311 || (GET_CODE (addr) == PLUS
12312 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
12314 gcc_assert (GET_CODE (addr) == PLUS);
12316 if (GET_CODE (addr) == PLUS)
12318 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
12320 /* Check first to see if this is a constant offset from a @GOTOFF
12321 symbol reference. */
12322 if (gotoff_operand (op0, Pmode)
12323 && CONST_INT_P (op1))
12327 if (reload_in_progress)
12328 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12329 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
12330 UNSPEC_GOTOFF);
12331 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
12332 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12333 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12337 emit_move_insn (reg, new_rtx);
12343 if (INTVAL (op1) < -16*1024*1024
12344 || INTVAL (op1) >= 16*1024*1024)
12346 if (!x86_64_immediate_operand (op1, Pmode))
12347 op1 = force_reg (Pmode, op1);
12348 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
12354 base = legitimize_pic_address (XEXP (addr, 0), reg);
12355 new_rtx = legitimize_pic_address (XEXP (addr, 1),
12356 base == reg ? NULL_RTX : reg);
12358 if (CONST_INT_P (new_rtx))
12359 new_rtx = plus_constant (base, INTVAL (new_rtx));
12362 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
12364 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
12365 new_rtx = XEXP (new_rtx, 1);
12367 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
12375 /* Load the thread pointer. If TO_REG is true, force it into a register. */
12378 get_thread_pointer (int to_reg)
12382 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
12386 reg = gen_reg_rtx (Pmode);
12387 insn = gen_rtx_SET (VOIDmode, reg, tp);
12388 insn = emit_insn (insn);
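/* On GNU/Linux targets the UNSPEC_TP load usually becomes a read of the
   thread segment base, e.g. "movl %gs:0, %eax" on ia32 or
   "movq %fs:0, %rax" on x86-64.  */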
12393 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
12394 false if we expect this to be used for a memory address and true if
12395 we expect to load the address into a register. */
12398 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
12400 rtx dest, base, off, pic, tp;
12405 case TLS_MODEL_GLOBAL_DYNAMIC:
12406 dest = gen_reg_rtx (Pmode);
12407 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12409 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12411 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
12414 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
12415 insns = get_insns ();
12418 RTL_CONST_CALL_P (insns) = 1;
12419 emit_libcall_block (insns, dest, rax, x);
12421 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12422 emit_insn (gen_tls_global_dynamic_64 (dest, x));
12424 emit_insn (gen_tls_global_dynamic_32 (dest, x));
12426 if (TARGET_GNU2_TLS)
12428 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
12430 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
12434 case TLS_MODEL_LOCAL_DYNAMIC:
12435 base = gen_reg_rtx (Pmode);
12436 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12438 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12440 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
12443 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
12444 insns = get_insns ();
12447 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
12448 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
12449 RTL_CONST_CALL_P (insns) = 1;
12450 emit_libcall_block (insns, base, rax, note);
12452 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12453 emit_insn (gen_tls_local_dynamic_base_64 (base));
12455 emit_insn (gen_tls_local_dynamic_base_32 (base));
12457 if (TARGET_GNU2_TLS)
12459 rtx x = ix86_tls_module_base ();
12461 set_unique_reg_note (get_last_insn (), REG_EQUIV,
12462 gen_rtx_MINUS (Pmode, x, tp));
12465 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
12466 off = gen_rtx_CONST (Pmode, off);
12468 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
12470 if (TARGET_GNU2_TLS)
12472 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
12474 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
12479 case TLS_MODEL_INITIAL_EXEC:
12483 type = UNSPEC_GOTNTPOFF;
12487 if (reload_in_progress)
12488 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12489 pic = pic_offset_table_rtx;
12490 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
12492 else if (!TARGET_ANY_GNU_TLS)
12494 pic = gen_reg_rtx (Pmode);
12495 emit_insn (gen_set_got (pic));
12496 type = UNSPEC_GOTTPOFF;
12501 type = UNSPEC_INDNTPOFF;
12504 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
12505 off = gen_rtx_CONST (Pmode, off);
12507 off = gen_rtx_PLUS (Pmode, pic, off);
12508 off = gen_const_mem (Pmode, off);
12509 set_mem_alias_set (off, ix86_GOT_alias_set ());
12511 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12513 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12514 off = force_reg (Pmode, off);
12515 return gen_rtx_PLUS (Pmode, base, off);
12519 base = get_thread_pointer (true);
12520 dest = gen_reg_rtx (Pmode);
12521 emit_insn (gen_subsi3 (dest, base, off));
12525 case TLS_MODEL_LOCAL_EXEC:
12526 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
12527 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12528 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
12529 off = gen_rtx_CONST (Pmode, off);
12531 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12533 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12534 return gen_rtx_PLUS (Pmode, base, off);
12538 base = get_thread_pointer (true);
12539 dest = gen_reg_rtx (Pmode);
12540 emit_insn (gen_subsi3 (dest, base, off));
12545 gcc_unreachable ();
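/* As a rough guide to the expansions above, the four TLS models
   typically assemble (ia32, non-GNU2 TLS) to:

     global dynamic:  leal foo@tlsgd(,%ebx,1), %eax; call ___tls_get_addr
     local dynamic:   leal foo@tlsldm(%ebx), %eax; call ___tls_get_addr;
		      leal foo@dtpoff(%eax), %edx
     initial exec:    movl foo@gotntpoff(%ebx), %ecx; movl %gs:(%ecx), %eax
     local exec:      movl %gs:0, %eax; leal foo@ntpoff(%eax), %eax  */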
12551 /* Create or return the unique __imp_DECL dllimport symbol corresponding
12554 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
12555 htab_t dllimport_map;
12558 get_dllimport_decl (tree decl)
12560 struct tree_map *h, in;
12563 const char *prefix;
12564 size_t namelen, prefixlen;
12569 if (!dllimport_map)
12570 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
12572 in.hash = htab_hash_pointer (decl);
12573 in.base.from = decl;
12574 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
12575 h = (struct tree_map *) *loc;
12579 *loc = h = ggc_alloc_tree_map ();
12581 h->base.from = decl;
12582 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
12583 VAR_DECL, NULL, ptr_type_node);
12584 DECL_ARTIFICIAL (to) = 1;
12585 DECL_IGNORED_P (to) = 1;
12586 DECL_EXTERNAL (to) = 1;
12587 TREE_READONLY (to) = 1;
12589 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
12590 name = targetm.strip_name_encoding (name);
12591 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
12592 ? "*__imp_" : "*__imp__";
12593 namelen = strlen (name);
12594 prefixlen = strlen (prefix);
12595 imp_name = (char *) alloca (namelen + prefixlen + 1);
12596 memcpy (imp_name, prefix, prefixlen);
12597 memcpy (imp_name + prefixlen, name, namelen + 1);
12599 name = ggc_alloc_string (imp_name, namelen + prefixlen);
12600 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
12601 SET_SYMBOL_REF_DECL (rtl, to);
12602 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
12604 rtl = gen_const_mem (Pmode, rtl);
12605 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
12607 SET_DECL_RTL (to, rtl);
12608 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
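/* The resulting VAR_DECL's RTL is a load from the import slot, so a use
   of a dllimported "foo" typically assembles to a memory reference
   through __imp__foo (ia32) or __imp_foo (x64), with the pointer itself
   filled in by the dynamic loader.  */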
12613 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
12614 true if we require the result be a register. */
12617 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
12622 gcc_assert (SYMBOL_REF_DECL (symbol));
12623 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
12625 x = DECL_RTL (imp_decl);
12627 x = force_reg (Pmode, x);
12631 /* Try machine-dependent ways of modifying an illegitimate address
12632 to be legitimate. If we find one, return the new, valid address.
12633 This macro is used in only one place: `memory_address' in explow.c.
12635 OLDX is the address as it was before break_out_memory_refs was called.
12636 In some cases it is useful to look at this to decide what needs to be done.
12638 It is always safe for this macro to do nothing. It exists to recognize
12639 opportunities to optimize the output.
12641 For the 80386, we handle X+REG by loading X into a register R and
12642 using R+REG. R will go in a general reg and indexing will be used.
12643 However, if REG is a broken-out memory address or multiplication,
12644 nothing needs to be done because REG can certainly go in a general reg.
12646 When -fpic is used, special handling is needed for symbolic references.
12647 See comments by legitimize_pic_address in i386.c for details. */
12650 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
12651 enum machine_mode mode)
12656 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
12658 return legitimize_tls_address (x, (enum tls_model) log, false);
12659 if (GET_CODE (x) == CONST
12660 && GET_CODE (XEXP (x, 0)) == PLUS
12661 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12662 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
12664 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
12665 (enum tls_model) log, false);
12666 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12669 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12671 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
12672 return legitimize_dllimport_symbol (x, true);
12673 if (GET_CODE (x) == CONST
12674 && GET_CODE (XEXP (x, 0)) == PLUS
12675 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12676 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
12678 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
12679 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12683 if (flag_pic && SYMBOLIC_CONST (x))
12684 return legitimize_pic_address (x, 0);
12687 if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
12688 return machopic_indirect_data_reference (x, 0);
12691 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
12692 if (GET_CODE (x) == ASHIFT
12693 && CONST_INT_P (XEXP (x, 1))
12694 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
12697 log = INTVAL (XEXP (x, 1));
12698 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
12699 GEN_INT (1 << log));
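/* E.g. (ashift (reg) (const_int 3)) becomes (mult (reg) (const_int 8)),
   which matches the scaled-index form of an x86 address and so can be
   encoded directly in a lea or memory operand.  */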
12702 if (GET_CODE (x) == PLUS)
12704 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
12706 if (GET_CODE (XEXP (x, 0)) == ASHIFT
12707 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
12708 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
12711 log = INTVAL (XEXP (XEXP (x, 0), 1));
12712 XEXP (x, 0) = gen_rtx_MULT (Pmode,
12713 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
12714 GEN_INT (1 << log));
12717 if (GET_CODE (XEXP (x, 1)) == ASHIFT
12718 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
12719 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
12722 log = INTVAL (XEXP (XEXP (x, 1), 1));
12723 XEXP (x, 1) = gen_rtx_MULT (Pmode,
12724 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
12725 GEN_INT (1 << log));
12728 /* Put multiply first if it isn't already. */
12729 if (GET_CODE (XEXP (x, 1)) == MULT)
12731 rtx tmp = XEXP (x, 0);
12732 XEXP (x, 0) = XEXP (x, 1);
12737 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
12738 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
12739 created by virtual register instantiation, register elimination, and
12740 similar optimizations. */
12741 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
12744 x = gen_rtx_PLUS (Pmode,
12745 gen_rtx_PLUS (Pmode, XEXP (x, 0),
12746 XEXP (XEXP (x, 1), 0)),
12747 XEXP (XEXP (x, 1), 1));
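/* E.g. (plus (mult (reg) (const_int 4)) (plus (reg) (const_int 16)))
   is rewritten as
   (plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 16)),
   the canonical base + index*scale + disp shape.  */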
12751 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
12752 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
12753 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
12754 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
12755 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
12756 && CONSTANT_P (XEXP (x, 1)))
12759 rtx other = NULL_RTX;
12761 if (CONST_INT_P (XEXP (x, 1)))
12763 constant = XEXP (x, 1);
12764 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
12766 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
12768 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
12769 other = XEXP (x, 1);
12777 x = gen_rtx_PLUS (Pmode,
12778 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
12779 XEXP (XEXP (XEXP (x, 0), 1), 0)),
12780 plus_constant (other, INTVAL (constant)));
12784 if (changed && ix86_legitimate_address_p (mode, x, false))
12787 if (GET_CODE (XEXP (x, 0)) == MULT)
12790 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
12793 if (GET_CODE (XEXP (x, 1)) == MULT)
12796 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
12800 && REG_P (XEXP (x, 1))
12801 && REG_P (XEXP (x, 0)))
12804 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
12807 x = legitimize_pic_address (x, 0);
12810 if (changed && ix86_legitimate_address_p (mode, x, false))
12813 if (REG_P (XEXP (x, 0)))
12815 rtx temp = gen_reg_rtx (Pmode);
12816 rtx val = force_operand (XEXP (x, 1), temp);
12818 emit_move_insn (temp, val);
12820 XEXP (x, 1) = temp;
12824 else if (REG_P (XEXP (x, 1)))
12826 rtx temp = gen_reg_rtx (Pmode);
12827 rtx val = force_operand (XEXP (x, 0), temp);
12829 emit_move_insn (temp, val);
12831 XEXP (x, 0) = temp;
12839 /* Print an integer constant expression in assembler syntax. Addition
12840 and subtraction are the only arithmetic that may appear in these
12841 expressions. FILE is the stdio stream to write to, X is the rtx, and
12842 CODE is the operand print code from the output string. */
12845 output_pic_addr_const (FILE *file, rtx x, int code)
12849 switch (GET_CODE (x))
12852 gcc_assert (flag_pic);
12857 if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
12858 output_addr_const (file, x);
12861 const char *name = XSTR (x, 0);
12863 /* Mark the decl as referenced so that cgraph will
12864 output the function. */
12865 if (SYMBOL_REF_DECL (x))
12866 mark_decl_referenced (SYMBOL_REF_DECL (x));
12869 if (MACHOPIC_INDIRECT
12870 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
12871 name = machopic_indirection_name (x, /*stub_p=*/true);
12873 assemble_name (file, name);
12875 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12876 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
12877 fputs ("@PLT", file);
12884 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
12885 assemble_name (asm_out_file, buf);
12889 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12893 /* This used to output parentheses around the expression,
12894 but that does not work on the 386 (either ATT or BSD assembler). */
12895 output_pic_addr_const (file, XEXP (x, 0), code);
12899 if (GET_MODE (x) == VOIDmode)
12901 /* We can use %d if the number is <32 bits and positive. */
12902 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
12903 fprintf (file, "0x%lx%08lx",
12904 (unsigned long) CONST_DOUBLE_HIGH (x),
12905 (unsigned long) CONST_DOUBLE_LOW (x));
12907 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
12910 /* We can't handle floating point constants;
12911 TARGET_PRINT_OPERAND must handle them. */
12912 output_operand_lossage ("floating constant misused");
12916 /* Some assemblers need integer constants to appear first. */
12917 if (CONST_INT_P (XEXP (x, 0)))
12919 output_pic_addr_const (file, XEXP (x, 0), code);
12921 output_pic_addr_const (file, XEXP (x, 1), code);
12925 gcc_assert (CONST_INT_P (XEXP (x, 1)));
12926 output_pic_addr_const (file, XEXP (x, 1), code);
12928 output_pic_addr_const (file, XEXP (x, 0), code);
12934 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
12935 output_pic_addr_const (file, XEXP (x, 0), code);
12937 output_pic_addr_const (file, XEXP (x, 1), code);
12939 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
12943 if (XINT (x, 1) == UNSPEC_STACK_CHECK)
12945 bool f = i386_asm_output_addr_const_extra (file, x);
12950 gcc_assert (XVECLEN (x, 0) == 1);
12951 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
12952 switch (XINT (x, 1))
12955 fputs ("@GOT", file);
12957 case UNSPEC_GOTOFF:
12958 fputs ("@GOTOFF", file);
12960 case UNSPEC_PLTOFF:
12961 fputs ("@PLTOFF", file);
12962 break;
12963 case UNSPEC_PCREL:
12964 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12965 "(%rip)" : "[rip]", file);
12967 case UNSPEC_GOTPCREL:
12968 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12969 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
12971 case UNSPEC_GOTTPOFF:
12972 /* FIXME: This might be @TPOFF in Sun ld too. */
12973 fputs ("@gottpoff", file);
12976 fputs ("@tpoff", file);
12978 case UNSPEC_NTPOFF:
12980 fputs ("@tpoff", file);
12982 fputs ("@ntpoff", file);
12984 case UNSPEC_DTPOFF:
12985 fputs ("@dtpoff", file);
12987 case UNSPEC_GOTNTPOFF:
12989 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12990 "@gottpoff(%rip)": "@gottpoff[rip]", file);
12992 fputs ("@gotntpoff", file);
12994 case UNSPEC_INDNTPOFF:
12995 fputs ("@indntpoff", file);
12998 case UNSPEC_MACHOPIC_OFFSET:
13000 machopic_output_function_base_name (file);
13004 output_operand_lossage ("invalid UNSPEC as operand");
13010 output_operand_lossage ("invalid expression as operand");
13014 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
13015 We need to emit DTP-relative relocations. */
13017 static void ATTRIBUTE_UNUSED
13018 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
13020 fputs (ASM_LONG, file);
13021 output_addr_const (file, x);
13022 fputs ("@dtpoff", file);
13023 switch (size)
13024 {
13025 case 4:
13026 break;
13027 case 8:
13028 fputs (", 0", file);
13029 break;
13030 default:
13031 gcc_unreachable ();
13032 }
13033 }
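/* E.g. a 4-byte request emits ".long foo@dtpoff", and an 8-byte one
   ".long foo@dtpoff, 0", building the 64-bit value from two words.  */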
13035 /* Return true if X is a representation of the PIC register. This copes
13036 with calls from ix86_find_base_term, where the register might have
13037 been replaced by a cselib value. */
13040 ix86_pic_register_p (rtx x)
13042 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
13043 return (pic_offset_table_rtx
13044 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
13046 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
13049 /* Helper function for ix86_delegitimize_address.
13050 Attempt to delegitimize TLS local-exec accesses. */
13053 ix86_delegitimize_tls_address (rtx orig_x)
13055 rtx x = orig_x, unspec;
13056 struct ix86_address addr;
13058 if (!TARGET_TLS_DIRECT_SEG_REFS)
13062 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
13064 if (ix86_decompose_address (x, &addr) == 0
13065 || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
13066 || addr.disp == NULL_RTX
13067 || GET_CODE (addr.disp) != CONST)
13069 unspec = XEXP (addr.disp, 0);
13070 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
13071 unspec = XEXP (unspec, 0);
13072 if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
13074 x = XVECEXP (unspec, 0, 0);
13075 gcc_assert (GET_CODE (x) == SYMBOL_REF);
13076 if (unspec != XEXP (addr.disp, 0))
13077 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
13080 rtx idx = addr.index;
13081 if (addr.scale != 1)
13082 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
13083 x = gen_rtx_PLUS (Pmode, idx, x);
13086 x = gen_rtx_PLUS (Pmode, addr.base, x);
13087 if (MEM_P (orig_x))
13088 x = replace_equiv_address_nv (orig_x, x);
13092 /* In the name of slightly smaller debug output, and to cater to
13093 general assembler lossage, recognize PIC+GOTOFF and turn it back
13094 into a direct symbol reference.
13096 On Darwin, this is necessary to avoid a crash, because Darwin
13097 has a different PIC label for each routine but the DWARF debugging
13098 information is not associated with any particular routine, so it's
13099 necessary to remove references to the PIC label from RTL stored by
13100 the DWARF output code. */
13103 ix86_delegitimize_address (rtx x)
13105 rtx orig_x = delegitimize_mem_from_attrs (x);
13106 /* addend is NULL or some rtx if x is something+GOTOFF where
13107 something doesn't include the PIC register. */
13108 rtx addend = NULL_RTX;
13109 /* reg_addend is NULL or a multiple of some register. */
13110 rtx reg_addend = NULL_RTX;
13111 /* const_addend is NULL or a const_int. */
13112 rtx const_addend = NULL_RTX;
13113 /* This is the result, or NULL. */
13114 rtx result = NULL_RTX;
13123 if (GET_CODE (x) != CONST
13124 || GET_CODE (XEXP (x, 0)) != UNSPEC
13125 || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
13126 && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
13127 || !MEM_P (orig_x))
13128 return ix86_delegitimize_tls_address (orig_x);
13129 x = XVECEXP (XEXP (x, 0), 0, 0);
13130 if (GET_MODE (orig_x) != Pmode)
13131 return simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
13135 if (GET_CODE (x) != PLUS
13136 || GET_CODE (XEXP (x, 1)) != CONST)
13137 return ix86_delegitimize_tls_address (orig_x);
13139 if (ix86_pic_register_p (XEXP (x, 0)))
13140 /* %ebx + GOT/GOTOFF */
13142 else if (GET_CODE (XEXP (x, 0)) == PLUS)
13144 /* %ebx + %reg * scale + GOT/GOTOFF */
13145 reg_addend = XEXP (x, 0);
13146 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
13147 reg_addend = XEXP (reg_addend, 1);
13148 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
13149 reg_addend = XEXP (reg_addend, 0);
13152 reg_addend = NULL_RTX;
13153 addend = XEXP (x, 0);
13157 addend = XEXP (x, 0);
13159 x = XEXP (XEXP (x, 1), 0);
13160 if (GET_CODE (x) == PLUS
13161 && CONST_INT_P (XEXP (x, 1)))
13163 const_addend = XEXP (x, 1);
13167 if (GET_CODE (x) == UNSPEC
13168 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
13169 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
13170 result = XVECEXP (x, 0, 0);
13172 if (TARGET_MACHO && darwin_local_data_pic (x)
13173 && !MEM_P (orig_x))
13174 result = XVECEXP (x, 0, 0);
13177 return ix86_delegitimize_tls_address (orig_x);
13180 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
13182 result = gen_rtx_PLUS (Pmode, reg_addend, result);
13185 /* If the rest of original X doesn't involve the PIC register, add
13186 addend and subtract pic_offset_table_rtx. This can happen e.g.
13188 leal (%ebx, %ecx, 4), %ecx
13190 movl foo@GOTOFF(%ecx), %edx
13191 in which case we return (%ecx - %ebx) + foo. */
13192 if (pic_offset_table_rtx)
13193 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
13194 pic_offset_table_rtx),
13199 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
13200 return simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
13204 /* If X is a machine specific address (i.e. a symbol or label being
13205 referenced as a displacement from the GOT implemented using an
13206 UNSPEC), then return the base term. Otherwise return X. */
13209 ix86_find_base_term (rtx x)
13215 if (GET_CODE (x) != CONST)
13217 term = XEXP (x, 0);
13218 if (GET_CODE (term) == PLUS
13219 && (CONST_INT_P (XEXP (term, 1))
13220 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
13221 term = XEXP (term, 0);
13222 if (GET_CODE (term) != UNSPEC
13223 || (XINT (term, 1) != UNSPEC_GOTPCREL
13224 && XINT (term, 1) != UNSPEC_PCREL))
13227 return XVECEXP (term, 0, 0);
13230 return ix86_delegitimize_address (x);
13234 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
13235 int fp, FILE *file)
13237 const char *suffix;
13239 if (mode == CCFPmode || mode == CCFPUmode)
13241 code = ix86_fp_compare_code_to_integer (code);
13242 mode = CCmode;
13243 }
13244 if (reverse)
13245 code = reverse_condition (code);
13296 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
13300 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
13301 Those same assemblers have the same but opposite lossage on cmov. */
13302 if (mode == CCmode)
13303 suffix = fp ? "nbe" : "a";
13304 else if (mode == CCCmode)
13305 suffix = "b";
13306 else
13307 gcc_unreachable ();
13323 gcc_unreachable ();
13327 gcc_assert (mode == CCmode || mode == CCCmode);
13344 gcc_unreachable ();
13348 /* ??? As above. */
13349 gcc_assert (mode == CCmode || mode == CCCmode);
13350 suffix = fp ? "nb" : "ae";
13353 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
13357 /* ??? As above. */
13358 if (mode == CCmode)
13360 else if (mode == CCCmode)
13361 suffix = fp ? "nb" : "ae";
13363 gcc_unreachable ();
13366 suffix = fp ? "u" : "p";
13369 suffix = fp ? "nu" : "np";
13372 gcc_unreachable ();
13374 fputs (suffix, file);
13377 /* Print the name of register X to FILE based on its machine mode and number.
13378 If CODE is 'w', pretend the mode is HImode.
13379 If CODE is 'b', pretend the mode is QImode.
13380 If CODE is 'k', pretend the mode is SImode.
13381 If CODE is 'q', pretend the mode is DImode.
13382 If CODE is 'x', pretend the mode is V4SFmode.
13383 If CODE is 't', pretend the mode is V8SFmode.
13384 If CODE is 'h', pretend the reg is the 'high' byte register.
13385 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
13386 If CODE is 'd', duplicate the operand for AVX instruction.
13390 print_reg (rtx x, int code, FILE *file)
13393 bool duplicated = code == 'd' && TARGET_AVX;
13395 gcc_assert (x == pc_rtx
13396 || (REGNO (x) != ARG_POINTER_REGNUM
13397 && REGNO (x) != FRAME_POINTER_REGNUM
13398 && REGNO (x) != FLAGS_REG
13399 && REGNO (x) != FPSR_REG
13400 && REGNO (x) != FPCR_REG));
13402 if (ASSEMBLER_DIALECT == ASM_ATT)
13407 gcc_assert (TARGET_64BIT);
13408 fputs ("rip", file);
13412 if (code == 'w' || MMX_REG_P (x))
13414 else if (code == 'b')
13416 else if (code == 'k')
13418 else if (code == 'q')
13420 else if (code == 'y')
13422 else if (code == 'h')
13424 else if (code == 'x')
13426 else if (code == 't')
13429 code = GET_MODE_SIZE (GET_MODE (x));
13431 /* Irritatingly, AMD extended registers use a different naming
13432 convention from the normal registers.  */
13433 if (REX_INT_REG_P (x))
13435 gcc_assert (TARGET_64BIT);
13439 error ("extended registers have no high halves");
13442 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
13445 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
13448 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
13451 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
13454 error ("unsupported operand size for extended register");
13464 if (STACK_TOP_P (x))
13473 if (! ANY_FP_REG_P (x))
13474 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
13479 reg = hi_reg_name[REGNO (x)];
13482 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
13484 reg = qi_reg_name[REGNO (x)];
13487 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
13489 reg = qi_high_reg_name[REGNO (x)];
13494 gcc_assert (!duplicated);
13496 fputs (hi_reg_name[REGNO (x)] + 1, file);
13501 gcc_unreachable ();
13507 if (ASSEMBLER_DIALECT == ASM_ATT)
13508 fprintf (file, ", %%%s", reg);
13510 fprintf (file, ", %s", reg);
13514 /* Locate some local-dynamic symbol still in use by this function
13515 so that we can print its name in some tls_local_dynamic_base
13516 pattern.  */
13519 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
13523 if (GET_CODE (x) == SYMBOL_REF
13524 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
13526 cfun->machine->some_ld_name = XSTR (x, 0);
13533 static const char *
13534 get_some_local_dynamic_name (void)
13538 if (cfun->machine->some_ld_name)
13539 return cfun->machine->some_ld_name;
13541 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
13542 if (NONDEBUG_INSN_P (insn)
13543 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
13544 return cfun->machine->some_ld_name;
13549 /* Meaning of CODE:
13550 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
13551 C -- print opcode suffix for set/cmov insn.
13552 c -- like C, but print reversed condition
13553 F,f -- likewise, but for floating-point.
13554 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
13555 otherwise nothing
13556 R -- print the prefix for register names.
13557 z -- print the opcode suffix for the size of the current operand.
13558 Z -- likewise, with special suffixes for x87 instructions.
13559 * -- print a star (in certain assembler syntax)
13560 A -- print an absolute memory reference.
13561 w -- print the operand as if it's a "word" (HImode) even if it isn't.
13562 s -- print a shift double count, followed by the assembler's argument
13563 delimiter
13564 b -- print the QImode name of the register for the indicated operand.
13565 %b0 would print %al if operands[0] is reg 0.
13566 w -- likewise, print the HImode name of the register.
13567 k -- likewise, print the SImode name of the register.
13568 q -- likewise, print the DImode name of the register.
13569 x -- likewise, print the V4SFmode name of the register.
13570 t -- likewise, print the V8SFmode name of the register.
13571 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
13572 y -- print "st(0)" instead of "st" as a register.
13573 d -- print duplicated register operand for AVX instruction.
13574 D -- print condition for SSE cmp instruction.
13575 P -- if PIC, print an @PLT suffix.
13576 X -- don't print any sort of PIC '@' suffix for a symbol.
13577 & -- print some in-use local-dynamic symbol name.
13578 H -- print a memory address offset by 8; used for sse high-parts
13579 Y -- print condition for XOP pcom* instruction.
13580 + -- print a branch hint as 'cs' or 'ds' prefix
13581 ; -- print a semicolon (after prefixes due to bug in older gas).
13582 @ -- print the segment register of a thread base pointer load
13586 ix86_print_operand (FILE *file, rtx x, int code)
13593 if (ASSEMBLER_DIALECT == ASM_ATT)
13599 const char *name = get_some_local_dynamic_name ();
13601 output_operand_lossage ("'%%&' used without any "
13602 "local dynamic TLS references");
13604 assemble_name (file, name);
13609 switch (ASSEMBLER_DIALECT)
13616 /* Intel syntax. For absolute addresses, registers should not
13617 be surrounded by braces. */
13621 ix86_print_operand (file, x, 0);
13628 gcc_unreachable ();
13631 ix86_print_operand (file, x, 0);
13636 if (ASSEMBLER_DIALECT == ASM_ATT)
13641 if (ASSEMBLER_DIALECT == ASM_ATT)
13646 if (ASSEMBLER_DIALECT == ASM_ATT)
13651 if (ASSEMBLER_DIALECT == ASM_ATT)
13656 if (ASSEMBLER_DIALECT == ASM_ATT)
13661 if (ASSEMBLER_DIALECT == ASM_ATT)
13666 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13668 /* Opcodes don't get size suffixes if using Intel opcodes. */
13669 if (ASSEMBLER_DIALECT == ASM_INTEL)
13672 switch (GET_MODE_SIZE (GET_MODE (x)))
13691 output_operand_lossage
13692 ("invalid operand size for operand code '%c'", code);
13697 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
13698 warning
13699 (0, "non-integer operand used with operand code '%c'", code);
13703 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
13704 if (ASSEMBLER_DIALECT == ASM_INTEL)
13707 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13709 switch (GET_MODE_SIZE (GET_MODE (x)))
13712 #ifdef HAVE_AS_IX86_FILDS
13722 #ifdef HAVE_AS_IX86_FILDQ
13725 fputs ("ll", file);
13733 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
13735 /* 387 opcodes don't get size suffixes
13736 if the operands are registers. */
13737 if (STACK_REG_P (x))
13740 switch (GET_MODE_SIZE (GET_MODE (x)))
13761 output_operand_lossage
13762 ("invalid operand type used with operand code '%c'", code);
13766 output_operand_lossage
13767 ("invalid operand size for operand code '%c'", code);
13784 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
13786 ix86_print_operand (file, x, 0);
13787 fputs (", ", file);
13792 /* Little bit of braindamage here.  The SSE compare instructions
13793 use completely different names for the comparisons than the fp
13794 conditional moves do.  */
13797 switch (GET_CODE (x))
13800 fputs ("eq", file);
13803 fputs ("eq_us", file);
13806 fputs ("lt", file);
13809 fputs ("nge", file);
13812 fputs ("le", file);
13815 fputs ("ngt", file);
13818 fputs ("unord", file);
13821 fputs ("neq", file);
13824 fputs ("neq_oq", file);
13827 fputs ("ge", file);
13830 fputs ("nlt", file);
13833 fputs ("gt", file);
13836 fputs ("nle", file);
13839 fputs ("ord", file);
13842 output_operand_lossage ("operand is not a condition code, "
13843 "invalid operand code 'D'");
13849 switch (GET_CODE (x))
13853 fputs ("eq", file);
13857 fputs ("lt", file);
13861 fputs ("le", file);
13864 fputs ("unord", file);
13868 fputs ("neq", file);
13872 fputs ("nlt", file);
13876 fputs ("nle", file);
13879 fputs ("ord", file);
13882 output_operand_lossage ("operand is not a condition code, "
13883 "invalid operand code 'D'");
13889 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13890 if (ASSEMBLER_DIALECT == ASM_ATT)
13892 switch (GET_MODE (x))
13894 case HImode: putc ('w', file); break;
13896 case SFmode: putc ('l', file); break;
13898 case DFmode: putc ('q', file); break;
13899 default: gcc_unreachable ();
13906 if (!COMPARISON_P (x))
13908 output_operand_lossage ("operand is neither a constant nor a "
13909 "condition code, invalid operand code "
13913 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
13916 if (!COMPARISON_P (x))
13918 output_operand_lossage ("operand is neither a constant nor a "
13919 "condition code, invalid operand code "
13923 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13924 if (ASSEMBLER_DIALECT == ASM_ATT)
13927 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
13930 /* Like above, but reverse condition */
13932 /* Check to see if argument to %c is really a constant
13933 and not a condition code which needs to be reversed. */
13934 if (!COMPARISON_P (x))
13936 output_operand_lossage ("operand is neither a constant nor a "
13937 "condition code, invalid operand "
13941 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
13944 if (!COMPARISON_P (x))
13946 output_operand_lossage ("operand is neither a constant nor a "
13947 "condition code, invalid operand "
13951 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13952 if (ASSEMBLER_DIALECT == ASM_ATT)
13955 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
13959 /* It doesn't actually matter what mode we use here, as we're
13960 only going to use this for printing. */
13961 x = adjust_address_nv (x, DImode, 8);
13962 break;
13964 case '+':
13965 {
13966 rtx x;
13968 if (!optimize
13969 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
13970 return;
13972 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
13975 int pred_val = INTVAL (XEXP (x, 0));
13977 if (pred_val < REG_BR_PROB_BASE * 45 / 100
13978 || pred_val > REG_BR_PROB_BASE * 55 / 100)
13980 int taken = pred_val > REG_BR_PROB_BASE / 2;
13981 int cputaken = final_forward_branch_p (current_output_insn) == 0;
13983 /* Emit hints only in the case default branch prediction
13984 heuristics would fail. */
13985 if (taken != cputaken)
13987 /* We use 3e (DS) prefix for taken branches and
13988 2e (CS) prefix for not taken branches. */
13990 fputs ("ds ; ", file);
13992 fputs ("cs ; ", file);
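/* So a branch predicted taken against the static heuristics comes out
   as e.g. "ds ; jne .L2", a not-taken one as "cs ; jne .L2".  */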
14000 switch (GET_CODE (x))
14003 fputs ("neq", file);
14006 fputs ("eq", file);
14010 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
14014 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
14018 fputs ("le", file);
14022 fputs ("lt", file);
14025 fputs ("unord", file);
14028 fputs ("ord", file);
14031 fputs ("ueq", file);
14034 fputs ("nlt", file);
14037 fputs ("nle", file);
14040 fputs ("ule", file);
14043 fputs ("ult", file);
14046 fputs ("une", file);
14049 output_operand_lossage ("operand is not a condition code, "
14050 "invalid operand code 'Y'");
14056 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
14062 if (ASSEMBLER_DIALECT == ASM_ATT)
14065 /* The kernel uses a different segment register for performance
14066 reasons; a system call would not have to trash the userspace
14067 segment register, which would be expensive. */
14068 if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
14069 fputs ("fs", file);
14071 fputs ("gs", file);
14075 output_operand_lossage ("invalid operand code '%c'", code);
14080 print_reg (x, code, file);
14082 else if (MEM_P (x))
14084 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
14085 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
14086 && GET_MODE (x) != BLKmode)
14089 switch (GET_MODE_SIZE (GET_MODE (x)))
14091 case 1: size = "BYTE"; break;
14092 case 2: size = "WORD"; break;
14093 case 4: size = "DWORD"; break;
14094 case 8: size = "QWORD"; break;
14095 case 12: size = "TBYTE"; break;
14096 case 16:
14097 if (GET_MODE (x) == XFmode)
14098 size = "TBYTE";
14099 else
14100 size = "XMMWORD";
14101 break;
14102 case 32: size = "YMMWORD"; break;
14103 default:
14104 gcc_unreachable ();
14105 }
14107 /* Check for explicit size override (codes 'b', 'w' and 'k').  */
14108 if (code == 'b')
14109 size = "BYTE";
14110 else if (code == 'w')
14111 size = "WORD";
14112 else if (code == 'k')
14113 size = "DWORD";
14115 fputs (size, file);
14116 fputs (" PTR ", file);
14120 /* Avoid (%rip) for call operands. */
14121 if (CONSTANT_ADDRESS_P (x) && code == 'P'
14122 && !CONST_INT_P (x))
14123 output_addr_const (file, x);
14124 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
14125 output_operand_lossage ("invalid constraints for operand");
14127 output_address (x);
14130 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
14135 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14136 REAL_VALUE_TO_TARGET_SINGLE (r, l);
14138 if (ASSEMBLER_DIALECT == ASM_ATT)
14140 /* Sign extend 32bit SFmode immediate to 8 bytes. */
14142 fprintf (file, "0x%08llx", (unsigned long long) (int) l);
14144 fprintf (file, "0x%08x", (unsigned int) l);
14147 /* These float cases don't actually occur as immediate operands. */
14148 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
14152 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14153 fputs (dstr, file);
14156 else if (GET_CODE (x) == CONST_DOUBLE
14157 && GET_MODE (x) == XFmode)
14161 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14162 fputs (dstr, file);
14167 /* We have patterns that allow zero sets of memory, for instance.
14168 In 64-bit mode, we should probably support all 8-byte vectors,
14169 since we can in fact encode that into an immediate. */
14170 if (GET_CODE (x) == CONST_VECTOR)
14172 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
14178 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
14180 if (ASSEMBLER_DIALECT == ASM_ATT)
14183 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
14184 || GET_CODE (x) == LABEL_REF)
14186 if (ASSEMBLER_DIALECT == ASM_ATT)
14189 fputs ("OFFSET FLAT:", file);
14192 if (CONST_INT_P (x))
14193 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
14194 else if (flag_pic || MACHOPIC_INDIRECT)
14195 output_pic_addr_const (file, x, code);
14197 output_addr_const (file, x);
14202 ix86_print_operand_punct_valid_p (unsigned char code)
14204 return (code == '@' || code == '*' || code == '+'
14205 || code == '&' || code == ';');
14208 /* Print a memory operand whose address is ADDR. */
14211 ix86_print_operand_address (FILE *file, rtx addr)
14213 struct ix86_address parts;
14214 rtx base, index, disp;
14216 int ok = ix86_decompose_address (addr, &parts);
14221 index = parts.index;
14223 scale = parts.scale;
14231 if (ASSEMBLER_DIALECT == ASM_ATT)
14233 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
14236 gcc_unreachable ();
14239 /* Use one byte shorter RIP relative addressing for 64bit mode. */
14240 if (TARGET_64BIT && !base && !index)
14244 if (GET_CODE (disp) == CONST
14245 && GET_CODE (XEXP (disp, 0)) == PLUS
14246 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14247 symbol = XEXP (XEXP (disp, 0), 0);
14249 if (GET_CODE (symbol) == LABEL_REF
14250 || (GET_CODE (symbol) == SYMBOL_REF
14251 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
14254 if (!base && !index)
14256 /* Displacement only requires special attention. */
14258 if (CONST_INT_P (disp))
14260 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
14261 fputs ("ds:", file);
14262 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
14265 output_pic_addr_const (file, disp, 0);
14267 output_addr_const (file, disp);
14271 if (ASSEMBLER_DIALECT == ASM_ATT)
14276 output_pic_addr_const (file, disp, 0);
14277 else if (GET_CODE (disp) == LABEL_REF)
14278 output_asm_label (disp);
14280 output_addr_const (file, disp);
14285 print_reg (base, 0, file);
14289 print_reg (index, 0, file);
14291 fprintf (file, ",%d", scale);
14297 rtx offset = NULL_RTX;
14301 /* Pull out the offset of a symbol; print any symbol itself. */
14302 if (GET_CODE (disp) == CONST
14303 && GET_CODE (XEXP (disp, 0)) == PLUS
14304 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14306 offset = XEXP (XEXP (disp, 0), 1);
14307 disp = gen_rtx_CONST (VOIDmode,
14308 XEXP (XEXP (disp, 0), 0));
14312 output_pic_addr_const (file, disp, 0);
14313 else if (GET_CODE (disp) == LABEL_REF)
14314 output_asm_label (disp);
14315 else if (CONST_INT_P (disp))
14318 output_addr_const (file, disp);
14324 print_reg (base, 0, file);
14327 if (INTVAL (offset) >= 0)
14329 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14333 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14340 print_reg (index, 0, file);
14342 fprintf (file, "*%d", scale);
14349 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
14352 i386_asm_output_addr_const_extra (FILE *file, rtx x)
14356 if (GET_CODE (x) != UNSPEC)
14359 op = XVECEXP (x, 0, 0);
14360 switch (XINT (x, 1))
14362 case UNSPEC_GOTTPOFF:
14363 output_addr_const (file, op);
14364 /* FIXME: This might be @TPOFF in Sun ld. */
14365 fputs ("@gottpoff", file);
14368 output_addr_const (file, op);
14369 fputs ("@tpoff", file);
14371 case UNSPEC_NTPOFF:
14372 output_addr_const (file, op);
14374 fputs ("@tpoff", file);
14376 fputs ("@ntpoff", file);
14378 case UNSPEC_DTPOFF:
14379 output_addr_const (file, op);
14380 fputs ("@dtpoff", file);
14382 case UNSPEC_GOTNTPOFF:
14383 output_addr_const (file, op);
14385 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
14386 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
14388 fputs ("@gotntpoff", file);
14390 case UNSPEC_INDNTPOFF:
14391 output_addr_const (file, op);
14392 fputs ("@indntpoff", file);
14395 case UNSPEC_MACHOPIC_OFFSET:
14396 output_addr_const (file, op);
14398 machopic_output_function_base_name (file);
14402 case UNSPEC_STACK_CHECK:
14406 gcc_assert (flag_split_stack);
14408 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
14409 offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
14411 gcc_unreachable ();
14414 fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
14425 /* Split one or more double-mode RTL references into pairs of half-mode
14426 references. The RTL can be REG, offsettable MEM, integer constant, or
14427 CONST_DOUBLE. "operands" is a pointer to an array of double-mode RTLs to
14428 split and "num" is its length. lo_half and hi_half are output arrays
14429 that parallel "operands". */
14432 split_double_mode (enum machine_mode mode, rtx operands[],
14433 int num, rtx lo_half[], rtx hi_half[])
14435 enum machine_mode half_mode;
14441 half_mode = DImode;
14444 half_mode = SImode;
14447 gcc_unreachable ();
14450 byte = GET_MODE_SIZE (half_mode);
14454 rtx op = operands[num];
14456 /* simplify_subreg refuses to split volatile memory addresses,
14457 but we still have to handle them.  */
14460 lo_half[num] = adjust_address (op, half_mode, 0);
14461 hi_half[num] = adjust_address (op, half_mode, byte);
14465 lo_half[num] = simplify_gen_subreg (half_mode, op,
14466 GET_MODE (op) == VOIDmode
14467 ? mode : GET_MODE (op), 0);
14468 hi_half[num] = simplify_gen_subreg (half_mode, op,
14469 GET_MODE (op) == VOIDmode
14470 ? mode : GET_MODE (op), byte);
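/* E.g. splitting a DImode register pair on ia32 yields the SImode low
   part at byte offset 0 and the high part at offset 4; TImode on 64-bit
   splits into two DImode halves the same way.  */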
14475 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
14476 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
14477 is the expression of the binary operation. The output may either be
14478 emitted here, or returned to the caller, like all output_* functions.
14480 There is no guarantee that the operands are the same mode, as they
14481 might be within FLOAT or FLOAT_EXTEND expressions. */
14483 #ifndef SYSV386_COMPAT
14484 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
14485 wants to fix the assemblers because that causes incompatibility
14486 with gcc. No-one wants to fix gcc because that causes
14487 incompatibility with assemblers... You can use the option of
14488 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
14489 #define SYSV386_COMPAT 1
14493 output_387_binary_op (rtx insn, rtx *operands)
14495 static char buf[40];
14498 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
14500 #ifdef ENABLE_CHECKING
14501 /* Even if we do not want to check the inputs, this documents input
14502 constraints. Which helps in understanding the following code. */
14503 if (STACK_REG_P (operands[0])
14504 && ((REG_P (operands[1])
14505 && REGNO (operands[0]) == REGNO (operands[1])
14506 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
14507 || (REG_P (operands[2])
14508 && REGNO (operands[0]) == REGNO (operands[2])
14509 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
14510 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
14513 gcc_assert (is_sse);
14516 switch (GET_CODE (operands[3]))
14519 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14520 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14528 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14529 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14537 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14538 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14546 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14547 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14555 gcc_unreachable ();
14562 strcpy (buf, ssep);
14563 if (GET_MODE (operands[0]) == SFmode)
14564 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
14566 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
14570 strcpy (buf, ssep + 1);
14571 if (GET_MODE (operands[0]) == SFmode)
14572 strcat (buf, "ss\t{%2, %0|%0, %2}");
14574 strcat (buf, "sd\t{%2, %0|%0, %2}");
14580 switch (GET_CODE (operands[3]))
14584 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
14586 rtx temp = operands[2];
14587 operands[2] = operands[1];
14588 operands[1] = temp;
14591 /* We know operands[0] == operands[1].  */
14593 if (MEM_P (operands[2]))
14599 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14601 if (STACK_TOP_P (operands[0]))
14602 /* How is it that we are storing to a dead operand[2]?
14603 Well, presumably operands[1] is dead too. We can't
14604 store the result to st(0) as st(0) gets popped on this
14605 instruction. Instead store to operands[2] (which I
14606 think has to be st(1)). st(1) will be popped later.
14607 gcc <= 2.8.1 didn't have this check and generated
14608 assembly code that the Unixware assembler rejected. */
14609 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14611 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14615 if (STACK_TOP_P (operands[0]))
14616 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14618 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14623 if (MEM_P (operands[1]))
14629 if (MEM_P (operands[2]))
14635 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14638 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
14639 derived assemblers, confusingly reverse the direction of
14640 the operation for fsub{r} and fdiv{r} when the
14641 destination register is not st(0). The Intel assembler
14642 doesn't have this brain damage. Read !SYSV386_COMPAT to
14643 figure out what the hardware really does. */
14644 if (STACK_TOP_P (operands[0]))
14645 p = "{p\t%0, %2|rp\t%2, %0}";
14647 p = "{rp\t%2, %0|p\t%0, %2}";
14649 if (STACK_TOP_P (operands[0]))
14650 /* As above for fmul/fadd, we can't store to st(0). */
14651 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14653 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14658 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
14661 if (STACK_TOP_P (operands[0]))
14662 p = "{rp\t%0, %1|p\t%1, %0}";
14664 p = "{p\t%1, %0|rp\t%0, %1}";
14666 if (STACK_TOP_P (operands[0]))
14667 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
14669 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
14674 if (STACK_TOP_P (operands[0]))
14676 if (STACK_TOP_P (operands[1]))
14677 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14679 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
14682 else if (STACK_TOP_P (operands[1]))
14685 p = "{\t%1, %0|r\t%0, %1}";
14687 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
14693 p = "{r\t%2, %0|\t%0, %2}";
14695 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14701 gcc_unreachable ();
14708 /* Return needed mode for entity in optimize_mode_switching pass. */
14711 ix86_mode_needed (int entity, rtx insn)
14713 enum attr_i387_cw mode;
14715 /* The mode UNINITIALIZED is used to store the control word after a
14716 function call or ASM pattern.  The mode ANY specifies that the
14717 function has no requirements on the control word and makes no
14718 changes in the bits we are interested in.  */
14720 if (CALL_P (insn)
14721 || (NONJUMP_INSN_P (insn)
14722 && (asm_noperands (PATTERN (insn)) >= 0
14723 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
14724 return I387_CW_UNINITIALIZED;
14726 if (recog_memoized (insn) < 0)
14727 return I387_CW_ANY;
14729 mode = get_attr_i387_cw (insn);
14731 switch (entity)
14732 {
14733 case I387_TRUNC:
14734 if (mode == I387_CW_TRUNC)
14735 return mode;
14736 break;
14738 case I387_FLOOR:
14739 if (mode == I387_CW_FLOOR)
14740 return mode;
14741 break;
14743 case I387_CEIL:
14744 if (mode == I387_CW_CEIL)
14745 return mode;
14746 break;
14748 case I387_MASK_PM:
14749 if (mode == I387_CW_MASK_PM)
14750 return mode;
14751 break;
14753 default:
14754 gcc_unreachable ();
14755 }
14757 return I387_CW_ANY;
14760 /* Output code to initialize control word copies used by trunc?f?i and
14761 rounding patterns. CURRENT_MODE is set to current control word,
14762 while NEW_MODE is set to new control word. */
14765 emit_i387_cw_initialization (int mode)
14767 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
14770 enum ix86_stack_slot slot;
14772 rtx reg = gen_reg_rtx (HImode);
14774 emit_insn (gen_x86_fnstcw_1 (stored_mode));
14775 emit_move_insn (reg, copy_rtx (stored_mode));
14777 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
14778 || optimize_function_for_size_p (cfun))
14779 {
14780 switch (mode)
14781 {
14782 case I387_CW_TRUNC:
14783 /* round toward zero (truncate) */
14784 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
14785 slot = SLOT_CW_TRUNC;
14788 case I387_CW_FLOOR:
14789 /* round down toward -oo */
14790 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
14791 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
14792 slot = SLOT_CW_FLOOR;
14796 /* round up toward +oo */
14797 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
14798 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
14799 slot = SLOT_CW_CEIL;
14802 case I387_CW_MASK_PM:
14803 /* mask precision exception for nearbyint() */
14804 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
14805 slot = SLOT_CW_MASK_PM;
14809 gcc_unreachable ();
14810 }
14811 }
14812 else
14813 {
14814 switch (mode)
14815 {
14816 case I387_CW_TRUNC:
14817 /* round toward zero (truncate) */
14818 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
14819 slot = SLOT_CW_TRUNC;
14822 case I387_CW_FLOOR:
14823 /* round down toward -oo */
14824 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
14825 slot = SLOT_CW_FLOOR;
14829 /* round up toward +oo */
14830 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
14831 slot = SLOT_CW_CEIL;
14834 case I387_CW_MASK_PM:
14835 /* mask precision exception for nearbyint() */
14836 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
14837 slot = SLOT_CW_MASK_PM;
14841 gcc_unreachable ();
14842 }
14843 }
14845 gcc_assert (slot < MAX_386_STACK_LOCALS);
14847 new_mode = assign_386_stack_local (HImode, slot);
14848 emit_move_insn (new_mode, reg);
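/* The bits edited above are the x87 rounding-control field (mask
   0x0c00): 00 = round to nearest, 01 = round down, 10 = round up,
   11 = truncate; 0x0020 is the precision-exception mask bit.  */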
14851 /* Output code for INSN to convert a float to a signed int. OPERANDS
14852 are the insn operands. The output may be [HSD]Imode and the input
14853 operand may be [SDX]Fmode. */
14856 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
14858 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
14859 int dimode_p = GET_MODE (operands[0]) == DImode;
14860 int round_mode = get_attr_i387_cw (insn);
14862 /* Jump through a hoop or two for DImode, since the hardware has no
14863 non-popping instruction. We used to do this a different way, but
14864 that was somewhat fragile and broke with post-reload splitters. */
14865 if ((dimode_p || fisttp) && !stack_top_dies)
14866 output_asm_insn ("fld\t%y1", operands);
14868 gcc_assert (STACK_TOP_P (operands[1]));
14869 gcc_assert (MEM_P (operands[0]));
14870 gcc_assert (GET_MODE (operands[1]) != TFmode);
14873 output_asm_insn ("fisttp%Z0\t%0", operands);
14876 if (round_mode != I387_CW_ANY)
14877 output_asm_insn ("fldcw\t%3", operands);
14878 if (stack_top_dies || dimode_p)
14879 output_asm_insn ("fistp%Z0\t%0", operands);
14881 output_asm_insn ("fist%Z0\t%0", operands);
14882 if (round_mode != I387_CW_ANY)
14883 output_asm_insn ("fldcw\t%2", operands);
14889 /* Output code for x87 ffreep insn. The OPNO argument, which may only
14890 have the values zero or one, indicates the ffreep insn's operand
14891 from the OPERANDS array. */
14893 static const char *
14894 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
14896 if (TARGET_USE_FFREEP)
14897 #ifdef HAVE_AS_IX86_FFREEP
14898 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
14899 #else
14900 {
14901 static char retval[32];
14902 int regno = REGNO (operands[opno]);
14904 gcc_assert (FP_REGNO_P (regno));
14906 regno -= FIRST_STACK_REG;
14908 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
14909 return retval;
14910 }
14911 #endif
14913 return opno ? "fstp\t%y1" : "fstp\t%y0";
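/* The raw encoding works because the bytes DF C0+i are "ffreep %st(i)";
   e.g. regno 1 emits ASM_SHORT 0xc1df, which in little-endian memory
   order is the byte sequence df c1.  */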
14917 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
14918 should be used. UNORDERED_P is true when fucom should be used. */
14921 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
14923 int stack_top_dies;
14924 rtx cmp_op0, cmp_op1;
14925 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
14929 cmp_op0 = operands[0];
14930 cmp_op1 = operands[1];
14934 cmp_op0 = operands[1];
14935 cmp_op1 = operands[2];
14940 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
14941 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
14942 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
14943 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
14945 if (GET_MODE (operands[0]) == SFmode)
14947 return &ucomiss[TARGET_AVX ? 0 : 1];
14949 return &comiss[TARGET_AVX ? 0 : 1];
14952 return &ucomisd[TARGET_AVX ? 0 : 1];
14954 return &comisd[TARGET_AVX ? 0 : 1];
14957 gcc_assert (STACK_TOP_P (cmp_op0));
14959 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
14961 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
14963 if (stack_top_dies)
14965 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
14966 return output_387_ffreep (operands, 1);
14969 return "ftst\n\tfnstsw\t%0";
14972 if (STACK_REG_P (cmp_op1)
14974 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
14975 && REGNO (cmp_op1) != FIRST_STACK_REG)
14977 /* If the top of the 387 stack dies, and the other operand is
14978 also a stack register that dies, then this must be a
14979 `fcompp' float compare.  */
14983 /* There is no double popping fcomi variant. Fortunately,
14984 eflags is immune from the fstp's cc clobbering. */
14986 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
14988 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
14989 return output_387_ffreep (operands, 0);
14994 return "fucompp\n\tfnstsw\t%0";
14996 return "fcompp\n\tfnstsw\t%0";
15001 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
15003 static const char * const alt[16] =
15005 "fcom%Z2\t%y2\n\tfnstsw\t%0",
15006 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
15007 "fucom%Z2\t%y2\n\tfnstsw\t%0",
15008 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
15010 "ficom%Z2\t%y2\n\tfnstsw\t%0",
15011 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
15015 "fcomi\t{%y1, %0|%0, %y1}",
15016 "fcomip\t{%y1, %0|%0, %y1}",
15017 "fucomi\t{%y1, %0|%0, %y1}",
15018 "fucomip\t{%y1, %0|%0, %y1}",
15029 mask = eflags_p << 3;
15030 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
15031 mask |= unordered_p << 1;
15032 mask |= stack_top_dies;
15034 gcc_assert (mask < 16);
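/* E.g. eflags_p = 1, a float cmp_op1, unordered_p = 1 and a dying
   stack top give mask = 8|0|2|1 = 11, selecting "fucomip".  */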
15043 ix86_output_addr_vec_elt (FILE *file, int value)
15045 const char *directive = ASM_LONG;
15049 directive = ASM_QUAD;
15051 gcc_assert (!TARGET_64BIT);
15054 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
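/* I.e. one ".long .L<value>" (or ".quad" on 64-bit) per jump-table
   slot, assuming the usual ".L" local label prefix.  */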
15058 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
15060 const char *directive = ASM_LONG;
15063 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
15064 directive = ASM_QUAD;
15066 gcc_assert (!TARGET_64BIT);
15068 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
15069 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
15070 fprintf (file, "%s%s%d-%s%d\n",
15071 directive, LPREFIX, value, LPREFIX, rel);
15072 else if (HAVE_AS_GOTOFF_IN_DATA)
15073 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
15075 else if (TARGET_MACHO)
15077 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
15078 machopic_output_function_base_name (file);
15083 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
15084 GOT_SYMBOL_NAME, LPREFIX, value);
15087 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
15091 ix86_expand_clear (rtx dest)
15095 /* We play register width games, which are only valid after reload. */
15096 gcc_assert (reload_completed);
15098 /* Avoid HImode and its attendant prefix byte. */
15099 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
15100 dest = gen_rtx_REG (SImode, REGNO (dest));
15101 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
15103 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
15104 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
15106 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15107 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
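  /* The xor form is preferred when allowed: "xorl %reg, %reg" is a
     2-byte encoding versus 5 bytes for "movl $0, %reg", but unlike the
     plain move it clobbers the flags, which the PARALLEL records.  */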
15113 /* X is an unchanging MEM. If it is a constant pool reference, return
15114 the constant pool rtx, else NULL. */
15117 maybe_get_pool_constant (rtx x)
15119 x = ix86_delegitimize_address (XEXP (x, 0));
15121 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
15122 return get_pool_constant (x);
15128 ix86_expand_move (enum machine_mode mode, rtx operands[])
15131 enum tls_model model;
15133 if (VALID_AVX256_REG_MODE (mode))
15134 cfun->machine->use_avx256_p = true;
15139 if (GET_CODE (op1) == SYMBOL_REF)
15141 model = SYMBOL_REF_TLS_MODEL (op1);
15144 op1 = legitimize_tls_address (op1, model, true);
15145 op1 = force_operand (op1, op0);
15149 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15150 && SYMBOL_REF_DLLIMPORT_P (op1))
15151 op1 = legitimize_dllimport_symbol (op1, false);
15153 else if (GET_CODE (op1) == CONST
15154 && GET_CODE (XEXP (op1, 0)) == PLUS
15155 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
15157 rtx addend = XEXP (XEXP (op1, 0), 1);
15158 rtx symbol = XEXP (XEXP (op1, 0), 0);
15161 model = SYMBOL_REF_TLS_MODEL (symbol);
15163 tmp = legitimize_tls_address (symbol, model, true);
15164 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15165 && SYMBOL_REF_DLLIMPORT_P (symbol))
15166 tmp = legitimize_dllimport_symbol (symbol, true);
15170 tmp = force_operand (tmp, NULL);
15171 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
15172 op0, 1, OPTAB_DIRECT);
15178 if ((flag_pic || MACHOPIC_INDIRECT)
15179 && mode == Pmode && symbolic_operand (op1, Pmode))
15181 if (TARGET_MACHO && !TARGET_64BIT)
15184 /* dynamic-no-pic */
15185 if (MACHOPIC_INDIRECT)
15187 rtx temp = ((reload_in_progress
15188 || ((op0 && REG_P (op0))
15190 ? op0 : gen_reg_rtx (Pmode));
15191 op1 = machopic_indirect_data_reference (op1, temp);
15193 op1 = machopic_legitimize_pic_address (op1, mode,
15194 temp == op1 ? 0 : temp);
15196 if (op0 != op1 && GET_CODE (op0) != MEM)
15198 rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
15202 if (GET_CODE (op0) == MEM)
15203 op1 = force_reg (Pmode, op1);
15207 if (GET_CODE (temp) != REG)
15208 temp = gen_reg_rtx (Pmode);
15209 temp = legitimize_pic_address (op1, temp);
15214 /* dynamic-no-pic */
15220 op1 = force_reg (Pmode, op1);
15221 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
15223 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
15224 op1 = legitimize_pic_address (op1, reg);
15233 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
15234 || !push_operand (op0, mode))
15236 op1 = force_reg (mode, op1);
15238 if (push_operand (op0, mode)
15239 && ! general_no_elim_operand (op1, mode))
15240 op1 = copy_to_mode_reg (mode, op1);
  /* In 64bit compilation, force large constants into a register
     so that they get CSEed.  */
15244 if (can_create_pseudo_p ()
15245 && (mode == DImode) && TARGET_64BIT
15246 && immediate_operand (op1, mode)
15247 && !x86_64_zext_immediate_operand (op1, VOIDmode)
15248 && !register_operand (op0, mode)
15250 op1 = copy_to_mode_reg (mode, op1);
15252 if (can_create_pseudo_p ()
15253 && FLOAT_MODE_P (mode)
15254 && GET_CODE (op1) == CONST_DOUBLE)
15256 /* If we are loading a floating point constant to a register,
15257 force the value to memory now, since we'll get better code
15258 out the back end. */
15260 op1 = validize_mem (force_const_mem (mode, op1));
15261 if (!register_operand (op0, mode))
15263 rtx temp = gen_reg_rtx (mode);
15264 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
15265 emit_move_insn (op0, temp);
15271 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15275 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
15277 rtx op0 = operands[0], op1 = operands[1];
15278 unsigned int align = GET_MODE_ALIGNMENT (mode);
15280 if (VALID_AVX256_REG_MODE (mode))
15281 cfun->machine->use_avx256_p = true;
  /* Force constants other than zero into memory.  We do not know how
     the instructions used to build constants modify the upper 64 bits
     of the register; once we have that information, we may be able
     to handle some of them more efficiently.  */
15287 if (can_create_pseudo_p ()
15288 && register_operand (op0, mode)
15289 && (CONSTANT_P (op1)
15290 || (GET_CODE (op1) == SUBREG
15291 && CONSTANT_P (SUBREG_REG (op1))))
15292 && !standard_sse_constant_p (op1))
15293 op1 = validize_mem (force_const_mem (mode, op1));
  /* We need to check memory alignment for SSE mode since the attribute
     can make operands unaligned.  */
15297 if (can_create_pseudo_p ()
15298 && SSE_REG_MODE_P (mode)
15299 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
15300 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
15304 /* ix86_expand_vector_move_misalign() does not like constants ... */
15305 if (CONSTANT_P (op1)
15306 || (GET_CODE (op1) == SUBREG
15307 && CONSTANT_P (SUBREG_REG (op1))))
15308 op1 = validize_mem (force_const_mem (mode, op1));
15310 /* ... nor both arguments in memory. */
15311 if (!register_operand (op0, mode)
15312 && !register_operand (op1, mode))
15313 op1 = force_reg (mode, op1);
15315 tmp[0] = op0; tmp[1] = op1;
15316 ix86_expand_vector_move_misalign (mode, tmp);
15320 /* Make operand1 a register if it isn't already. */
15321 if (can_create_pseudo_p ()
15322 && !register_operand (op0, mode)
15323 && !register_operand (op1, mode))
15325 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
15329 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15332 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
15333 straight to ix86_expand_vector_move. */
/* Code generation for scalar reg-reg moves of single and double precision data:
     if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
       movaps reg, reg
     else
       movss reg, reg
     if (x86_sse_partial_reg_dependency == true)
       movapd reg, reg
     else
       movsd reg, reg

   Code generation for scalar loads of double precision data:
     if (x86_sse_split_regs == true)
       movlpd mem, reg      (gas syntax)
     else
       movsd mem, reg

   Code generation for unaligned packed loads of single precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
     if (x86_sse_unaligned_move_optimal)
       movups mem, reg
     else if (x86_sse_partial_reg_dependency == true)
       xorps reg, reg; movlps mem, reg; movhps mem+8, reg
     else
       movlps mem, reg; movhps mem+8, reg

   Code generation for unaligned packed loads of double precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
     if (x86_sse_unaligned_move_optimal)
       movupd mem, reg
     else if (x86_sse_split_regs == true)
       movlpd mem, reg; movhpd mem+8, reg
     else
       movsd mem, reg; movhpd mem+8, reg  */
15385 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
15389 if (VALID_AVX256_REG_MODE (mode))
15390 cfun->machine->use_avx256_p = true;
15397 switch (GET_MODE_CLASS (mode))
15399 case MODE_VECTOR_INT:
15401 switch (GET_MODE_SIZE (mode))
15404 /* If we're optimizing for size, movups is the smallest. */
15405 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15407 op0 = gen_lowpart (V4SFmode, op0);
15408 op1 = gen_lowpart (V4SFmode, op1);
15409 emit_insn (gen_avx_movups (op0, op1));
15412 op0 = gen_lowpart (V16QImode, op0);
15413 op1 = gen_lowpart (V16QImode, op1);
15414 emit_insn (gen_avx_movdqu (op0, op1));
15417 op0 = gen_lowpart (V32QImode, op0);
15418 op1 = gen_lowpart (V32QImode, op1);
15419 emit_insn (gen_avx_movdqu256 (op0, op1));
15422 gcc_unreachable ();
15425 case MODE_VECTOR_FLOAT:
15426 op0 = gen_lowpart (mode, op0);
15427 op1 = gen_lowpart (mode, op1);
15432 emit_insn (gen_avx_movups (op0, op1));
15435 emit_insn (gen_avx_movups256 (op0, op1));
15438 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15440 op0 = gen_lowpart (V4SFmode, op0);
15441 op1 = gen_lowpart (V4SFmode, op1);
15442 emit_insn (gen_avx_movups (op0, op1));
15445 emit_insn (gen_avx_movupd (op0, op1));
15448 emit_insn (gen_avx_movupd256 (op0, op1));
15451 gcc_unreachable ();
15456 gcc_unreachable ();
15464 /* If we're optimizing for size, movups is the smallest. */
15465 if (optimize_insn_for_size_p ()
15466 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15468 op0 = gen_lowpart (V4SFmode, op0);
15469 op1 = gen_lowpart (V4SFmode, op1);
15470 emit_insn (gen_sse_movups (op0, op1));
      /* ??? If we have typed data, then it would appear that using
	 movdqu is the only way to get unaligned data loaded with
	 integer type instructions.  */
15477 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15479 op0 = gen_lowpart (V16QImode, op0);
15480 op1 = gen_lowpart (V16QImode, op1);
15481 emit_insn (gen_sse2_movdqu (op0, op1));
15485 if (TARGET_SSE2 && mode == V2DFmode)
15489 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15491 op0 = gen_lowpart (V2DFmode, op0);
15492 op1 = gen_lowpart (V2DFmode, op1);
15493 emit_insn (gen_sse2_movupd (op0, op1));
15497 /* When SSE registers are split into halves, we can avoid
15498 writing to the top half twice. */
15499 if (TARGET_SSE_SPLIT_REGS)
15501 emit_clobber (op0);
15506 /* ??? Not sure about the best option for the Intel chips.
15507 The following would seem to satisfy; the register is
15508 entirely cleared, breaking the dependency chain. We
15509 then store to the upper half, with a dependency depth
15510 of one. A rumor has it that Intel recommends two movsd
15511 followed by an unpacklpd, but this is unconfirmed. And
15512 given that the dependency depth of the unpacklpd would
15513 still be one, I'm not sure why this would be better. */
15514 zero = CONST0_RTX (V2DFmode);
15517 m = adjust_address (op1, DFmode, 0);
15518 emit_insn (gen_sse2_loadlpd (op0, zero, m));
15519 m = adjust_address (op1, DFmode, 8);
15520 emit_insn (gen_sse2_loadhpd (op0, op0, m));
15524 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15526 op0 = gen_lowpart (V4SFmode, op0);
15527 op1 = gen_lowpart (V4SFmode, op1);
15528 emit_insn (gen_sse_movups (op0, op1));
15532 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
15533 emit_move_insn (op0, CONST0_RTX (mode));
15535 emit_clobber (op0);
15537 if (mode != V4SFmode)
15538 op0 = gen_lowpart (V4SFmode, op0);
15539 m = adjust_address (op1, V2SFmode, 0);
15540 emit_insn (gen_sse_loadlps (op0, op0, m));
15541 m = adjust_address (op1, V2SFmode, 8);
15542 emit_insn (gen_sse_loadhps (op0, op0, m));
15545 else if (MEM_P (op0))
15547 /* If we're optimizing for size, movups is the smallest. */
15548 if (optimize_insn_for_size_p ()
15549 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15551 op0 = gen_lowpart (V4SFmode, op0);
15552 op1 = gen_lowpart (V4SFmode, op1);
15553 emit_insn (gen_sse_movups (op0, op1));
15557 /* ??? Similar to above, only less clear because of quote
15558 typeless stores unquote. */
15559 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
15560 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15562 op0 = gen_lowpart (V16QImode, op0);
15563 op1 = gen_lowpart (V16QImode, op1);
15564 emit_insn (gen_sse2_movdqu (op0, op1));
15568 if (TARGET_SSE2 && mode == V2DFmode)
15570 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15572 op0 = gen_lowpart (V2DFmode, op0);
15573 op1 = gen_lowpart (V2DFmode, op1);
15574 emit_insn (gen_sse2_movupd (op0, op1));
15578 m = adjust_address (op0, DFmode, 0);
15579 emit_insn (gen_sse2_storelpd (m, op1));
15580 m = adjust_address (op0, DFmode, 8);
15581 emit_insn (gen_sse2_storehpd (m, op1));
15586 if (mode != V4SFmode)
15587 op1 = gen_lowpart (V4SFmode, op1);
15589 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15591 op0 = gen_lowpart (V4SFmode, op0);
15592 emit_insn (gen_sse_movups (op0, op1));
15596 m = adjust_address (op0, V2SFmode, 0);
15597 emit_insn (gen_sse_storelps (m, op1));
15598 m = adjust_address (op0, V2SFmode, 8);
15599 emit_insn (gen_sse_storehps (m, op1));
15604 gcc_unreachable ();
15607 /* Expand a push in MODE. This is some mode for which we do not support
15608 proper push instructions, at least from the registers that we expect
15609 the value to live in. */
15612 ix86_expand_push (enum machine_mode mode, rtx x)
15616 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
15617 GEN_INT (-GET_MODE_SIZE (mode)),
15618 stack_pointer_rtx, 1, OPTAB_DIRECT);
15619 if (tmp != stack_pointer_rtx)
15620 emit_move_insn (stack_pointer_rtx, tmp);
15622 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
  /* When we push an operand onto the stack, it has to be aligned at
     least at the function argument boundary.  However, since we don't
     have the argument type, we can't determine the actual argument
     boundary.  */
15628 emit_move_insn (tmp, x);
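  /* For example, pushing a 16-byte vector on x86_32 expands to the
     moral equivalent of (an illustrative sketch, not a fixed sequence):
	subl	$16, %esp
	movups	%xmm0, (%esp)  */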
15631 /* Helper function of ix86_fixup_binary_operands to canonicalize
15632 operand order. Returns true if the operands should be swapped. */
15635 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
15638 rtx dst = operands[0];
15639 rtx src1 = operands[1];
15640 rtx src2 = operands[2];
15642 /* If the operation is not commutative, we can't do anything. */
15643 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
15646 /* Highest priority is that src1 should match dst. */
15647 if (rtx_equal_p (dst, src1))
15649 if (rtx_equal_p (dst, src2))
15652 /* Next highest priority is that immediate constants come second. */
15653 if (immediate_operand (src2, mode))
15655 if (immediate_operand (src1, mode))
15658 /* Lowest priority is that memory references should come second. */
15668 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
15669 destination to use for the operation. If different from the true
15670 destination in operands[0], a copy operation will be required. */
15673 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
15676 rtx dst = operands[0];
15677 rtx src1 = operands[1];
15678 rtx src2 = operands[2];
15680 /* Canonicalize operand order. */
15681 if (ix86_swap_binary_operands_p (code, mode, operands))
15685 /* It is invalid to swap operands of different modes. */
15686 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
15693 /* Both source operands cannot be in memory. */
15694 if (MEM_P (src1) && MEM_P (src2))
15696 /* Optimization: Only read from memory once. */
15697 if (rtx_equal_p (src1, src2))
15699 src2 = force_reg (mode, src2);
15703 src2 = force_reg (mode, src2);
15706 /* If the destination is memory, and we do not have matching source
15707 operands, do things in registers. */
15708 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
15709 dst = gen_reg_rtx (mode);
15711 /* Source 1 cannot be a constant. */
15712 if (CONSTANT_P (src1))
15713 src1 = force_reg (mode, src1);
15715 /* Source 1 cannot be a non-matching memory. */
15716 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
15717 src1 = force_reg (mode, src1);
15719 operands[1] = src1;
15720 operands[2] = src2;
15724 /* Similarly, but assume that the destination has already been
15725 set up properly. */
15728 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
15729 enum machine_mode mode, rtx operands[])
15731 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
15732 gcc_assert (dst == operands[0]);
/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine than just general_operand, which would allow 3 separate
   memory references (one output, two input) in a single insn.  */
15740 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
15743 rtx src1, src2, dst, op, clob;
15745 dst = ix86_fixup_binary_operands (code, mode, operands);
15746 src1 = operands[1];
15747 src2 = operands[2];
15749 /* Emit the instruction. */
15751 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
15752 if (reload_in_progress)
15754 /* Reload doesn't know about the flags register, and doesn't know that
15755 it doesn't want to clobber it. We can only do this with PLUS. */
15756 gcc_assert (code == PLUS);
15759 else if (reload_completed
15761 && !rtx_equal_p (dst, src1))
15763 /* This is going to be an LEA; avoid splitting it later. */
15768 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15769 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
15772 /* Fix up the destination if needed. */
15773 if (dst != operands[0])
15774 emit_move_insn (operands[0], dst);
15777 /* Return TRUE or FALSE depending on whether the binary operator meets the
15778 appropriate constraints. */
15781 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
15784 rtx dst = operands[0];
15785 rtx src1 = operands[1];
15786 rtx src2 = operands[2];
15788 /* Both source operands cannot be in memory. */
15789 if (MEM_P (src1) && MEM_P (src2))
15792 /* Canonicalize operand order for commutative operators. */
15793 if (ix86_swap_binary_operands_p (code, mode, operands))
15800 /* If the destination is memory, we must have a matching source operand. */
15801 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
15804 /* Source 1 cannot be a constant. */
15805 if (CONSTANT_P (src1))
15808 /* Source 1 cannot be a non-matching memory. */
15809 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
15811 /* Support "andhi/andsi/anddi" as a zero-extending move. */
15812 return (code == AND
15815 || (TARGET_64BIT && mode == DImode))
15816 && CONST_INT_P (src2)
15817 && (INTVAL (src2) == 0xff
15818 || INTVAL (src2) == 0xffff));
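  /* E.g. (set (reg:SI 0) (and:SI (reg:SI 1) (const_int 255))) is
     accepted even though the destination does not match source 1,
     because the insn is in effect just a zero extension of the low
     byte.  */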
/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine than just general_operand, which would allow 2 separate
   memory references (one output, one input) in a single insn.  */
15829 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
15832 int matching_memory;
15833 rtx src, dst, op, clob;
15838 /* If the destination is memory, and we do not have matching source
15839 operands, do things in registers. */
15840 matching_memory = 0;
15843 if (rtx_equal_p (dst, src))
15844 matching_memory = 1;
15846 dst = gen_reg_rtx (mode);
15849 /* When source operand is memory, destination must match. */
15850 if (MEM_P (src) && !matching_memory)
15851 src = force_reg (mode, src);
15853 /* Emit the instruction. */
15855 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
15856 if (reload_in_progress || code == NOT)
15858 /* Reload doesn't know about the flags register, and doesn't know that
15859 it doesn't want to clobber it. */
15860 gcc_assert (code == NOT);
15865 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15866 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
15869 /* Fix up the destination if needed. */
15870 if (dst != operands[0])
15871 emit_move_insn (operands[0], dst);
/* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
   divisor are within the range [0-255].  */
15878 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
15881 rtx end_label, qimode_label;
15882 rtx insn, div, mod;
15883 rtx scratch, tmp0, tmp1, tmp2;
15884 rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
15885 rtx (*gen_zero_extend) (rtx, rtx);
15886 rtx (*gen_test_ccno_1) (rtx, rtx);
15891 gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
15892 gen_test_ccno_1 = gen_testsi_ccno_1;
15893 gen_zero_extend = gen_zero_extendqisi2;
15896 gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
15897 gen_test_ccno_1 = gen_testdi_ccno_1;
15898 gen_zero_extend = gen_zero_extendqidi2;
15901 gcc_unreachable ();
15904 end_label = gen_label_rtx ();
15905 qimode_label = gen_label_rtx ();
15907 scratch = gen_reg_rtx (mode);
  /* Use 8bit unsigned divmod if dividend and divisor are within
     the range [0-255].  */
15911 emit_move_insn (scratch, operands[2]);
15912 scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
15913 scratch, 1, OPTAB_DIRECT);
15914 emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
15915 tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
15916 tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
15917 tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
15918 gen_rtx_LABEL_REF (VOIDmode, qimode_label),
15920 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
15921 predict_jump (REG_BR_PROB_BASE * 50 / 100);
15922 JUMP_LABEL (insn) = qimode_label;
  /* Generate original signed/unsigned divmod.  */
15925 div = gen_divmod4_1 (operands[0], operands[1],
15926 operands[2], operands[3]);
15929 /* Branch to the end. */
15930 emit_jump_insn (gen_jump (end_label));
15933 /* Generate 8bit unsigned divide. */
15934 emit_label (qimode_label);
15935 /* Don't use operands[0] for result of 8bit divide since not all
15936 registers support QImode ZERO_EXTRACT. */
15937 tmp0 = simplify_gen_subreg (HImode, scratch, mode, 0);
15938 tmp1 = simplify_gen_subreg (HImode, operands[2], mode, 0);
15939 tmp2 = simplify_gen_subreg (QImode, operands[3], mode, 0);
15940 emit_insn (gen_udivmodhiqi3 (tmp0, tmp1, tmp2));
15944 div = gen_rtx_DIV (SImode, operands[2], operands[3]);
15945 mod = gen_rtx_MOD (SImode, operands[2], operands[3]);
15949 div = gen_rtx_UDIV (SImode, operands[2], operands[3]);
15950 mod = gen_rtx_UMOD (SImode, operands[2], operands[3]);
15953 /* Extract remainder from AH. */
15954 tmp1 = gen_rtx_ZERO_EXTRACT (mode, tmp0, GEN_INT (8), GEN_INT (8));
15955 if (REG_P (operands[1]))
15956 insn = emit_move_insn (operands[1], tmp1);
      /* Need a new scratch register since the old one has the result
	 of the 8bit divide.  */
15961 scratch = gen_reg_rtx (mode);
15962 emit_move_insn (scratch, tmp1);
15963 insn = emit_move_insn (operands[1], scratch);
15965 set_unique_reg_note (insn, REG_EQUAL, mod);
15967 /* Zero extend quotient from AL. */
15968 tmp1 = gen_lowpart (QImode, tmp0);
15969 insn = emit_insn (gen_zero_extend (operands[0], tmp1));
15970 set_unique_reg_note (insn, REG_EQUAL, div);
15972 emit_label (end_label);
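/* The emitted code is equivalent to this illustrative sketch for the
   unsigned SImode case (the names are descriptive, not real operands):

     if (((dividend | divisor) & ~0xff) == 0)
       {
	 // Both fit in 8 bits: one cheap divb; AL = quotient, AH = remainder.
	 quotient = (unsigned char) dividend / (unsigned char) divisor;
	 remainder = (unsigned char) dividend % (unsigned char) divisor;
       }
     else
       {
	 quotient = dividend / divisor;
	 remainder = dividend % divisor;
       }  */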
15975 #define LEA_SEARCH_THRESHOLD 12
/* Search backward for a non-agu definition of register number REGNO1
   or register number REGNO2 in INSN's basic block until we either
   1. pass LEA_SEARCH_THRESHOLD instructions, or
   2. reach the BB boundary, or
   3. reach an agu definition.
   Returns the distance between the non-agu definition point and INSN.
   If no definition point is found, returns -1.  */
15986 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
15989 basic_block bb = BLOCK_FOR_INSN (insn);
15992 enum attr_type insn_type;
15994 if (insn != BB_HEAD (bb))
15996 rtx prev = PREV_INSN (insn);
15997 while (prev && distance < LEA_SEARCH_THRESHOLD)
15999 if (NONDEBUG_INSN_P (prev))
16002 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16003 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16004 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16005 && (regno1 == DF_REF_REGNO (*def_rec)
16006 || regno2 == DF_REF_REGNO (*def_rec)))
16008 insn_type = get_attr_type (prev);
16009 if (insn_type != TYPE_LEA)
16013 if (prev == BB_HEAD (bb))
16015 prev = PREV_INSN (prev);
16019 if (distance < LEA_SEARCH_THRESHOLD)
16023 bool simple_loop = false;
16025 FOR_EACH_EDGE (e, ei, bb->preds)
16028 simple_loop = true;
16034 rtx prev = BB_END (bb);
16037 && distance < LEA_SEARCH_THRESHOLD)
16039 if (NONDEBUG_INSN_P (prev))
16042 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16043 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16044 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16045 && (regno1 == DF_REF_REGNO (*def_rec)
16046 || regno2 == DF_REF_REGNO (*def_rec)))
16048 insn_type = get_attr_type (prev);
16049 if (insn_type != TYPE_LEA)
16053 prev = PREV_INSN (prev);
16061 /* get_attr_type may modify recog data. We want to make sure
16062 that recog data is valid for instruction INSN, on which
16063 distance_non_agu_define is called. INSN is unchanged here. */
16064 extract_insn_cached (insn);
/* Return the distance between INSN and the next insn that uses
   register number REGNO0 in a memory address.  Return -1 if no such
   use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set.  */
16073 distance_agu_use (unsigned int regno0, rtx insn)
16075 basic_block bb = BLOCK_FOR_INSN (insn);
16080 if (insn != BB_END (bb))
16082 rtx next = NEXT_INSN (insn);
16083 while (next && distance < LEA_SEARCH_THRESHOLD)
16085 if (NONDEBUG_INSN_P (next))
16089 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16090 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16091 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16092 && regno0 == DF_REF_REGNO (*use_rec))
16094 /* Return DISTANCE if OP0 is used in memory
16095 address in NEXT. */
16099 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16100 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16101 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16102 && regno0 == DF_REF_REGNO (*def_rec))
16104 /* Return -1 if OP0 is set in NEXT. */
16108 if (next == BB_END (bb))
16110 next = NEXT_INSN (next);
16114 if (distance < LEA_SEARCH_THRESHOLD)
16118 bool simple_loop = false;
16120 FOR_EACH_EDGE (e, ei, bb->succs)
16123 simple_loop = true;
16129 rtx next = BB_HEAD (bb);
16132 && distance < LEA_SEARCH_THRESHOLD)
16134 if (NONDEBUG_INSN_P (next))
16138 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16139 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16140 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16141 && regno0 == DF_REF_REGNO (*use_rec))
16143 /* Return DISTANCE if OP0 is used in memory
16144 address in NEXT. */
16148 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16149 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16150 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16151 && regno0 == DF_REF_REGNO (*def_rec))
16153 /* Return -1 if OP0 is set in NEXT. */
16158 next = NEXT_INSN (next);
/* Define this macro to tune the LEA priority vs ADD; it takes effect
   when there is a dilemma between choosing LEA or ADD.
   Negative value: ADD is preferred over LEA.
   Positive value: LEA is preferred over ADD.  */
16171 #define IX86_LEA_PRIORITY 2
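/* For example, with dist_define = 3 and dist_use = 6, we have
   3 + IX86_LEA_PRIORITY < 6, so the backward non-agu dependence is the
   nearer one and ADD is used; with dist_define = 5 and dist_use = 6 the
   forward agu dependence wins and LEA is used (see ix86_lea_for_add_ok
   below).  */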
/* Return true if it is ok to optimize an ADD operation to an LEA
   operation to avoid flag register consumption.  For most processors,
   ADD is faster than LEA.  For processors like ATOM, if the
   destination register of LEA holds an actual address which will be
   used soon, LEA is better; otherwise ADD is better.  */
16180 ix86_lea_for_add_ok (rtx insn, rtx operands[])
16182 unsigned int regno0 = true_regnum (operands[0]);
16183 unsigned int regno1 = true_regnum (operands[1]);
16184 unsigned int regno2 = true_regnum (operands[2]);
  /* If a = b + c with a != b and a != c, we must use the LEA form.  */
16187 if (regno0 != regno1 && regno0 != regno2)
16190 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16194 int dist_define, dist_use;
16196 /* Return false if REGNO0 isn't used in memory address. */
16197 dist_use = distance_agu_use (regno0, insn);
16201 dist_define = distance_non_agu_define (regno1, regno2, insn);
16202 if (dist_define <= 0)
  /* If this insn has both a backward non-agu dependence and a forward
     agu dependence, the one with the shorter distance takes effect.  */
16207 if ((dist_define + IX86_LEA_PRIORITY) < dist_use)
/* Return true if the destination reg of SET_BODY is the shift count of
   USE_BODY.  */
16218 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
16224 /* Retrieve destination of SET_BODY. */
16225 switch (GET_CODE (set_body))
16228 set_dest = SET_DEST (set_body);
16229 if (!set_dest || !REG_P (set_dest))
16233 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
16234 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
16242 /* Retrieve shift count of USE_BODY. */
16243 switch (GET_CODE (use_body))
16246 shift_rtx = XEXP (use_body, 1);
16249 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
16250 if (ix86_dep_by_shift_count_body (set_body,
16251 XVECEXP (use_body, 0, i)))
16259 && (GET_CODE (shift_rtx) == ASHIFT
16260 || GET_CODE (shift_rtx) == LSHIFTRT
16261 || GET_CODE (shift_rtx) == ASHIFTRT
16262 || GET_CODE (shift_rtx) == ROTATE
16263 || GET_CODE (shift_rtx) == ROTATERT))
16265 rtx shift_count = XEXP (shift_rtx, 1);
16267 /* Return true if shift count is dest of SET_BODY. */
16268 if (REG_P (shift_count)
16269 && true_regnum (set_dest) == true_regnum (shift_count))
/* Return true if the destination reg of SET_INSN is the shift count of
   USE_INSN.  */
16280 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
16282 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
16283 PATTERN (use_insn));
16286 /* Return TRUE or FALSE depending on whether the unary operator meets the
16287 appropriate constraints. */
16290 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
16291 enum machine_mode mode ATTRIBUTE_UNUSED,
16292 rtx operands[2] ATTRIBUTE_UNUSED)
  /* If one of the operands is memory, source and destination must match.  */
16295 if ((MEM_P (operands[0])
16296 || MEM_P (operands[1]))
16297 && ! rtx_equal_p (operands[0], operands[1]))
16302 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
16303 are ok, keeping in mind the possible movddup alternative. */
16306 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
16308 if (MEM_P (operands[0]))
16309 return rtx_equal_p (operands[0], operands[1 + high]);
16310 if (MEM_P (operands[1]) && MEM_P (operands[2]))
16311 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
16315 /* Post-reload splitter for converting an SF or DFmode value in an
16316 SSE register into an unsigned SImode. */
16319 ix86_split_convert_uns_si_sse (rtx operands[])
16321 enum machine_mode vecmode;
16322 rtx value, large, zero_or_two31, input, two31, x;
16324 large = operands[1];
16325 zero_or_two31 = operands[2];
16326 input = operands[3];
16327 two31 = operands[4];
16328 vecmode = GET_MODE (large);
16329 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
16331 /* Load up the value into the low element. We must ensure that the other
16332 elements are valid floats -- zero is the easiest such value. */
16335 if (vecmode == V4SFmode)
16336 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
16338 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
16342 input = gen_rtx_REG (vecmode, REGNO (input));
16343 emit_move_insn (value, CONST0_RTX (vecmode));
16344 if (vecmode == V4SFmode)
16345 emit_insn (gen_sse_movss (value, value, input));
16347 emit_insn (gen_sse2_movsd (value, value, input));
16350 emit_move_insn (large, two31);
16351 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
16353 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
16354 emit_insn (gen_rtx_SET (VOIDmode, large, x));
16356 x = gen_rtx_AND (vecmode, zero_or_two31, large);
16357 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
16359 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
16360 emit_insn (gen_rtx_SET (VOIDmode, value, x));
16362 large = gen_rtx_REG (V4SImode, REGNO (large));
16363 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
16365 x = gen_rtx_REG (V4SImode, REGNO (value));
16366 if (vecmode == V4SFmode)
16367 emit_insn (gen_sse2_cvttps2dq (x, value));
16369 emit_insn (gen_sse2_cvttpd2dq (x, value));
16372 emit_insn (gen_xorv4si3 (value, value, large));
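#if 0
/* Illustrative scalar equivalent of the sequence above; a sketch only,
   assuming two's complement int and truncating (cvtt) conversions.  */
static unsigned int
uns_si_from_sse_fp_sketch (double value)
{
  if (value < 2147483648.0)	/* value < 2^31: direct truncation works.  */
    return (int) value;
  /* Otherwise subtract 2^31 before truncating, then put the sign bit
     back with the final xor.  */
  return (unsigned int) (int) (value - 2147483648.0) ^ 0x80000000u;
}
#endif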
16375 /* Convert an unsigned DImode value into a DFmode, using only SSE.
16376 Expects the 64-bit DImode to be supplied in a pair of integral
16377 registers. Requires SSE2; will use SSE3 if available. For x86_32,
16378 -mfpmath=sse, !optimize_size only. */
16381 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
16383 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
16384 rtx int_xmm, fp_xmm;
16385 rtx biases, exponents;
16388 int_xmm = gen_reg_rtx (V4SImode);
16389 if (TARGET_INTER_UNIT_MOVES)
16390 emit_insn (gen_movdi_to_sse (int_xmm, input));
16391 else if (TARGET_SSE_SPLIT_REGS)
16393 emit_clobber (int_xmm);
16394 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
16398 x = gen_reg_rtx (V2DImode);
16399 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
16400 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
16403 x = gen_rtx_CONST_VECTOR (V4SImode,
16404 gen_rtvec (4, GEN_INT (0x43300000UL),
16405 GEN_INT (0x45300000UL),
16406 const0_rtx, const0_rtx));
16407 exponents = validize_mem (force_const_mem (V4SImode, x));
16409 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
16410 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
16412 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
16413 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
16414 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
16415 (0x1.0p84 + double(fp_value_hi_xmm)).
16416 Note these exponents differ by 32. */
16418 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
16420 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
16421 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
16422 real_ldexp (&bias_lo_rvt, &dconst1, 52);
16423 real_ldexp (&bias_hi_rvt, &dconst1, 84);
16424 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
16425 x = const_double_from_real_value (bias_hi_rvt, DFmode);
16426 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
16427 biases = validize_mem (force_const_mem (V2DFmode, biases));
16428 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
16430 /* Add the upper and lower DFmode values together. */
16432 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
16435 x = copy_to_mode_reg (V2DFmode, fp_xmm);
16436 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
16437 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
16440 ix86_expand_vector_extract (false, target, fp_xmm, 0);
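#if 0
/* Illustrative scalar equivalent of the exponent-bias trick; a sketch
   only, assuming little-endian IEEE doubles and <stdint.h> types.  */
static double
uns_di_to_df_sketch (uint64_t x)
{
  union { uint64_t u; double d; } lo, hi;
  lo.u = ((uint64_t) 0x43300000 << 32) | (uint32_t) x;	       /* 2^52 + lo32 */
  hi.u = ((uint64_t) 0x45300000 << 32) | (uint32_t) (x >> 32); /* 2^84 + hi32 * 2^32 */
  return (lo.d - 0x1.0p52) + (hi.d - 0x1.0p84);
}
#endif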
16443 /* Not used, but eases macroization of patterns. */
16445 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
16446 rtx input ATTRIBUTE_UNUSED)
16448 gcc_unreachable ();
16451 /* Convert an unsigned SImode value into a DFmode. Only currently used
16452 for SSE, but applicable anywhere. */
16455 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
16457 REAL_VALUE_TYPE TWO31r;
16460 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
16461 NULL, 1, OPTAB_DIRECT);
16463 fp = gen_reg_rtx (DFmode);
16464 emit_insn (gen_floatsidf2 (fp, x));
16466 real_ldexp (&TWO31r, &dconst1, 31);
16467 x = const_double_from_real_value (TWO31r, DFmode);
16469 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
16471 emit_move_insn (target, x);
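#if 0
/* Illustrative scalar equivalent; a sketch only, relying on the usual
   two's complement wraparound of the unsigned subtraction.  */
static double
uns_si_to_df_sketch (unsigned int u)
{
  /* Bias into the signed range, convert, then add the bias back in FP.  */
  return (double) (int) (u - 0x80000000u) + 2147483648.0;
}
#endif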
16474 /* Convert a signed DImode value into a DFmode. Only used for SSE in
16475 32-bit mode; otherwise we have a direct convert instruction. */
16478 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
16480 REAL_VALUE_TYPE TWO32r;
16481 rtx fp_lo, fp_hi, x;
16483 fp_lo = gen_reg_rtx (DFmode);
16484 fp_hi = gen_reg_rtx (DFmode);
16486 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
16488 real_ldexp (&TWO32r, &dconst1, 32);
16489 x = const_double_from_real_value (TWO32r, DFmode);
16490 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
16492 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
16494 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
16497 emit_move_insn (target, x);
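#if 0
/* Illustrative scalar equivalent; a sketch only, assuming an arithmetic
   right shift of the signed 64-bit input.  */
static double
sign_di_to_df_sketch (long long x)
{
  return (double) (int) (x >> 32) * 4294967296.0	/* high part * 2^32 */
	 + (double) (unsigned int) x;			/* unsigned low part */
}
#endif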
16500 /* Convert an unsigned SImode value into a SFmode, using only SSE.
16501 For x86_32, -mfpmath=sse, !optimize_size only. */
16503 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
16505 REAL_VALUE_TYPE ONE16r;
16506 rtx fp_hi, fp_lo, int_hi, int_lo, x;
16508 real_ldexp (&ONE16r, &dconst1, 16);
16509 x = const_double_from_real_value (ONE16r, SFmode);
16510 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
16511 NULL, 0, OPTAB_DIRECT);
16512 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
16513 NULL, 0, OPTAB_DIRECT);
16514 fp_hi = gen_reg_rtx (SFmode);
16515 fp_lo = gen_reg_rtx (SFmode);
16516 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
16517 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
16518 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
16520 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
16522 if (!rtx_equal_p (target, fp_hi))
16523 emit_move_insn (target, fp_hi);
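#if 0
/* Illustrative scalar equivalent; a sketch only.  Each 16-bit half
   converts to SFmode exactly, the multiply by 2^16 is exact, and the
   final add rounds just once.  */
static float
uns_si_to_sf_sketch (unsigned int u)
{
  return (float) (int) (u >> 16) * 65536.0f + (float) (int) (u & 0xffff);
}
#endif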
/* A subroutine of ix86_build_signbit_mask.  If VECT is true,
   then replicate the value for all elements of the vector
   register.  */
16531 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
16538 v = gen_rtvec (4, value, value, value, value);
16539 return gen_rtx_CONST_VECTOR (V4SImode, v);
16543 v = gen_rtvec (2, value, value);
16544 return gen_rtx_CONST_VECTOR (V2DImode, v);
16548 v = gen_rtvec (8, value, value, value, value,
16549 value, value, value, value);
16551 v = gen_rtvec (8, value, CONST0_RTX (SFmode),
16552 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16553 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16554 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16555 return gen_rtx_CONST_VECTOR (V8SFmode, v);
16559 v = gen_rtvec (4, value, value, value, value);
16561 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
16562 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16563 return gen_rtx_CONST_VECTOR (V4SFmode, v);
16567 v = gen_rtvec (4, value, value, value, value);
16569 v = gen_rtvec (4, value, CONST0_RTX (DFmode),
16570 CONST0_RTX (DFmode), CONST0_RTX (DFmode));
16571 return gen_rtx_CONST_VECTOR (V4DFmode, v);
16575 v = gen_rtvec (2, value, value);
16577 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
16578 return gen_rtx_CONST_VECTOR (V2DFmode, v);
16581 gcc_unreachable ();
16585 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
16586 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
16587 for an SSE register. If VECT is true, then replicate the mask for
16588 all elements of the vector register. If INVERT is true, then create
16589 a mask excluding the sign bit. */
16592 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
16594 enum machine_mode vec_mode, imode;
16595 HOST_WIDE_INT hi, lo;
16600 /* Find the sign bit, sign extended to 2*HWI. */
16607 mode = GET_MODE_INNER (mode);
16609 lo = 0x80000000, hi = lo < 0;
16616 mode = GET_MODE_INNER (mode);
16618 if (HOST_BITS_PER_WIDE_INT >= 64)
16619 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
16621 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
16626 vec_mode = VOIDmode;
16627 if (HOST_BITS_PER_WIDE_INT >= 64)
16630 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
16637 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
16641 lo = ~lo, hi = ~hi;
16647 mask = immed_double_const (lo, hi, imode);
16649 vec = gen_rtvec (2, v, mask);
16650 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
16651 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
16658 gcc_unreachable ();
16662 lo = ~lo, hi = ~hi;
16664 /* Force this value into the low part of a fp vector constant. */
16665 mask = immed_double_const (lo, hi, imode);
16666 mask = gen_lowpart (mode, mask);
16668 if (vec_mode == VOIDmode)
16669 return force_reg (mode, mask);
16671 v = ix86_build_const_vector (vec_mode, vect, mask);
16672 return force_reg (vec_mode, v);
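/* For DFmode, e.g., this produces the bit pattern 0x8000000000000000
   (only the sign bit set), or 0x7fffffffffffffff when INVERT is true,
   replicated across all vector elements when VECT is true.  */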
16675 /* Generate code for floating point ABS or NEG. */
16678 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
16681 rtx mask, set, dst, src;
16682 bool use_sse = false;
16683 bool vector_mode = VECTOR_MODE_P (mode);
16684 enum machine_mode vmode = mode;
16688 else if (mode == TFmode)
16690 else if (TARGET_SSE_MATH)
16692 use_sse = SSE_FLOAT_MODE_P (mode);
16693 if (mode == SFmode)
16695 else if (mode == DFmode)
16699 /* NEG and ABS performed with SSE use bitwise mask operations.
16700 Create the appropriate mask now. */
16702 mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
16709 set = gen_rtx_fmt_e (code, mode, src);
16710 set = gen_rtx_SET (VOIDmode, dst, set);
16717 use = gen_rtx_USE (VOIDmode, mask);
16719 par = gen_rtvec (2, set, use);
16722 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16723 par = gen_rtvec (3, set, use, clob);
16725 emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
16731 /* Expand a copysign operation. Special case operand 0 being a constant. */
16734 ix86_expand_copysign (rtx operands[])
16736 enum machine_mode mode, vmode;
16737 rtx dest, op0, op1, mask, nmask;
16739 dest = operands[0];
16743 mode = GET_MODE (dest);
16745 if (mode == SFmode)
16747 else if (mode == DFmode)
16752 if (GET_CODE (op0) == CONST_DOUBLE)
16754 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
16756 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
16757 op0 = simplify_unary_operation (ABS, mode, op0, mode);
16759 if (mode == SFmode || mode == DFmode)
16761 if (op0 == CONST0_RTX (mode))
16762 op0 = CONST0_RTX (vmode);
16765 rtx v = ix86_build_const_vector (vmode, false, op0);
16767 op0 = force_reg (vmode, v);
16770 else if (op0 != CONST0_RTX (mode))
16771 op0 = force_reg (mode, op0);
16773 mask = ix86_build_signbit_mask (vmode, 0, 0);
16775 if (mode == SFmode)
16776 copysign_insn = gen_copysignsf3_const;
16777 else if (mode == DFmode)
16778 copysign_insn = gen_copysigndf3_const;
16780 copysign_insn = gen_copysigntf3_const;
16782 emit_insn (copysign_insn (dest, op0, op1, mask));
16786 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
16788 nmask = ix86_build_signbit_mask (vmode, 0, 1);
16789 mask = ix86_build_signbit_mask (vmode, 0, 0);
16791 if (mode == SFmode)
16792 copysign_insn = gen_copysignsf3_var;
16793 else if (mode == DFmode)
16794 copysign_insn = gen_copysigndf3_var;
16796 copysign_insn = gen_copysigntf3_var;
16798 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
16802 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
16803 be a constant, and so has already been expanded into a vector constant. */
16806 ix86_split_copysign_const (rtx operands[])
16808 enum machine_mode mode, vmode;
16809 rtx dest, op0, mask, x;
16811 dest = operands[0];
16813 mask = operands[3];
16815 mode = GET_MODE (dest);
16816 vmode = GET_MODE (mask);
16818 dest = simplify_gen_subreg (vmode, dest, mode, 0);
16819 x = gen_rtx_AND (vmode, dest, mask);
16820 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16822 if (op0 != CONST0_RTX (vmode))
16824 x = gen_rtx_IOR (vmode, dest, op0);
16825 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16829 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
16830 so we have to do two masks. */
16833 ix86_split_copysign_var (rtx operands[])
16835 enum machine_mode mode, vmode;
16836 rtx dest, scratch, op0, op1, mask, nmask, x;
16838 dest = operands[0];
16839 scratch = operands[1];
16842 nmask = operands[4];
16843 mask = operands[5];
16845 mode = GET_MODE (dest);
16846 vmode = GET_MODE (mask);
16848 if (rtx_equal_p (op0, op1))
16850 /* Shouldn't happen often (it's useless, obviously), but when it does
16851 we'd generate incorrect code if we continue below. */
16852 emit_move_insn (dest, op0);
16856 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
16858 gcc_assert (REGNO (op1) == REGNO (scratch));
16860 x = gen_rtx_AND (vmode, scratch, mask);
16861 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
16864 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
16865 x = gen_rtx_NOT (vmode, dest);
16866 x = gen_rtx_AND (vmode, x, op0);
16867 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16871 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
16873 x = gen_rtx_AND (vmode, scratch, mask);
16875 else /* alternative 2,4 */
16877 gcc_assert (REGNO (mask) == REGNO (scratch));
16878 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
16879 x = gen_rtx_AND (vmode, scratch, op1);
16881 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
16883 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
16885 dest = simplify_gen_subreg (vmode, op0, mode, 0);
16886 x = gen_rtx_AND (vmode, dest, nmask);
16888 else /* alternative 3,4 */
16890 gcc_assert (REGNO (nmask) == REGNO (dest));
16892 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
16893 x = gen_rtx_AND (vmode, dest, op0);
16895 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16898 x = gen_rtx_IOR (vmode, dest, scratch);
16899 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
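#if 0
/* Illustrative scalar equivalent of the mask-and-combine sequence; a
   sketch only, assuming <stdint.h>/<string.h> and IEEE doubles.  */
static double
copysign_df_sketch (double mag, double sgn)
{
  uint64_t m, s;
  memcpy (&m, &mag, sizeof m);
  memcpy (&s, &sgn, sizeof s);
  m = (m & 0x7fffffffffffffffULL)	/* magnitude & nmask */
      | (s & 0x8000000000000000ULL);	/* sign & mask */
  memcpy (&mag, &m, sizeof mag);
  return mag;
}
#endif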
16902 /* Return TRUE or FALSE depending on whether the first SET in INSN
16903 has source and destination with matching CC modes, and that the
16904 CC mode is at least as constrained as REQ_MODE. */
16907 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
16910 enum machine_mode set_mode;
16912 set = PATTERN (insn);
16913 if (GET_CODE (set) == PARALLEL)
16914 set = XVECEXP (set, 0, 0);
16915 gcc_assert (GET_CODE (set) == SET);
16916 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
16918 set_mode = GET_MODE (SET_DEST (set));
16922 if (req_mode != CCNOmode
16923 && (req_mode != CCmode
16924 || XEXP (SET_SRC (set), 1) != const0_rtx))
16928 if (req_mode == CCGCmode)
16932 if (req_mode == CCGOCmode || req_mode == CCNOmode)
16936 if (req_mode == CCZmode)
16947 gcc_unreachable ();
16950 return GET_MODE (SET_SRC (set)) == set_mode;
16953 /* Generate insn patterns to do an integer compare of OPERANDS. */
16956 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
16958 enum machine_mode cmpmode;
16961 cmpmode = SELECT_CC_MODE (code, op0, op1);
16962 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
16964 /* This is very simple, but making the interface the same as in the
16965 FP case makes the rest of the code easier. */
16966 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
16967 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
16969 /* Return the test that should be put into the flags user, i.e.
16970 the bcc, scc, or cmov instruction. */
16971 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
16974 /* Figure out whether to use ordered or unordered fp comparisons.
16975 Return the appropriate mode to use. */
16978 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
  /* ??? In order to make all comparisons reversible, we do all comparisons
     non-trapping when compiling for IEEE.  Once gcc is able to distinguish
     all trapping and nontrapping forms of comparisons, we can make inequality
     comparisons trapping again, since it results in better code when using
     FCOM based compares.  */
16985 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
16989 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
16991 enum machine_mode mode = GET_MODE (op0);
16993 if (SCALAR_FLOAT_MODE_P (mode))
16995 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
16996 return ix86_fp_compare_mode (code);
17001 /* Only zero flag is needed. */
17002 case EQ: /* ZF=0 */
17003 case NE: /* ZF!=0 */
17005 /* Codes needing carry flag. */
17006 case GEU: /* CF=0 */
17007 case LTU: /* CF=1 */
17008 /* Detect overflow checks. They need just the carry flag. */
17009 if (GET_CODE (op0) == PLUS
17010 && rtx_equal_p (op1, XEXP (op0, 0)))
17014 case GTU: /* CF=0 & ZF=0 */
17015 case LEU: /* CF=1 | ZF=1 */
17016 /* Detect overflow checks. They need just the carry flag. */
17017 if (GET_CODE (op0) == MINUS
17018 && rtx_equal_p (op1, XEXP (op0, 0)))
17022 /* Codes possibly doable only with sign flag when
17023 comparing against zero. */
17024 case GE: /* SF=OF or SF=0 */
17025 case LT: /* SF<>OF or SF=1 */
17026 if (op1 == const0_rtx)
17029 /* For other cases Carry flag is not required. */
      /* Codes doable only with the sign flag when comparing
	 against zero, but we miss the jump instruction for it,
	 so we need to use relational tests against overflow,
	 which thus needs to be zero.  */
17035 case GT: /* ZF=0 & SF=OF */
17036 case LE: /* ZF=1 | SF<>OF */
17037 if (op1 == const0_rtx)
      /* The strcmp pattern does (use flags), and combine may ask us for a
	 proper mode.  */
17046 gcc_unreachable ();
17050 /* Return the fixed registers used for condition codes. */
17053 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
17060 /* If two condition code modes are compatible, return a condition code
17061 mode which is compatible with both. Otherwise, return
17064 static enum machine_mode
17065 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
17070 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
17073 if ((m1 == CCGCmode && m2 == CCGOCmode)
17074 || (m1 == CCGOCmode && m2 == CCGCmode))
17080 gcc_unreachable ();
      /* These are only compatible with themselves, which we already
	 know.  */
/* Return a comparison we can do that is equivalent to
   swap_condition (code), apart possibly from orderedness.
   But never change orderedness if TARGET_IEEE_FP, returning
   UNKNOWN in that case if necessary.  */
17122 static enum rtx_code
17123 ix86_fp_swap_condition (enum rtx_code code)
17127 case GT: /* GTU - CF=0 & ZF=0 */
17128 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
17129 case GE: /* GEU - CF=0 */
17130 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
17131 case UNLT: /* LTU - CF=1 */
17132 return TARGET_IEEE_FP ? UNKNOWN : GT;
17133 case UNLE: /* LEU - CF=1 | ZF=1 */
17134 return TARGET_IEEE_FP ? UNKNOWN : GE;
17136 return swap_condition (code);
/* Return the cost of comparison CODE using the best strategy for performance.
   All following functions use the number of instructions as the cost metric.
   In the future this should be tweaked to compute bytes for optimize_size and
   to take into account the performance of various instructions on various
   CPUs.  */
17146 ix86_fp_comparison_cost (enum rtx_code code)
17150 /* The cost of code using bit-twiddling on %ah. */
17167 arith_cost = TARGET_IEEE_FP ? 5 : 4;
17171 arith_cost = TARGET_IEEE_FP ? 6 : 4;
17174 gcc_unreachable ();
17177 switch (ix86_fp_comparison_strategy (code))
17179 case IX86_FPCMP_COMI:
17180 return arith_cost > 4 ? 3 : 2;
17181 case IX86_FPCMP_SAHF:
17182 return arith_cost > 4 ? 4 : 3;
/* Return the strategy to use for floating-point comparisons.  We assume
   that fcomi is always preferable where available, since that is also
   true when looking at size (2 bytes, vs. 3 for fnstsw+sahf and at
   least 5 for fnstsw+test).  */
17192 enum ix86_fpcmp_strategy
17193 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
17195 /* Do fcomi/sahf based test when profitable. */
17198 return IX86_FPCMP_COMI;
17200 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
17201 return IX86_FPCMP_SAHF;
17203 return IX86_FPCMP_ARITH;
17206 /* Swap, force into registers, or otherwise massage the two operands
17207 to a fp comparison. The operands are updated in place; the new
17208 comparison code is returned. */
17210 static enum rtx_code
17211 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
17213 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
17214 rtx op0 = *pop0, op1 = *pop1;
17215 enum machine_mode op_mode = GET_MODE (op0);
17216 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
  /* All of the unordered compare instructions only work on registers.
     The same is true of the fcomi compare instructions.  The XFmode
     compare instructions require registers except when comparing
     against zero or when converting operand 1 from fixed point to
     floating point.  */

  if (!is_sse
17225 && (fpcmp_mode == CCFPUmode
17226 || (op_mode == XFmode
17227 && ! (standard_80387_constant_p (op0) == 1
17228 || standard_80387_constant_p (op1) == 1)
17229 && GET_CODE (op1) != FLOAT)
17230 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
17232 op0 = force_reg (op_mode, op0);
17233 op1 = force_reg (op_mode, op1);
17237 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
17238 things around if they appear profitable, otherwise force op0
17239 into a register. */
17241 if (standard_80387_constant_p (op0) == 0
17243 && ! (standard_80387_constant_p (op1) == 0
17246 enum rtx_code new_code = ix86_fp_swap_condition (code);
17247 if (new_code != UNKNOWN)
17250 tmp = op0, op0 = op1, op1 = tmp;
17256 op0 = force_reg (op_mode, op0);
17258 if (CONSTANT_P (op1))
17260 int tmp = standard_80387_constant_p (op1);
17262 op1 = validize_mem (force_const_mem (op_mode, op1));
17266 op1 = force_reg (op_mode, op1);
17269 op1 = force_reg (op_mode, op1);
17273 /* Try to rearrange the comparison to make it cheaper. */
17274 if (ix86_fp_comparison_cost (code)
17275 > ix86_fp_comparison_cost (swap_condition (code))
17276 && (REG_P (op1) || can_create_pseudo_p ()))
17279 tmp = op0, op0 = op1, op1 = tmp;
17280 code = swap_condition (code);
17282 op0 = force_reg (op_mode, op0);
/* Convert comparison codes we use to represent FP comparison to integer
   code that will result in a proper branch.  Return UNKNOWN if no such
   code is available.  */
17295 ix86_fp_compare_code_to_integer (enum rtx_code code)
17324 /* Generate insn patterns to do a floating point compare of OPERANDS. */
17327 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
17329 enum machine_mode fpcmp_mode, intcmp_mode;
17332 fpcmp_mode = ix86_fp_compare_mode (code);
17333 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
17335 /* Do fcomi/sahf based test when profitable. */
17336 switch (ix86_fp_comparison_strategy (code))
17338 case IX86_FPCMP_COMI:
17339 intcmp_mode = fpcmp_mode;
17340 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17341 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17346 case IX86_FPCMP_SAHF:
17347 intcmp_mode = fpcmp_mode;
17348 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17349 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17353 scratch = gen_reg_rtx (HImode);
17354 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
17355 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
17358 case IX86_FPCMP_ARITH:
17359 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
17360 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17361 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
17363 scratch = gen_reg_rtx (HImode);
17364 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
17366 /* In the unordered case, we have to check C2 for NaN's, which
17367 doesn't happen to work out to anything nice combination-wise.
17368 So do some bit twiddling on the value we've got in AH to come
17369 up with an appropriate set of condition codes. */
17371 intcmp_mode = CCNOmode;
17376 if (code == GT || !TARGET_IEEE_FP)
17378 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17383 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17384 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17385 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
17386 intcmp_mode = CCmode;
17392 if (code == LT && TARGET_IEEE_FP)
17394 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17395 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
17396 intcmp_mode = CCmode;
17401 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
17407 if (code == GE || !TARGET_IEEE_FP)
17409 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
17414 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17415 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
17421 if (code == LE && TARGET_IEEE_FP)
17423 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17424 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17425 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17426 intcmp_mode = CCmode;
17431 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17437 if (code == EQ && TARGET_IEEE_FP)
17439 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17440 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17441 intcmp_mode = CCmode;
17446 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17452 if (code == NE && TARGET_IEEE_FP)
17454 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17455 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
17461 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17467 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17471 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17476 gcc_unreachable ();
17484 /* Return the test that should be put into the flags user, i.e.
17485 the bcc, scc, or cmov instruction. */
17486 return gen_rtx_fmt_ee (code, VOIDmode,
17487 gen_rtx_REG (intcmp_mode, FLAGS_REG),
ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
    gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
    ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
  ret = ix86_expand_int_compare (code, op0, op1);

ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
  enum machine_mode mode = GET_MODE (op0);
  tmp = ix86_expand_compare (code, op0, op1);
  tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                              gen_rtx_LABEL_REF (VOIDmode, label),
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));

  /* Expand DImode branch into multiple compare+branch.  */
  rtx lo[2], hi[2], label2;
  enum rtx_code code1, code2, code3;
  enum machine_mode submode;

  if (CONSTANT_P (op0) && !CONSTANT_P (op1))
    tmp = op0, op0 = op1, op1 = tmp;
    code = swap_condition (code);

  split_double_mode (mode, &op0, 1, lo+0, hi+0);
  split_double_mode (mode, &op1, 1, lo+1, hi+1);

  submode = mode == DImode ? SImode : DImode;

  /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
     avoid two branches.  This costs one extra insn, so disable when
     optimizing for size.  */

  if ((code == EQ || code == NE)
      && (!optimize_insn_for_size_p ()
          || hi[1] == const0_rtx || lo[1] == const0_rtx))
    if (hi[1] != const0_rtx)
      xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
                           NULL_RTX, 0, OPTAB_WIDEN);
    if (lo[1] != const0_rtx)
      xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
                           NULL_RTX, 0, OPTAB_WIDEN);
    tmp = expand_binop (submode, ior_optab, xor1, xor0,
                        NULL_RTX, 0, OPTAB_WIDEN);
    ix86_expand_branch (code, tmp, const0_rtx, label);
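
    /* Illustration (a hedged C restatement, not compiler code): the
       sequence above relies on the identity

         ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0  <=>  both halves are equal,

       so one OR of two XORs plus a single compare against zero
       replaces the second compare-and-branch pair.  */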
  /* Otherwise, if we are doing less-than or greater-or-equal-than,
     op1 is a constant and the low word is zero, then we can just
     examine the high word.  Similarly for low word -1 and
     less-or-equal-than or greater-than.  */

  if (CONST_INT_P (hi[1]))
    case LT: case LTU: case GE: case GEU:
      if (lo[1] == const0_rtx)
        ix86_expand_branch (code, hi[0], hi[1], label);
    case LE: case LEU: case GT: case GTU:
      if (lo[1] == constm1_rtx)
        ix86_expand_branch (code, hi[0], hi[1], label);

  /* Otherwise, we need two or three jumps.  */

  label2 = gen_label_rtx ();
  code2 = swap_condition (code);
  code3 = unsigned_condition (code);

    case LT: case GT: case LTU: case GTU:
    case LE: code1 = LT; code2 = GT; break;
    case GE: code1 = GT; code2 = LT; break;
    case LEU: code1 = LTU; code2 = GTU; break;
    case GEU: code1 = GTU; code2 = LTU; break;
    case EQ: code1 = UNKNOWN; code2 = NE; break;
    case NE: code2 = UNKNOWN; break;
      gcc_unreachable ();

  /*
   * if (hi(a) < hi(b)) goto true;
   * if (hi(a) > hi(b)) goto false;
   * if (lo(a) < lo(b)) goto true;
   */

  if (code1 != UNKNOWN)
    ix86_expand_branch (code1, hi[0], hi[1], label);
  if (code2 != UNKNOWN)
    ix86_expand_branch (code2, hi[0], hi[1], label2);

  ix86_expand_branch (code3, lo[0], lo[1], label);

  if (code2 != UNKNOWN)
    emit_label (label2);
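
  /* Note (a hedged restatement, not compiler code): code3 is always
     the unsigned variant because the low halves carry no sign
     information.  In C terms, for a signed double-word compare on a
     32-bit target:

       a < b  ==  hi(a) < hi(b)
                  || (hi(a) == hi(b) && (unsigned) lo(a) < lo(b));

     the low parts only decide the result when the high parts are
     equal, and then they must be compared unsigned.  */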
      gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);

/* Split branch based on floating point condition.  */
ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
                      rtx target1, rtx target2, rtx tmp, rtx pushed)
  if (target2 != pc_rtx)
    code = reverse_condition_maybe_unordered (code);

  condition = ix86_expand_fp_compare (code, op1, op2,

  /* Remove pushed operand from stack.  */
    ix86_free_from_memory (GET_MODE (pushed));

  i = emit_jump_insn (gen_rtx_SET
                      gen_rtx_IF_THEN_ELSE (VOIDmode,
                                            condition, target1, target2)));
  if (split_branch_probability >= 0)
    add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));

ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
  gcc_assert (GET_MODE (dest) == QImode);

  ret = ix86_expand_compare (code, op0, op1);
  PUT_MODE (ret, QImode);
  emit_insn (gen_rtx_SET (VOIDmode, dest, ret));

/* Expand a comparison setting or clearing the carry flag.  Return true
   when successful and set pop for the operation.  */
ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
  enum machine_mode mode =
    GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);

  /* Do not handle double-mode compares that go through the special path.  */
  if (mode == (TARGET_64BIT ? TImode : DImode))

  if (SCALAR_FLOAT_MODE_P (mode))
    rtx compare_op, compare_seq;

    gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));

    /* Shortcut: the following common codes never translate
       into carry-flag compares.  */
    if (code == EQ || code == NE || code == UNEQ || code == LTGT
        || code == ORDERED || code == UNORDERED)

    /* These comparisons require zero flag; swap operands so they won't.  */
    if ((code == GT || code == UNLE || code == LE || code == UNGT)
        && !TARGET_IEEE_FP)
      code = swap_condition (code);

    /* Try to expand the comparison and verify that we end up with a
       carry-flag-based comparison.  This fails to be true only when
       we decide to expand the comparison using arithmetic, which is
       not a common scenario.  */
    compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
    compare_seq = get_insns ();

    if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
        || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
      code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
      code = GET_CODE (compare_op);

    if (code != LTU && code != GEU)

    emit_insn (compare_seq);

  if (!INTEGRAL_MODE_P (mode))

    /* Convert a==0 into (unsigned)a<1.  */
      if (op1 != const0_rtx)
      code = (code == EQ ? LTU : GEU);

    /* Convert a>b into b<a or a>=b+1.  */
      if (CONST_INT_P (op1))
        op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
        /* Bail out on overflow.  We still can swap operands but that
           would force loading of the constant into a register.  */
        if (op1 == const0_rtx
            || !x86_64_immediate_operand (op1, GET_MODE (op1)))
        code = (code == GTU ? GEU : LTU);
      code = (code == GTU ? LTU : GEU);

    /* Convert a>=0 into (unsigned)a<0x80000000.  */
      if (mode == DImode || op1 != const0_rtx)
      op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
      code = (code == LT ? GEU : LTU);
      if (mode == DImode || op1 != constm1_rtx)
      op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
      code = (code == LE ? GEU : LTU);

  /* Swapping operands may cause the constant to appear as the first
     operand.  */
  if (!nonimmediate_operand (op0, VOIDmode))
      if (!can_create_pseudo_p ())
      op0 = force_reg (mode, op0);
  *pop = ix86_expand_compare (code, op0, op1);
  gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
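
/* Hedged C restatement of the rewrites above (32-bit int assumed,
   illustrative only): every comparison is mapped onto LTU/GEU so
   that a single cmp leaves the result in the carry flag.  */

static int
ge_zero_as_carry_compare (int a)
{
  /* a >= 0  <=>  (unsigned) a < 0x80000000; likewise a < 0 maps to
     (unsigned) a >= 0x80000000, and a == 0 to (unsigned) a < 1.  */
  return (unsigned int) a < 0x80000000u;
}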
ix86_expand_int_movcc (rtx operands[])
  enum rtx_code code = GET_CODE (operands[1]), compare_code;
  rtx compare_seq, compare_op;
  enum machine_mode mode = GET_MODE (operands[0]);
  bool sign_bit_compare_p = false;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  compare_op = ix86_expand_compare (code, op0, op1);
  compare_seq = get_insns ();

  compare_code = GET_CODE (compare_op);

  if ((op1 == const0_rtx && (code == GE || code == LT))
      || (op1 == constm1_rtx && (code == GT || code == LE)))
    sign_bit_compare_p = true;

  /* Don't attempt mode expansion here -- if we had to expand 5 or 6
     HImode insns, we'd be swallowed in word prefix ops.  */

  if ((mode != HImode || TARGET_FAST_PREFIX)
      && (mode != (TARGET_64BIT ? TImode : DImode))
      && CONST_INT_P (operands[2])
      && CONST_INT_P (operands[3]))
    rtx out = operands[0];
    HOST_WIDE_INT ct = INTVAL (operands[2]);
    HOST_WIDE_INT cf = INTVAL (operands[3]);
    HOST_WIDE_INT diff;

    /* Sign bit compares are better done using shifts than we do by
       using sbb.  */
    if (sign_bit_compare_p
        || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
      /* Detect overlap between destination and compare sources.  */

      if (!sign_bit_compare_p)
        bool fpcmp = false;

        compare_code = GET_CODE (compare_op);

        flags = XEXP (compare_op, 0);

        if (GET_MODE (flags) == CCFPmode
            || GET_MODE (flags) == CCFPUmode)
            = ix86_fp_compare_code_to_integer (compare_code);

        /* To simplify the rest of the code, restrict to the GEU case.  */
        if (compare_code == LTU)
            HOST_WIDE_INT tmp = ct;
            compare_code = reverse_condition (compare_code);
            code = reverse_condition (code);
            PUT_CODE (compare_op,
                      reverse_condition_maybe_unordered
                        (GET_CODE (compare_op)));
            PUT_CODE (compare_op,
                      reverse_condition (GET_CODE (compare_op)));

        if (reg_overlap_mentioned_p (out, op0)
            || reg_overlap_mentioned_p (out, op1))
          tmp = gen_reg_rtx (mode);

        if (mode == DImode)
          emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
          emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
                                           flags, compare_op));
        if (code == GT || code == GE)
          code = reverse_condition (code);
            HOST_WIDE_INT tmp = ct;
          tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
          tmp = expand_simple_binop (mode, PLUS,
                                     copy_rtx (tmp), 1, OPTAB_DIRECT);
          tmp = expand_simple_binop (mode, IOR,
                                     copy_rtx (tmp), 1, OPTAB_DIRECT);
      else if (diff == -1 && ct)
          tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
            tmp = expand_simple_binop (mode, PLUS,
                                       copy_rtx (tmp), GEN_INT (cf),
                                       copy_rtx (tmp), 1, OPTAB_DIRECT);
          /* andl cf - ct, dest  */
            tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
          tmp = expand_simple_binop (mode, AND,
                                     gen_int_mode (cf - ct, mode),
                                     copy_rtx (tmp), 1, OPTAB_DIRECT);
            tmp = expand_simple_binop (mode, PLUS,
                                       copy_rtx (tmp), GEN_INT (ct),
                                       copy_rtx (tmp), 1, OPTAB_DIRECT);
      if (!rtx_equal_p (tmp, out))
        emit_move_insn (copy_rtx (out), copy_rtx (tmp));
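
      /* Hedged scalar model of the sbb-based selection above
         (illustrative names, not the emitted RTL): the
         movdicc/movsicc_0_m1 patterns materialize the carry flag as
         an all-zeros/all-ones value, which then blends the two
         constants without a branch:

           mask = -(a < b);                   // "sbb reg,reg" after cmp
           dest = (ct & mask) | (cf & ~mask);

         the PLUS/IOR/AND special cases above are cheaper
         reassociations of this blend for particular ct/cf pairs.  */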
        enum machine_mode cmp_mode = GET_MODE (op0);

        tmp = ct, ct = cf, cf = tmp;

        if (SCALAR_FLOAT_MODE_P (cmp_mode))
            gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));

            /* We may be reversing an unordered compare to a normal
               compare, which is not valid in general (we may convert
               a non-trapping condition to a trapping one); however,
               on i386 we currently emit all comparisons unordered.  */
            compare_code = reverse_condition_maybe_unordered (compare_code);
            code = reverse_condition_maybe_unordered (code);
            compare_code = reverse_condition (compare_code);
            code = reverse_condition (code);

      compare_code = UNKNOWN;
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
          && CONST_INT_P (op1))
          if (op1 == const0_rtx
              && (code == LT || code == GE))
            compare_code = code;
          else if (op1 == constm1_rtx)
          else if (code == GT)

      /* Optimize dest = (op0 < 0) ? -1 : cf.  */
      if (compare_code != UNKNOWN
          && GET_MODE (op0) == GET_MODE (out)
          && (cf == -1 || ct == -1))
          /* If the lea code below could be used, only optimize
             if it results in a 2-insn sequence.  */
          if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
                 || diff == 3 || diff == 5 || diff == 9)
              || (compare_code == LT && ct == -1)
              || (compare_code == GE && cf == -1))
              /* notl op1 (if necessary)  */
                code = reverse_condition (code);

              out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
                out = expand_simple_binop (mode, IOR,
                                           out, 1, OPTAB_DIRECT);
              if (out != operands[0])
                emit_move_insn (operands[0], out);

      if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
           || diff == 3 || diff == 5 || diff == 9)
          && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
              || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
          /*
           * lea cf(dest*(ct-cf)),dest
           *
           * This also catches the degenerate setcc-only case.
           */

          out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);

          /* On x86_64 the lea instruction operates on Pmode, so we need
             to get the arithmetic done in the proper mode to match.  */
            tmp = copy_rtx (out);
              out1 = copy_rtx (out);
              tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
                tmp = gen_rtx_PLUS (mode, tmp, out1);
            tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));

          if (!rtx_equal_p (tmp, out))
              out = force_operand (tmp, copy_rtx (out));
              emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
          if (!rtx_equal_p (out, operands[0]))
            emit_move_insn (operands[0], copy_rtx (out));
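
          /* Hedged note on the lea path above: with the comparison
             result as a 0/1 value in DEST, the selected constant is

               dest = cf + dest * diff;       // diff = ct - cf

             and when diff is 1, 2, 4, 8, 3, 5 or 9 the multiply-add
             folds into a single lea (base + index*scale, the base
             possibly equal to the index).  */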
      /*
       * General case:                  Jumpful:
       *   xorl dest,dest                cmpl op1, op2
       *   cmpl op1, op2                 movl ct, dest
       *   setcc dest                    jcc 1f
       *   decl dest                     movl cf, dest
       *   andl (cf-ct),dest           1:
       *
       * Size 20.                      Size 14.
       *
       * This is reasonably steep, but branch mispredict costs are
       * high on modern cpus, so consider failing only if optimizing
       * for size.
       */

      if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
          && BRANCH_COST (optimize_insn_for_speed_p (),
              enum machine_mode cmp_mode = GET_MODE (op0);

              if (SCALAR_FLOAT_MODE_P (cmp_mode))
                  gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));

                  /* We may be reversing an unordered compare to a
                     normal compare, which is not valid in general (we
                     may convert a non-trapping condition to a trapping
                     one); however, on i386 we currently emit all
                     comparisons unordered.  */
                  code = reverse_condition_maybe_unordered (code);
                  code = reverse_condition (code);
                  if (compare_code != UNKNOWN)
                    compare_code = reverse_condition (compare_code);

          if (compare_code != UNKNOWN)
              /* notl op1 (if needed)

                 For x < 0 (resp. x <= -1) there will be no notl,
                 so if possible swap the constants to get rid of the
                 complement.
                 True/false will be -1/0 while the code below (store
                 flag followed by decrement) is 0/-1, so the constants
                 need to be exchanged once more.  */

              if (compare_code == GE || !cf)
                  code = reverse_condition (code);
                  HOST_WIDE_INT tmp = cf;

              out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
              out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);

              out = expand_simple_binop (mode, PLUS, copy_rtx (out),
                                         copy_rtx (out), 1, OPTAB_DIRECT);
            out = expand_simple_binop (mode, AND, copy_rtx (out),
                                       gen_int_mode (cf - ct, mode),
                                       copy_rtx (out), 1, OPTAB_DIRECT);
            out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
                                       copy_rtx (out), 1, OPTAB_DIRECT);
          if (!rtx_equal_p (out, operands[0]))
            emit_move_insn (operands[0], copy_rtx (out));
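
          /* Hedged C model of the jumpless sequence costed above
             (illustrative names): a 0/1 store-flag result is widened
             to a 0/-1 mask by the decrement, then masked and rebased:

               r = (cond ? 1 : 0) - 1;        // decl: 0 -> -1, 1 -> 0
               r &= cf - ct;                  // 0 or cf - ct
               r += ct;                       // ct when cond, else cf  */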
  if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
      /* Try a few things more with specific constants and a variable.  */

      rtx var, orig_out, out, tmp;

      if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)

      /* If one of the two operands is an interesting constant, load a
         constant with the above and mask it in with a logical operation.  */

      if (CONST_INT_P (operands[2]))
          if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
            operands[3] = constm1_rtx, op = and_optab;
          else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
            operands[3] = const0_rtx, op = ior_optab;
      else if (CONST_INT_P (operands[3]))
          if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
            operands[2] = constm1_rtx, op = and_optab;
          else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
            operands[2] = const0_rtx, op = ior_optab;

      orig_out = operands[0];
      tmp = gen_reg_rtx (mode);

      /* Recurse to get the constant loaded.  */
      if (ix86_expand_int_movcc (operands) == 0)

      /* Mask in the interesting variable.  */
      out = expand_binop (mode, op, var, tmp, orig_out, 0,
      if (!rtx_equal_p (out, orig_out))
        emit_move_insn (copy_rtx (orig_out), copy_rtx (out));

  /* For comparison with above.  */

  if (! nonimmediate_operand (operands[2], mode))
    operands[2] = force_reg (mode, operands[2]);
  if (! nonimmediate_operand (operands[3], mode))
    operands[3] = force_reg (mode, operands[3]);

  if (! register_operand (operands[2], VOIDmode)
          || ! register_operand (operands[3], VOIDmode)))
    operands[2] = force_reg (mode, operands[2]);

      && ! register_operand (operands[3], VOIDmode))
    operands[3] = force_reg (mode, operands[3]);

  emit_insn (compare_seq);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (mode,
                                                compare_op, operands[2],

/* Swap, force into registers, or otherwise massage the two operands
   to an sse comparison with a mask result.  Thus we differ a bit from
   ix86_prepare_fp_compare_args which expects to produce a flags result.

   The DEST operand exists to help determine whether to commute commutative
   operators.  The POP0/POP1 operands are updated in place.  The new
   comparison code is returned, or UNKNOWN if not implementable.  */

static enum rtx_code
ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
                                  rtx *pop0, rtx *pop1)
    /* We have no LTGT as an operator.  We could implement it with
       NE & ORDERED, but this requires an extra temporary.  It's
       not clear that it's worth it.  */

    /* These are supported directly.  */

      /* For commutative operators, try to canonicalize the destination
         operand to be first in the comparison - this helps reload to
         avoid extra moves.  */
      if (!dest || !rtx_equal_p (dest, *pop1))

      /* These are not supported directly.  Swap the comparison operands
         to transform into something that is supported.  */
      code = swap_condition (code);

      gcc_unreachable ();

/* Detect conditional moves that exactly match min/max operational
   semantics.  Note that this is IEEE safe, as long as we don't
   interchange the operands.

   Returns FALSE if this conditional move doesn't match a MIN/MAX,
   and TRUE if the operation is successful and instructions are emitted.  */

ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
                           rtx cmp_op1, rtx if_true, rtx if_false)
  enum machine_mode mode;

  else if (code == UNGE)
      if_true = if_false;

  if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
  else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))

  mode = GET_MODE (dest);

  /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
     but MODE may be a vector mode and thus not appropriate.  */
  if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
      int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;

      if_true = force_reg (mode, if_true);
      v = gen_rtvec (2, if_true, if_false);
      tmp = gen_rtx_UNSPEC (mode, v, u);
      code = is_min ? SMIN : SMAX;
      tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);

  emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
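
/* Hedged scalar model of the operand-order issue handled above
   (illustrative only): SSE minss/minsd compute "a < b ? a : b" and
   return the second operand on unordered input, so min (a, b) and
   min (b, a) differ for NaNs and for -0.0 vs +0.0.  The UNSPEC path
   preserves the IEEE-relevant operand order; the SMIN/SMAX path
   assumes relaxed math.  */

static double
sse_min_semantics (double a, double b)
{
  /* b is returned whenever the compare is false *or* unordered.  */
  return a < b ? a : b;
}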
/* Expand an sse vector comparison.  Return the register with the result.  */

ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
                     rtx op_true, rtx op_false)
  enum machine_mode mode = GET_MODE (dest);

  cmp_op0 = force_reg (mode, cmp_op0);
  if (!nonimmediate_operand (cmp_op1, mode))
    cmp_op1 = force_reg (mode, cmp_op1);

      || reg_overlap_mentioned_p (dest, op_true)
      || reg_overlap_mentioned_p (dest, op_false))
    dest = gen_reg_rtx (mode);

  x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));

/* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
   operations.  This is used for both scalar and vector conditional moves.  */

ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
  enum machine_mode mode = GET_MODE (dest);

  if (op_false == CONST0_RTX (mode))
      op_true = force_reg (mode, op_true);
      x = gen_rtx_AND (mode, cmp, op_true);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
  else if (op_true == CONST0_RTX (mode))
      op_false = force_reg (mode, op_false);
      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, op_false);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
  else if (TARGET_XOP)
      rtx pcmov = gen_rtx_SET (mode, dest,
                               gen_rtx_IF_THEN_ELSE (mode, cmp,
      op_true = force_reg (mode, op_true);
      op_false = force_reg (mode, op_false);

      t2 = gen_reg_rtx (mode);
      t3 = gen_reg_rtx (mode);

      x = gen_rtx_AND (mode, op_true, cmp);
      emit_insn (gen_rtx_SET (VOIDmode, t2, x));

      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, op_false);
      emit_insn (gen_rtx_SET (VOIDmode, t3, x));

      x = gen_rtx_IOR (mode, t3, t2);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
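
/* Scalar model of the and/andnot/or blend emitted above (the vector
   forms apply this lane-wise; CMP is assumed all-ones or all-zeros
   per lane, as the SSE compare instructions produce).  Illustrative
   only, not compiler code.  */

static unsigned int
bitwise_select (unsigned int cmp, unsigned int op_true,
                unsigned int op_false)
{
  /* t2 = cmp & op_true;  t3 = ~cmp & op_false;  dest = t2 | t3.  */
  return (cmp & op_true) | (~cmp & op_false);
}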
/* Expand a floating-point conditional move.  Return true if successful.  */

ix86_expand_fp_movcc (rtx operands[])
  enum machine_mode mode = GET_MODE (operands[0]);
  enum rtx_code code = GET_CODE (operands[1]);
  rtx tmp, compare_op;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
      enum machine_mode cmode;

      /* Since we've no cmove for sse registers, don't force bad register
         allocation just to gain access to it.  Deny movcc when the
         comparison mode doesn't match the move mode.  */
      cmode = GET_MODE (op0);
      if (cmode == VOIDmode)
        cmode = GET_MODE (op1);

      code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
      if (code == UNKNOWN)

      if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
                                     operands[2], operands[3]))

      tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
                                 operands[2], operands[3]);
      ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);

  /* The floating point conditional move instructions don't directly
     support conditions resulting from a signed integer comparison.  */

  compare_op = ix86_expand_compare (code, op0, op1);
  if (!fcmov_comparison_operator (compare_op, VOIDmode))
      tmp = gen_reg_rtx (QImode);
      ix86_expand_setcc (tmp, code, op0, op1);

      compare_op = ix86_expand_compare (NE, tmp, const0_rtx);

  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (mode, compare_op,
                                                operands[2], operands[3])));

/* Expand a floating-point vector conditional move; a vcond operation
   rather than a movcc operation.  */

ix86_expand_fp_vcond (rtx operands[])
  enum rtx_code code = GET_CODE (operands[3]);

  code = ix86_prepare_sse_fp_compare_args (operands[0], code,
                                           &operands[4], &operands[5]);
  if (code == UNKNOWN)

  if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
                                 operands[5], operands[1], operands[2]))

  cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
                             operands[1], operands[2]);
  ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);

/* Expand a signed/unsigned integral vector conditional move.  */

ix86_expand_int_vcond (rtx operands[])
  enum machine_mode mode = GET_MODE (operands[0]);
  enum rtx_code code = GET_CODE (operands[3]);
  bool negate = false;

  cop0 = operands[4];
  cop1 = operands[5];

  /* XOP supports all of the comparisons on all vector int types.  */

  /* Canonicalize the comparison to EQ, GT, GTU.  */
      code = reverse_condition (code);
      code = reverse_condition (code);
      code = swap_condition (code);
      x = cop0, cop0 = cop1, cop1 = x;
      gcc_unreachable ();

  /* Only SSE4.1/SSE4.2 supports V2DImode.  */
  if (mode == V2DImode)
        /* SSE4.1 supports EQ.  */
        if (!TARGET_SSE4_1)
        /* SSE4.2 supports GT/GTU.  */
        if (!TARGET_SSE4_2)
        gcc_unreachable ();
  /* Unsigned parallel compare is not supported by the hardware.
     Play some tricks to turn this into a signed comparison
     against 0.  */

  cop0 = force_reg (mode, cop0);

      rtx (*gen_sub3) (rtx, rtx, rtx);

      /* Subtract (-(INT MAX) - 1) from both operands to make
         them signed.  */
      mask = ix86_build_signbit_mask (mode, true, false);
      gen_sub3 = (mode == V4SImode
                  ? gen_subv4si3 : gen_subv2di3);
      t1 = gen_reg_rtx (mode);
      emit_insn (gen_sub3 (t1, cop0, mask));

      t2 = gen_reg_rtx (mode);
      emit_insn (gen_sub3 (t2, cop1, mask));
      /* Perform a parallel unsigned saturating subtraction.  */
      x = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, x,
                              gen_rtx_US_MINUS (mode, cop0, cop1)));

      cop1 = CONST0_RTX (mode);
      gcc_unreachable ();

  x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
                           operands[1+negate], operands[2-negate]);

  ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
                         operands[2-negate]);
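
/* Hedged C model of the signedness trick above: the hardware only
   provides signed pcmpgt, so both operands are biased by the sign
   bit (subtracting -(INT_MAX) - 1 modulo 2^N flips it), after which
   a signed compare yields the unsigned ordering.  */

static int
unsigned_gt_via_signed (unsigned int a, unsigned int b)
{
  /* Equivalent to a > b on unsigned operands.  */
  return (int) (a ^ 0x80000000u) > (int) (b ^ 0x80000000u);
}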
/* Unpack OP[1] into the next wider integer vector type.  UNSIGNED_P is
   true if we should do zero extension, else sign extension.  HIGH_P is
   true if we want the N/2 high elements, else the low elements.  */

ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
  enum machine_mode imode = GET_MODE (operands[1]);
  rtx (*unpack)(rtx, rtx, rtx);

        unpack = gen_vec_interleave_highv16qi;
        unpack = gen_vec_interleave_lowv16qi;
        unpack = gen_vec_interleave_highv8hi;
        unpack = gen_vec_interleave_lowv8hi;
        unpack = gen_vec_interleave_highv4si;
        unpack = gen_vec_interleave_lowv4si;
      gcc_unreachable ();

  dest = gen_lowpart (imode, operands[0]);

    se = force_reg (imode, CONST0_RTX (imode));
    se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
                              operands[1], pc_rtx, pc_rtx);

  emit_insn (unpack (dest, operands[1], se));

/* This function performs the same task as ix86_expand_sse_unpack,
   but with SSE4.1 instructions.  */

ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
  enum machine_mode imode = GET_MODE (operands[1]);
  rtx (*unpack)(rtx, rtx);

        unpack = gen_sse4_1_zero_extendv8qiv8hi2;
        unpack = gen_sse4_1_sign_extendv8qiv8hi2;
        unpack = gen_sse4_1_zero_extendv4hiv4si2;
        unpack = gen_sse4_1_sign_extendv4hiv4si2;
        unpack = gen_sse4_1_zero_extendv2siv2di2;
        unpack = gen_sse4_1_sign_extendv2siv2di2;
      gcc_unreachable ();

  dest = operands[0];

      /* Shift higher 8 bytes to lower 8 bytes.  */
      src = gen_reg_rtx (imode);
      emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
                                     gen_lowpart (V1TImode, operands[1]),

  emit_insn (unpack (dest, src));
/* Expand conditional increment or decrement using adc/sbb instructions.
   The default case using setcc followed by the conditional move can be
   done by generic code.  */
ix86_expand_int_addcc (rtx operands[])
  enum rtx_code code = GET_CODE (operands[1]);
  rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
  rtx val = const0_rtx;
  bool fpcmp = false;
  enum machine_mode mode;
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  if (operands[3] != const1_rtx
      && operands[3] != constm1_rtx)
  if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
  code = GET_CODE (compare_op);

  flags = XEXP (compare_op, 0);

  if (GET_MODE (flags) == CCFPmode
      || GET_MODE (flags) == CCFPUmode)
      code = ix86_fp_compare_code_to_integer (code);

        PUT_CODE (compare_op,
                  reverse_condition_maybe_unordered
                    (GET_CODE (compare_op)));
        PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));

  mode = GET_MODE (operands[0]);

  /* Construct either adc or sbb insn.  */
  if ((code == LTU) == (operands[3] == constm1_rtx))
          insn = gen_subqi3_carry;
          insn = gen_subhi3_carry;
          insn = gen_subsi3_carry;
          insn = gen_subdi3_carry;
          gcc_unreachable ();
          insn = gen_addqi3_carry;
          insn = gen_addhi3_carry;
          insn = gen_addsi3_carry;
          insn = gen_adddi3_carry;
          gcc_unreachable ();

  emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
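
/* Hedged C model of the adc/sbb conditional increment built above
   (illustrative names): once the compare leaves its result in the
   carry flag, "adc dest, 0" adds the flag itself, and the sbb forms
   decrement symmetrically, giving a branch-free increment.  */

static int
conditional_increment (unsigned int a, unsigned int b, int x)
{
  /* Maps to cmp + adc-with-zero-immediate: x + (a < b).  */
  return x + (a < b);
}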
/* Split operands 0 and 1 into half-mode parts.  Similar to
   split_double_mode, but works for floating point parameters and
   non-offsettable memories.  For pushes, it returns just stack offsets;
   the values will be saved in the right order.  At most four parts are
   generated.  */

ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
    size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
    size = (GET_MODE_SIZE (mode) + 4) / 8;

  gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
  gcc_assert (size >= 2 && size <= 4);

  /* Optimize constant pool reference to immediates.  This is used by fp
     moves that force all constants to memory to allow combining.  */
  if (MEM_P (operand) && MEM_READONLY_P (operand))
      rtx tmp = maybe_get_pool_constant (operand);

  if (MEM_P (operand) && !offsettable_memref_p (operand))
      /* The only non-offsettable memories we handle are pushes.  */
      int ok = push_operand (operand, VOIDmode);

      operand = copy_rtx (operand);
      PUT_MODE (operand, Pmode);
      parts[0] = parts[1] = parts[2] = parts[3] = operand;

  if (GET_CODE (operand) == CONST_VECTOR)
      enum machine_mode imode = int_mode_for_mode (mode);
      /* Caution: if we looked through a constant pool memory above,
         the operand may actually have a different mode now.  That's
         ok, since we want to pun this all the way back to an integer.  */
      operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
      gcc_assert (operand != NULL);

      if (mode == DImode)
        split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);

          if (REG_P (operand))
              gcc_assert (reload_completed);
              for (i = 0; i < size; i++)
                parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
          else if (offsettable_memref_p (operand))
              operand = adjust_address (operand, SImode, 0);
              parts[0] = operand;
              for (i = 1; i < size; i++)
                parts[i] = adjust_address (operand, SImode, 4 * i);
          else if (GET_CODE (operand) == CONST_DOUBLE)
              REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
                  real_to_target (l, &r, mode);
                  parts[3] = gen_int_mode (l[3], SImode);
                  parts[2] = gen_int_mode (l[2], SImode);
                  REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
                  parts[2] = gen_int_mode (l[2], SImode);
                  REAL_VALUE_TO_TARGET_DOUBLE (r, l);
                  gcc_unreachable ();
              parts[1] = gen_int_mode (l[1], SImode);
              parts[0] = gen_int_mode (l[0], SImode);
            gcc_unreachable ();

      if (mode == TImode)
        split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
      if (mode == XFmode || mode == TFmode)
          enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
          if (REG_P (operand))
              gcc_assert (reload_completed);
              parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
              parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
          else if (offsettable_memref_p (operand))
              operand = adjust_address (operand, DImode, 0);
              parts[0] = operand;
              parts[1] = adjust_address (operand, upper_mode, 8);
          else if (GET_CODE (operand) == CONST_DOUBLE)
              REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
              real_to_target (l, &r, mode);

              /* Do not use shift by 32 to avoid warning on 32bit systems.  */
              if (HOST_BITS_PER_WIDE_INT >= 64)
                    ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
                     + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
                parts[0] = immed_double_const (l[0], l[1], DImode);

              if (upper_mode == SImode)
                parts[1] = gen_int_mode (l[2], SImode);
              else if (HOST_BITS_PER_WIDE_INT >= 64)
                    ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
                     + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
                parts[1] = immed_double_const (l[2], l[3], DImode);
            gcc_unreachable ();
/* Emit insns to perform a move or push of DI, DF, XF, and TF values.
   Return false when normal moves are needed; true when all required
   insns have been emitted.  Operands 2-4 contain the input values
   in the correct order; operands 5-7 contain the output values.  */

ix86_split_long_move (rtx operands[])
  int collisions = 0;
  enum machine_mode mode = GET_MODE (operands[0]);
  bool collisionparts[4];

  /* The DFmode expanders may ask us to move a double.
     For a 64-bit target this is a single move.  By hiding the fact
     here we simplify i386.md splitters.  */
  if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
      /* Optimize constant pool reference to immediates.  This is used by
         fp moves that force all constants to memory to allow combining.  */

      if (MEM_P (operands[1])
          && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
        operands[1] = get_pool_constant (XEXP (operands[1], 0));
      if (push_operand (operands[0], VOIDmode))
          operands[0] = copy_rtx (operands[0]);
          PUT_MODE (operands[0], Pmode);
          operands[0] = gen_lowpart (DImode, operands[0]);
          operands[1] = gen_lowpart (DImode, operands[1]);
          emit_move_insn (operands[0], operands[1]);

  /* The only non-offsettable memory we handle is push.  */
  if (push_operand (operands[0], VOIDmode))
    gcc_assert (!MEM_P (operands[0])
                || offsettable_memref_p (operands[0]));

  nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
  ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));

  /* When emitting a push, watch for source operands on the stack.  */
  if (push && MEM_P (operands[1])
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
      rtx src_base = XEXP (part[1][nparts - 1], 0);

      /* Compensate for the stack decrement by 4.  */
      if (!TARGET_64BIT && nparts == 3
          && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
        src_base = plus_constant (src_base, 4);

      /* src_base refers to the stack pointer and is
         automatically decreased by emitted push.  */
      for (i = 0; i < nparts; i++)
        part[1][i] = change_address (part[1][i],
                                     GET_MODE (part[1][i]), src_base);

  /* We need to copy in the right order in case an address register
     of the source overlaps the destination.  */
  if (REG_P (part[0][0]) && MEM_P (part[1][0]))
      for (i = 0; i < nparts; i++)
          = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
          if (collisionparts[i])

      /* Collision in the middle part can be handled by reordering.  */
      if (collisions == 1 && nparts == 3 && collisionparts [1])
          tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
          tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
      else if (collisions == 1
               && (collisionparts [1] || collisionparts [2]))
          if (collisionparts [1])
              tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
              tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
              tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
              tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;

      /* If there are more collisions, we can't handle it by reordering.
         Do an lea to the last part and use only one colliding move.  */
      else if (collisions > 1)
          base = part[0][nparts - 1];

          /* Handle the case when the last part isn't valid for lea.
             Happens in 64-bit mode storing the 12-byte XFmode.  */
          if (GET_MODE (base) != Pmode)
            base = gen_rtx_REG (Pmode, REGNO (base));

          emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
          part[1][0] = replace_equiv_address (part[1][0], base);
          for (i = 1; i < nparts; i++)
              tmp = plus_constant (base, UNITS_PER_WORD * i);
              part[1][i] = replace_equiv_address (part[1][i], tmp);

          if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
            emit_insn (gen_addsi3 (stack_pointer_rtx,
                                   stack_pointer_rtx, GEN_INT (-4)));
          emit_move_insn (part[0][2], part[1][2]);
      else if (nparts == 4)
          emit_move_insn (part[0][3], part[1][3]);
          emit_move_insn (part[0][2], part[1][2]);

          /* In 64-bit mode we don't have a 32-bit push available.  If
             this is a register, that's OK: we will just use the larger
             counterpart.  We also retype memory; this comes from an
             attempt to avoid a REX prefix when moving the second half
             of a TFmode value.  */
          if (GET_MODE (part[1][1]) == SImode)
              switch (GET_CODE (part[1][1]))
                  part[1][1] = adjust_address (part[1][1], DImode, 0);
                  part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
                  gcc_unreachable ();

              if (GET_MODE (part[1][0]) == SImode)
                part[1][0] = part[1][1];
      emit_move_insn (part[0][1], part[1][1]);
      emit_move_insn (part[0][0], part[1][0]);

  /* Choose the correct order so as not to overwrite the source before
     it is copied.  */
  if ((REG_P (part[0][0])
       && REG_P (part[1][1])
       && (REGNO (part[0][0]) == REGNO (part[1][1])
               && REGNO (part[0][0]) == REGNO (part[1][2]))
               && REGNO (part[0][0]) == REGNO (part[1][3]))))
          && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
      for (i = 0, j = nparts - 1; i < nparts; i++, j--)
          operands[2 + i] = part[0][j];
          operands[6 + i] = part[1][j];
      for (i = 0; i < nparts; i++)
          operands[2 + i] = part[0][i];
          operands[6 + i] = part[1][i];

  /* If optimizing for size, attempt to locally unCSE nonzero constants.  */
  if (optimize_insn_for_size_p ())
      for (j = 0; j < nparts - 1; j++)
        if (CONST_INT_P (operands[6 + j])
            && operands[6 + j] != const0_rtx
            && REG_P (operands[2 + j]))
          for (i = j; i < nparts - 1; i++)
            if (CONST_INT_P (operands[7 + i])
                && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
              operands[7 + i] = operands[2 + j];

  for (i = 0; i < nparts; i++)
    emit_move_insn (operands[2 + i], operands[6 + i]);
/* Helper function of ix86_split_ashl used to generate an SImode/DImode
   left shift by a constant, either using a single shift or
   a sequence of add instructions.  */

ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
  rtx (*insn)(rtx, rtx, rtx);

      || (count * ix86_cost->add <= ix86_cost->shift_const
          && !optimize_insn_for_size_p ()))
      insn = mode == DImode ? gen_addsi3 : gen_adddi3;
      while (count-- > 0)
        emit_insn (insn (operand, operand, operand));
      insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
      emit_insn (insn (operand, operand, GEN_INT (count)));

ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
  rtx (*gen_ashl3)(rtx, rtx, rtx);
  rtx (*gen_shld)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;
  rtx low[2], high[2];

  if (CONST_INT_P (operands[2]))
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count >= half_width)
          emit_move_insn (high[0], low[1]);
          emit_move_insn (low[0], const0_rtx);

          if (count > half_width)
            ix86_expand_ashl_const (high[0], count - half_width, mode);
          gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;

          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
          ix86_expand_ashl_const (low[0], count, mode);
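
      /* Hedged C model of the constant-count branch above, for the
         DImode-via-SImode case (32-bit halves, illustrative names):

           if (count >= 32)
             { hi = lo << (count - 32); lo = 0; }
           else if (count > 0)
             { hi = (hi << count) | (lo >> (32 - count));   // shld
               lo <<= count; }

         counts of half-width or more need only a move plus a plain
         shift; smaller counts need the shld-style combine.  */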
      split_double_mode (mode, operands, 1, low, high);

      gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;

      if (operands[1] == const1_rtx)
          /* Assuming we've chosen QImode-capable registers, 1 << N
             can be done with two 32/64-bit shifts, no branches, no
             cmoves.  */
          if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
              rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);

              ix86_expand_clear (low[0]);
              ix86_expand_clear (high[0]);
              emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));

              d = gen_lowpart (QImode, low[0]);
              d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
              s = gen_rtx_EQ (QImode, flags, const0_rtx);
              emit_insn (gen_rtx_SET (VOIDmode, d, s));

              d = gen_lowpart (QImode, high[0]);
              d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
              s = gen_rtx_NE (QImode, flags, const0_rtx);
              emit_insn (gen_rtx_SET (VOIDmode, d, s));

          /* Otherwise, we can get the same results by manually performing
             a bit extract operation on bit 5/6, and then performing the two
             shifts.  The two methods of getting 0/1 into low/high are exactly
             the same size.  Avoiding the shift in the bit extract case helps
             pentium4 a bit; no one else seems to care much either way.  */
              enum machine_mode half_mode;
              rtx (*gen_lshr3)(rtx, rtx, rtx);
              rtx (*gen_and3)(rtx, rtx, rtx);
              rtx (*gen_xor3)(rtx, rtx, rtx);
              HOST_WIDE_INT bits;

              if (mode == DImode)
                  half_mode = SImode;
                  gen_lshr3 = gen_lshrsi3;
                  gen_and3 = gen_andsi3;
                  gen_xor3 = gen_xorsi3;
                  half_mode = DImode;
                  gen_lshr3 = gen_lshrdi3;
                  gen_and3 = gen_anddi3;
                  gen_xor3 = gen_xordi3;

              if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
                x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
                x = gen_lowpart (half_mode, operands[2]);
              emit_insn (gen_rtx_SET (VOIDmode, high[0], x));

              emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
              emit_insn (gen_and3 (high[0], high[0], const1_rtx));
              emit_move_insn (low[0], high[0]);
              emit_insn (gen_xor3 (low[0], low[0], const1_rtx));

          emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
          emit_insn (gen_ashl3 (high[0], high[0], operands[2]));

      if (operands[1] == constm1_rtx)
          /* For -1 << N, we can avoid the shld instruction, because we
             know that we're shifting 0...31/63 ones into a -1.  */
          emit_move_insn (low[0], constm1_rtx);
          if (optimize_insn_for_size_p ())
            emit_move_insn (high[0], low[0]);
            emit_move_insn (high[0], constm1_rtx);
          gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;

          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);

          split_double_mode (mode, operands, 1, low, high);
          emit_insn (gen_shld (high[0], low[0], operands[2]));

      emit_insn (gen_ashl3 (low[0], low[0], operands[2]));

      if (TARGET_CMOVE && scratch)
          rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
            = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;

          ix86_expand_clear (scratch);
          emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
          rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
            = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;

          emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
  rtx (*gen_ashr3)(rtx, rtx, rtx)
    = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
  rtx (*gen_shrd)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;
  rtx low[2], high[2];

  if (CONST_INT_P (operands[2]))
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count == GET_MODE_BITSIZE (mode) - 1)
          emit_move_insn (high[0], high[1]);
          emit_insn (gen_ashr3 (high[0], high[0],
                                GEN_INT (half_width - 1)));
          emit_move_insn (low[0], high[0]);

      else if (count >= half_width)
          emit_move_insn (low[0], high[1]);
          emit_move_insn (high[0], low[0]);
          emit_insn (gen_ashr3 (high[0], high[0],
                                GEN_INT (half_width - 1)));

          if (count > half_width)
            emit_insn (gen_ashr3 (low[0], low[0],
                                  GEN_INT (count - half_width)));
          gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
          emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));

      gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

      if (!rtx_equal_p (operands[0], operands[1]))
        emit_move_insn (operands[0], operands[1]);

      split_double_mode (mode, operands, 1, low, high);

      emit_insn (gen_shrd (low[0], high[0], operands[2]));
      emit_insn (gen_ashr3 (high[0], high[0], operands[2]));

      if (TARGET_CMOVE && scratch)
          rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
            = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;

          emit_move_insn (scratch, high[0]);
          emit_insn (gen_ashr3 (scratch, scratch,
                                GEN_INT (half_width - 1)));
          emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
          rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
            = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;

          emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));

ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
  rtx (*gen_lshr3)(rtx, rtx, rtx)
    = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
  rtx (*gen_shrd)(rtx, rtx, rtx);
  int half_width = GET_MODE_BITSIZE (mode) >> 1;
  rtx low[2], high[2];

  if (CONST_INT_P (operands[2]))
      split_double_mode (mode, operands, 2, low, high);
      count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);

      if (count >= half_width)
          emit_move_insn (low[0], high[1]);
          ix86_expand_clear (high[0]);

          if (count > half_width)
            emit_insn (gen_lshr3 (low[0], low[0],
                                  GEN_INT (count - half_width)));
          gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
          emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));

      gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;

      if (!rtx_equal_p (operands[0], operands[1]))
        emit_move_insn (operands[0], operands[1]);

      split_double_mode (mode, operands, 1, low, high);

      emit_insn (gen_shrd (low[0], high[0], operands[2]));
      emit_insn (gen_lshr3 (high[0], high[0], operands[2]));

      if (TARGET_CMOVE && scratch)
          rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
            = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;

          ix86_expand_clear (scratch);
          emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
          rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
            = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;

          emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
/* Predict the just-emitted jump instruction to be taken with
   probability PROB.  */
predict_jump (int prob)
  rtx insn = get_last_insn ();
  gcc_assert (JUMP_P (insn));
  add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));

/* Helper function for the string operations below.  Test VARIABLE
   whether it is aligned to VALUE bytes.  If true, jump to the label.  */
ix86_expand_aligntest (rtx variable, int value, bool epilogue)
  rtx label = gen_label_rtx ();
  rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
  if (GET_MODE (variable) == DImode)
    emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
    emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
  emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
    predict_jump (REG_BR_PROB_BASE * 50 / 100);
    predict_jump (REG_BR_PROB_BASE * 90 / 100);

/* Adjust COUNTER by the VALUE.  */
ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
  rtx (*gen_add)(rtx, rtx, rtx)
    = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;

  emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));

/* Zero-extend the possibly-SImode EXP to a Pmode register.  */
ix86_zero_extend_to_Pmode (rtx exp)
  if (GET_MODE (exp) == VOIDmode)
    return force_reg (Pmode, exp);
  if (GET_MODE (exp) == Pmode)
    return copy_to_mode_reg (Pmode, exp);
  r = gen_reg_rtx (Pmode);
  emit_insn (gen_zero_extendsidi2 (r, exp));

/* Divide COUNTREG by SCALE.  */
scale_counter (rtx countreg, int scale)
  if (CONST_INT_P (countreg))
    return GEN_INT (INTVAL (countreg) / scale);
  gcc_assert (REG_P (countreg));

  sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
                            GEN_INT (exact_log2 (scale)),
                            NULL, 1, OPTAB_DIRECT);

/* Return the mode for the memcpy/memset loop counter.  Prefer SImode
   over DImode for constant loop counts.  */
static enum machine_mode
counter_mode (rtx count_exp)
  if (GET_MODE (count_exp) != VOIDmode)
    return GET_MODE (count_exp);
  if (!CONST_INT_P (count_exp))
  if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))

/* When SRCPTR is non-NULL, output a simple loop to copy memory from
   SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times; the
   overall size is COUNT, specified in bytes.  When SRCPTR is NULL,
   output the equivalent loop to set memory by VALUE (supposed to be
   in MODE).

   The size is rounded down to a whole number of chunks moved at once.
   SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info.  */

expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
                               rtx destptr, rtx srcptr, rtx value,
                               rtx count, enum machine_mode mode, int unroll,
  rtx out_label, top_label, iter, tmp;
  enum machine_mode iter_mode = counter_mode (count);
  rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
  rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));

  top_label = gen_label_rtx ();
  out_label = gen_label_rtx ();
  iter = gen_reg_rtx (iter_mode);

  size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
                              NULL, 1, OPTAB_DIRECT);
  /* Those two should combine.  */
  if (piece_size == const1_rtx)
      emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
      predict_jump (REG_BR_PROB_BASE * 10 / 100);

  emit_move_insn (iter, const0_rtx);

  emit_label (top_label);

  tmp = convert_modes (Pmode, iter_mode, iter, true);
  x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
  destmem = change_address (destmem, mode, x_addr);

      y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
      srcmem = change_address (srcmem, mode, y_addr);

      /* When unrolling for chips that reorder memory reads and writes,
         we can save registers by using a single temporary.
         Also, using 4 temporaries is overkill in 32-bit mode.  */
      if (!TARGET_64BIT && 0)
          for (i = 0; i < unroll; i++)
                  adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
                  adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
              emit_move_insn (destmem, srcmem);
          gcc_assert (unroll <= 4);
          for (i = 0; i < unroll; i++)
              tmpreg[i] = gen_reg_rtx (mode);
                  adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
              emit_move_insn (tmpreg[i], srcmem);
          for (i = 0; i < unroll; i++)
                  adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
              emit_move_insn (destmem, tmpreg[i]);
      for (i = 0; i < unroll; i++)
              adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
          emit_move_insn (destmem, value);

  tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
                             true, OPTAB_LIB_WIDEN);
    emit_move_insn (iter, tmp);

  emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
  if (expected_size != -1)
      expected_size /= GET_MODE_SIZE (mode) * unroll;
      if (expected_size == 0)
      else if (expected_size > REG_BR_PROB_BASE)
        predict_jump (REG_BR_PROB_BASE - 1);
        predict_jump (REG_BR_PROB_BASE
                      - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
    predict_jump (REG_BR_PROB_BASE * 80 / 100);
  iter = ix86_zero_extend_to_Pmode (iter);
  tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
                             true, OPTAB_LIB_WIDEN);
  if (tmp != destptr)
    emit_move_insn (destptr, tmp);
      tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
                                 true, OPTAB_LIB_WIDEN);
        emit_move_insn (srcptr, tmp);
  emit_label (out_label);
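
/* Hedged C rendering of the loop shape emitted above (illustrative
   names; a byte-wise copy stands in for the MODE-sized moves): the
   count is rounded down to whole unrolled chunks and the remainder
   is left to the epilogue code.  */

static void
copy_loop_model (char *dest, const char *src,
                 unsigned long count, unsigned long chunk)
{
  /* CHUNK corresponds to GET_MODE_SIZE (mode) * unroll.  */
  unsigned long size = count & ~(chunk - 1);
  unsigned long iter, i;

  for (iter = 0; iter < size; iter += chunk)
    for (i = 0; i < chunk; i++)
      dest[iter + i] = src[iter + i];
}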
/* Output a "rep; mov" instruction.
   Arguments have the same meaning as for the previous function.  */
expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
                           rtx destptr, rtx srcptr,
                           enum machine_mode mode)
  /* If the size is known, it is shorter to use rep movs.  */
  if (mode == QImode && CONST_INT_P (count)
      && !(INTVAL (count) & 3))

  if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
    destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
  if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
    srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
  countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
  if (mode != QImode)
      destexp = gen_rtx_ASHIFT (Pmode, countreg,
                                GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
      srcexp = gen_rtx_ASHIFT (Pmode, countreg,
                               GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
      destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
      srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
  if (CONST_INT_P (count))
      count = GEN_INT (INTVAL (count)
                       & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
      destmem = shallow_copy_rtx (destmem);
      srcmem = shallow_copy_rtx (srcmem);
      set_mem_size (destmem, count);
      set_mem_size (srcmem, count);
      if (MEM_SIZE (destmem))
        set_mem_size (destmem, NULL_RTX);
      if (MEM_SIZE (srcmem))
        set_mem_size (srcmem, NULL_RTX);
  emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,

/* Output a "rep; stos" instruction.
   Arguments have the same meaning as for the previous function.  */
expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
                            rtx count, enum machine_mode mode,
  if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
    destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
  value = force_reg (mode, gen_lowpart (mode, value));
  countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
  if (mode != QImode)
      destexp = gen_rtx_ASHIFT (Pmode, countreg,
                                GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
      destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
    destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
  if (orig_value == const0_rtx && CONST_INT_P (count))
      count = GEN_INT (INTVAL (count)
                       & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
      destmem = shallow_copy_rtx (destmem);
      set_mem_size (destmem, count);
  else if (MEM_SIZE (destmem))
    set_mem_size (destmem, NULL_RTX);
  emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));

emit_strmov (rtx destmem, rtx srcmem,
             rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
  rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
  rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
  emit_insn (gen_strmov (destptr, dest, srcptr, src));

/* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST.  */
expand_movmem_epilogue (rtx destmem, rtx srcmem,
                        rtx destptr, rtx srcptr, rtx count, int max_size)
  if (CONST_INT_P (count))
      HOST_WIDE_INT countval = INTVAL (count);

      if ((countval & 0x10) && max_size > 16)
              emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
              emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
            gcc_unreachable ();
      if ((countval & 0x08) && max_size > 8)
              emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
              emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
              emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
      if ((countval & 0x04) && max_size > 4)
          emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
      if ((countval & 0x02) && max_size > 2)
          emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
      if ((countval & 0x01) && max_size > 1)
          emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
      count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
                                   count, 1, OPTAB_DIRECT);
      expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
                                     count, QImode, 1, 4);
19998 /* When there are stringops, we can cheaply increase dest and src pointers.
19999 Otherwise we save code size by maintaining offset (zero is readily
20000 available from preceding rep operation) and using x86 addressing modes.
20002 if (TARGET_SINGLE_STRINGOP)
20006 rtx label = ix86_expand_aligntest (count, 4, true);
20007 src = change_address (srcmem, SImode, srcptr);
20008 dest = change_address (destmem, SImode, destptr);
20009 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20010 emit_label (label);
20011 LABEL_NUSES (label) = 1;
20015 rtx label = ix86_expand_aligntest (count, 2, true);
20016 src = change_address (srcmem, HImode, srcptr);
20017 dest = change_address (destmem, HImode, destptr);
20018 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20019 emit_label (label);
20020 LABEL_NUSES (label) = 1;
20024 rtx label = ix86_expand_aligntest (count, 1, true);
20025 src = change_address (srcmem, QImode, srcptr);
20026 dest = change_address (destmem, QImode, destptr);
20027 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20028 emit_label (label);
20029 LABEL_NUSES (label) = 1;
20034 rtx offset = force_reg (Pmode, const0_rtx);
20039 rtx label = ix86_expand_aligntest (count, 4, true);
20040 src = change_address (srcmem, SImode, srcptr);
20041 dest = change_address (destmem, SImode, destptr);
20042 emit_move_insn (dest, src);
20043 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
20044 true, OPTAB_LIB_WIDEN);
20046 emit_move_insn (offset, tmp);
20047 emit_label (label);
20048 LABEL_NUSES (label) = 1;
20052 rtx label = ix86_expand_aligntest (count, 2, true);
20053 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20054 src = change_address (srcmem, HImode, tmp);
20055 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20056 dest = change_address (destmem, HImode, tmp);
20057 emit_move_insn (dest, src);
20058 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
20059 true, OPTAB_LIB_WIDEN);
20061 emit_move_insn (offset, tmp);
20062 emit_label (label);
20063 LABEL_NUSES (label) = 1;
20067 rtx label = ix86_expand_aligntest (count, 1, true);
20068 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20069 src = change_address (srcmem, QImode, tmp);
20070 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20071 dest = change_address (destmem, QImode, tmp);
20072 emit_move_insn (dest, src);
20073 emit_label (label);
20074 LABEL_NUSES (label) = 1;
20079 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20081 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
20082 rtx count, int max_size)
20085 expand_simple_binop (counter_mode (count), AND, count,
20086 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
20087 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
20088 gen_lowpart (QImode, value), count, QImode,
20092 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20094 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
20098 if (CONST_INT_P (count))
20100 HOST_WIDE_INT countval = INTVAL (count);
20103 if ((countval & 0x10) && max_size > 16)
20107 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20108 emit_insn (gen_strset (destptr, dest, value));
20109 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
20110 emit_insn (gen_strset (destptr, dest, value));
20113 gcc_unreachable ();
20116 if ((countval & 0x08) && max_size > 8)
20120 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20121 emit_insn (gen_strset (destptr, dest, value));
20125 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20126 emit_insn (gen_strset (destptr, dest, value));
20127 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
20128 emit_insn (gen_strset (destptr, dest, value));
20132 if ((countval & 0x04) && max_size > 4)
20134 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20135 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20138 if ((countval & 0x02) && max_size > 2)
20140 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
20141 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20144 if ((countval & 0x01) && max_size > 1)
20146 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
20147 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20154 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
20159 rtx label = ix86_expand_aligntest (count, 16, true);
20162 dest = change_address (destmem, DImode, destptr);
20163 emit_insn (gen_strset (destptr, dest, value));
20164 emit_insn (gen_strset (destptr, dest, value));
20168 dest = change_address (destmem, SImode, destptr);
20169 emit_insn (gen_strset (destptr, dest, value));
20170 emit_insn (gen_strset (destptr, dest, value));
20171 emit_insn (gen_strset (destptr, dest, value));
20172 emit_insn (gen_strset (destptr, dest, value));
20174 emit_label (label);
20175 LABEL_NUSES (label) = 1;
20179 rtx label = ix86_expand_aligntest (count, 8, true);
20182 dest = change_address (destmem, DImode, destptr);
20183 emit_insn (gen_strset (destptr, dest, value));
20187 dest = change_address (destmem, SImode, destptr);
20188 emit_insn (gen_strset (destptr, dest, value));
20189 emit_insn (gen_strset (destptr, dest, value));
20191 emit_label (label);
20192 LABEL_NUSES (label) = 1;
20196 rtx label = ix86_expand_aligntest (count, 4, true);
20197 dest = change_address (destmem, SImode, destptr);
20198 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20199 emit_label (label);
20200 LABEL_NUSES (label) = 1;
20204 rtx label = ix86_expand_aligntest (count, 2, true);
20205 dest = change_address (destmem, HImode, destptr);
20206 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20207 emit_label (label);
20208 LABEL_NUSES (label) = 1;
20212 rtx label = ix86_expand_aligntest (count, 1, true);
20213 dest = change_address (destmem, QImode, destptr);
20214 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20215 emit_label (label);
20216 LABEL_NUSES (label) = 1;
20220 /* Copy enough from SRC to DEST to align DEST, which is known to be
20221 aligned by ALIGN, to DESIRED_ALIGNMENT. */
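/* For example, with ALIGN = 1 and DESIRED_ALIGNMENT = 8 the code below
   emits up to three conditional copies (1, 2 and 4 bytes), each guarded
   by a test of the corresponding low bit of DESTPTR, and decrements
   COUNT accordingly; at most DESIRED_ALIGNMENT - ALIGN = 7 bytes are
   copied.  */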
20223 expand_movmem_prologue (rtx destmem, rtx srcmem,
20224 rtx destptr, rtx srcptr, rtx count,
20225 int align, int desired_alignment)
20227 if (align <= 1 && desired_alignment > 1)
20229 rtx label = ix86_expand_aligntest (destptr, 1, false);
20230 srcmem = change_address (srcmem, QImode, srcptr);
20231 destmem = change_address (destmem, QImode, destptr);
20232 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20233 ix86_adjust_counter (count, 1);
20234 emit_label (label);
20235 LABEL_NUSES (label) = 1;
20237 if (align <= 2 && desired_alignment > 2)
20239 rtx label = ix86_expand_aligntest (destptr, 2, false);
20240 srcmem = change_address (srcmem, HImode, srcptr);
20241 destmem = change_address (destmem, HImode, destptr);
20242 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20243 ix86_adjust_counter (count, 2);
20244 emit_label (label);
20245 LABEL_NUSES (label) = 1;
20247 if (align <= 4 && desired_alignment > 4)
20249 rtx label = ix86_expand_aligntest (destptr, 4, false);
20250 srcmem = change_address (srcmem, SImode, srcptr);
20251 destmem = change_address (destmem, SImode, destptr);
20252 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20253 ix86_adjust_counter (count, 4);
20254 emit_label (label);
20255 LABEL_NUSES (label) = 1;
20257 gcc_assert (desired_alignment <= 8);
20260 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
20261 ALIGN_BYTES is how many bytes need to be copied. */
20263 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
20264 int desired_align, int align_bytes)
20267 rtx src_size, dst_size;
20269 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
20270 if (src_align_bytes >= 0)
20271 src_align_bytes = desired_align - src_align_bytes;
20272 src_size = MEM_SIZE (src);
20273 dst_size = MEM_SIZE (dst);
20274 if (align_bytes & 1)
20276 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20277 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
20279 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20281 if (align_bytes & 2)
20283 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20284 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
20285 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20286 set_mem_align (dst, 2 * BITS_PER_UNIT);
20287 if (src_align_bytes >= 0
20288 && (src_align_bytes & 1) == (align_bytes & 1)
20289 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
20290 set_mem_align (src, 2 * BITS_PER_UNIT);
20292 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20294 if (align_bytes & 4)
20296 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20297 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
20298 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20299 set_mem_align (dst, 4 * BITS_PER_UNIT);
20300 if (src_align_bytes >= 0)
20302 unsigned int src_align = 0;
20303 if ((src_align_bytes & 3) == (align_bytes & 3))
20305 else if ((src_align_bytes & 1) == (align_bytes & 1))
20307 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20308 set_mem_align (src, src_align * BITS_PER_UNIT);
20311 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20313 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20314 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
20315 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20316 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20317 if (src_align_bytes >= 0)
20319 unsigned int src_align = 0;
20320 if ((src_align_bytes & 7) == (align_bytes & 7))
20322 else if ((src_align_bytes & 3) == (align_bytes & 3))
20324 else if ((src_align_bytes & 1) == (align_bytes & 1))
20326 if (src_align > (unsigned int) desired_align)
20327 src_align = desired_align;
20328 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20329 set_mem_align (src, src_align * BITS_PER_UNIT);
20332 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20334 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
20339 /* Store enough into DEST to align DEST, which is known to be aligned
20340 by ALIGN, to DESIRED_ALIGNMENT. */
20342 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
20343 int align, int desired_alignment)
20345 if (align <= 1 && desired_alignment > 1)
20347 rtx label = ix86_expand_aligntest (destptr, 1, false);
20348 destmem = change_address (destmem, QImode, destptr);
20349 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
20350 ix86_adjust_counter (count, 1);
20351 emit_label (label);
20352 LABEL_NUSES (label) = 1;
20354 if (align <= 2 && desired_alignment > 2)
20356 rtx label = ix86_expand_aligntest (destptr, 2, false);
20357 destmem = change_address (destmem, HImode, destptr);
20358 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
20359 ix86_adjust_counter (count, 2);
20360 emit_label (label);
20361 LABEL_NUSES (label) = 1;
20363 if (align <= 4 && desired_alignment > 4)
20365 rtx label = ix86_expand_aligntest (destptr, 4, false);
20366 destmem = change_address (destmem, SImode, destptr);
20367 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
20368 ix86_adjust_counter (count, 4);
20369 emit_label (label);
20370 LABEL_NUSES (label) = 1;
20372 gcc_assert (desired_alignment <= 8);
20375 /* Store enough into DST to align DST, which is known to be aligned by
20376 ALIGN, to DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
20378 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
20379 int desired_align, int align_bytes)
20382 rtx dst_size = MEM_SIZE (dst);
20383 if (align_bytes & 1)
20385 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20387 emit_insn (gen_strset (destreg, dst,
20388 gen_lowpart (QImode, value)));
20390 if (align_bytes & 2)
20392 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20393 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20394 set_mem_align (dst, 2 * BITS_PER_UNIT);
20396 emit_insn (gen_strset (destreg, dst,
20397 gen_lowpart (HImode, value)));
20399 if (align_bytes & 4)
20401 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20402 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20403 set_mem_align (dst, 4 * BITS_PER_UNIT);
20405 emit_insn (gen_strset (destreg, dst,
20406 gen_lowpart (SImode, value)));
20408 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20409 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20410 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20412 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20416 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
20417 static enum stringop_alg
20418 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
20419 int *dynamic_check)
20421 const struct stringop_algs * algs;
20422 bool optimize_for_speed;
20423 /* Algorithms using the rep prefix want at least edi and ecx;
20424 additionally, memset wants eax and memcpy wants esi. Don't
20425 consider such algorithms if the user has appropriated those
20426 registers for their own purposes. */
20427 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
20429 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
20431 #define ALG_USABLE_P(alg) (rep_prefix_usable \
20432 || (alg != rep_prefix_1_byte \
20433 && alg != rep_prefix_4_byte \
20434 && alg != rep_prefix_8_byte))
20435 const struct processor_costs *cost;
20437 /* Even if the string operation call is cold, we still might spend a lot
20438 of time processing large blocks. */
20439 if (optimize_function_for_size_p (cfun)
20440 || (optimize_insn_for_size_p ()
20441 && expected_size != -1 && expected_size < 256))
20442 optimize_for_speed = false;
20444 optimize_for_speed = true;
20446 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
20448 *dynamic_check = -1;
20450 algs = &cost->memset[TARGET_64BIT != 0];
20452 algs = &cost->memcpy[TARGET_64BIT != 0];
20453 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
20454 return stringop_alg;
20455 /* rep; movq or rep; movl is the smallest variant. */
20456 else if (!optimize_for_speed)
20458 if (!count || (count & 3))
20459 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
20461 return rep_prefix_usable ? rep_prefix_4_byte : loop;
20463 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.
20465 else if (expected_size != -1 && expected_size < 4)
20466 return loop_1_byte;
20467 else if (expected_size != -1)
20470 enum stringop_alg alg = libcall;
20471 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20473 /* We get here if the algorithms that were not libcall-based
20474 were rep-prefix based and we are unable to use rep prefixes
20475 based on global register usage. Break out of the loop and
20476 use the heuristic below. */
20477 if (algs->size[i].max == 0)
20479 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
20481 enum stringop_alg candidate = algs->size[i].alg;
20483 if (candidate != libcall && ALG_USABLE_P (candidate))
20485 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
20486 last non-libcall inline algorithm. */
20487 if (TARGET_INLINE_ALL_STRINGOPS)
20489 /* When the current size is best copied by a libcall, but we are
20490 still forced to inline, run the heuristic below that will pick
20491 code for medium-sized blocks. */
20492 if (alg != libcall)
20496 else if (ALG_USABLE_P (candidate))
20500 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
20502 /* When asked to inline the call anyway, try to pick a meaningful choice.
20503 We look for the maximal size of a block that is faster to copy by hand,
20504 and take blocks of at most that size, guessing that the average size
20505 will be roughly half of the block.
20507 If this turns out to be bad, we might simply specify the preferred
20508 choice in ix86_costs. */
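/* Illustrative numbers: if the largest block the cost table considers
   faster to copy by hand is 4096 bytes, we re-run decide_alg with
   expected_size = 2048, and with -minline-stringops-dynamically also
   set *dynamic_check = 4096 so that larger blocks take the libcall at
   run time.  */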
20509 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20510 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
20513 enum stringop_alg alg;
20515 bool any_alg_usable_p = true;
20517 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20519 enum stringop_alg candidate = algs->size[i].alg;
20520 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
20522 if (candidate != libcall && candidate
20523 && ALG_USABLE_P (candidate))
20524 max = algs->size[i].max;
20526 /* If there aren't any usable algorithms, then recursing on
20527 smaller sizes isn't going to find anything. Just return the
20528 simple byte-at-a-time copy loop. */
20529 if (!any_alg_usable_p)
20531 /* Pick something reasonable. */
20532 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20533 *dynamic_check = 128;
20534 return loop_1_byte;
20538 alg = decide_alg (count, max / 2, memset, dynamic_check);
20539 gcc_assert (*dynamic_check == -1);
20540 gcc_assert (alg != libcall);
20541 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20542 *dynamic_check = max;
20545 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
20546 #undef ALG_USABLE_P
20549 /* Decide on alignment. We know that the operand is already aligned to ALIGN
20550 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
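/* For example, alg = unrolled_loop yields GET_MODE_SIZE (Pmode), i.e. 4
   on ia32 and 8 on x86-64, so the prologue aligns the destination to
   that boundary before the main loop runs.  */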
20552 decide_alignment (int align,
20553 enum stringop_alg alg,
20556 int desired_align = 0;
20560 gcc_unreachable ();
20562 case unrolled_loop:
20563 desired_align = GET_MODE_SIZE (Pmode);
20565 case rep_prefix_8_byte:
20568 case rep_prefix_4_byte:
20569 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
20570 copying a whole cache line at once. */
20571 if (TARGET_PENTIUMPRO)
20576 case rep_prefix_1_byte:
20577 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
20578 copying a whole cache line at once. */
20579 if (TARGET_PENTIUMPRO)
20593 if (desired_align < align)
20594 desired_align = align;
20595 if (expected_size != -1 && expected_size < 4)
20596 desired_align = align;
20597 return desired_align;
20600 /* Return the smallest power of 2 greater than VAL. */
20602 smallest_pow2_greater_than (int val)
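/* The body is elided above; a minimal sketch consistent with the comment
   (so that e.g. 5 -> 8 and 8 -> 16, the result being strictly greater):

     int ret = 1;
     while (ret <= val)
       ret <<= 1;
     return ret;  */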
20610 /* Expand string move (memcpy) operation. Use i386 string operations when
20611 profitable. expand_setmem contains similar code. The code depends upon
20612 architecture, block size and alignment, but always has the same
20613 overall structure:
20615 1) Prologue guard: a conditional that jumps to the epilogue for small
20616 blocks that can be handled by the epilogue alone. This is faster, but
20617 also needed for correctness, since the prologue assumes the block is
20618 larger than the desired alignment.
20620 An optional dynamic check for size, with a libcall for large
20621 blocks, is emitted here too with -minline-stringops-dynamically.
20623 2) Prologue: copy the first few bytes in order to get the destination
20624 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less than
20625 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
20626 We emit either a jump tree on power-of-two-sized blocks, or a byte loop.
20628 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
20629 with the specified algorithm.
20631 4) Epilogue: code copying the tail of the block that is too small to be
20632 handled by the main body (or up to the size guarded by the prologue guard). */
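/* Schematically (an illustration, not literal emitted code), a copy of N
   bytes using the rep_prefix_8_byte algorithm with DESIRED_ALIGN = 8 is:

     if (N < epilogue_size_needed) goto epilogue;   -- step 1
     copy 1/2/4 bytes until dst is 8-byte aligned   -- step 2
     rep movsq                                      -- step 3
   epilogue:
     copy the remaining N & 7 bytes                 -- step 4  */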
20635 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
20636 rtx expected_align_exp, rtx expected_size_exp)
20642 rtx jump_around_label = NULL;
20643 HOST_WIDE_INT align = 1;
20644 unsigned HOST_WIDE_INT count = 0;
20645 HOST_WIDE_INT expected_size = -1;
20646 int size_needed = 0, epilogue_size_needed;
20647 int desired_align = 0, align_bytes = 0;
20648 enum stringop_alg alg;
20650 bool need_zero_guard = false;
20652 if (CONST_INT_P (align_exp))
20653 align = INTVAL (align_exp);
20654 /* i386 can do misaligned access at a reasonably increased cost. */
20655 if (CONST_INT_P (expected_align_exp)
20656 && INTVAL (expected_align_exp) > align)
20657 align = INTVAL (expected_align_exp);
20658 /* ALIGN is the minimum of destination and source alignment, but we care here
20659 just about destination alignment. */
20660 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
20661 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
20663 if (CONST_INT_P (count_exp))
20664 count = expected_size = INTVAL (count_exp);
20665 if (CONST_INT_P (expected_size_exp) && count == 0)
20666 expected_size = INTVAL (expected_size_exp);
20668 /* Make sure we don't need to care about overflow later on. */
20669 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
20672 /* Step 0: Decide on preferred algorithm, desired alignment and
20673 size of chunks to be copied by main loop. */
20675 alg = decide_alg (count, expected_size, false, &dynamic_check);
20676 desired_align = decide_alignment (align, alg, expected_size);
20678 if (!TARGET_ALIGN_STRINGOPS)
20679 align = desired_align;
20681 if (alg == libcall)
20683 gcc_assert (alg != no_stringop);
20685 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
20686 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
20687 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
20692 gcc_unreachable ();
20694 need_zero_guard = true;
20695 size_needed = GET_MODE_SIZE (Pmode);
20697 case unrolled_loop:
20698 need_zero_guard = true;
20699 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
20701 case rep_prefix_8_byte:
20704 case rep_prefix_4_byte:
20707 case rep_prefix_1_byte:
20711 need_zero_guard = true;
20716 epilogue_size_needed = size_needed;
20718 /* Step 1: Prologue guard. */
20720 /* Alignment code needs count to be in register. */
20721 if (CONST_INT_P (count_exp) && desired_align > align)
20723 if (INTVAL (count_exp) > desired_align
20724 && INTVAL (count_exp) > size_needed)
20727 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
20728 if (align_bytes <= 0)
20731 align_bytes = desired_align - align_bytes;
20733 if (align_bytes == 0)
20734 count_exp = force_reg (counter_mode (count_exp), count_exp);
20736 gcc_assert (desired_align >= 1 && align >= 1);
20738 /* Ensure that alignment prologue won't copy past end of block. */
20739 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
20741 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
20742 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
20743 Make sure it is a power of 2. */
20744 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
20748 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
20750 /* If main algorithm works on QImode, no epilogue is needed.
20751 For small sizes just don't align anything. */
20752 if (size_needed == 1)
20753 desired_align = align;
20760 label = gen_label_rtx ();
20761 emit_cmp_and_jump_insns (count_exp,
20762 GEN_INT (epilogue_size_needed),
20763 LTU, 0, counter_mode (count_exp), 1, label);
20764 if (expected_size == -1 || expected_size < epilogue_size_needed)
20765 predict_jump (REG_BR_PROB_BASE * 60 / 100);
20767 predict_jump (REG_BR_PROB_BASE * 20 / 100);
20771 /* Emit code to decide at runtime whether a library call or inline code
20772 should be used. */
20773 if (dynamic_check != -1)
20775 if (CONST_INT_P (count_exp))
20777 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
20779 emit_block_move_via_libcall (dst, src, count_exp, false);
20780 count_exp = const0_rtx;
20786 rtx hot_label = gen_label_rtx ();
20787 jump_around_label = gen_label_rtx ();
20788 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
20789 LEU, 0, GET_MODE (count_exp), 1, hot_label);
20790 predict_jump (REG_BR_PROB_BASE * 90 / 100);
20791 emit_block_move_via_libcall (dst, src, count_exp, false);
20792 emit_jump (jump_around_label);
20793 emit_label (hot_label);
20797 /* Step 2: Alignment prologue. */
20799 if (desired_align > align)
20801 if (align_bytes == 0)
20803 /* Except for the first move in the epilogue, we no longer know
20804 the constant offset in aliasing info. It doesn't seem worth
20805 the pain to maintain it for the first move, so throw away
20806 the info early. */
20807 src = change_address (src, BLKmode, srcreg);
20808 dst = change_address (dst, BLKmode, destreg);
20809 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
20814 /* If we know how many bytes need to be stored before dst is
20815 sufficiently aligned, maintain aliasing info accurately. */
20816 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
20817 desired_align, align_bytes);
20818 count_exp = plus_constant (count_exp, -align_bytes);
20819 count -= align_bytes;
20821 if (need_zero_guard
20822 && (count < (unsigned HOST_WIDE_INT) size_needed
20823 || (align_bytes == 0
20824 && count < ((unsigned HOST_WIDE_INT) size_needed
20825 + desired_align - align))))
20827 /* It is possible that we copied enough so the main loop will not
20828 execute. */
20829 gcc_assert (size_needed > 1);
20830 if (label == NULL_RTX)
20831 label = gen_label_rtx ();
20832 emit_cmp_and_jump_insns (count_exp,
20833 GEN_INT (size_needed),
20834 LTU, 0, counter_mode (count_exp), 1, label);
20835 if (expected_size == -1
20836 || expected_size < (desired_align - align) / 2 + size_needed)
20837 predict_jump (REG_BR_PROB_BASE * 20 / 100);
20839 predict_jump (REG_BR_PROB_BASE * 60 / 100);
20842 if (label && size_needed == 1)
20844 emit_label (label);
20845 LABEL_NUSES (label) = 1;
20847 epilogue_size_needed = 1;
20849 else if (label == NULL_RTX)
20850 epilogue_size_needed = size_needed;
20852 /* Step 3: Main loop. */
20858 gcc_unreachable ();
20860 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20861 count_exp, QImode, 1, expected_size);
20864 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20865 count_exp, Pmode, 1, expected_size);
20867 case unrolled_loop:
20868 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have
20869 enough registers for 4 temporaries anyway. */
20870 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20871 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
20874 case rep_prefix_8_byte:
20875 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20878 case rep_prefix_4_byte:
20879 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20882 case rep_prefix_1_byte:
20883 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20887 /* Properly adjust the offsets of the src and dest memory for aliasing. */
20888 if (CONST_INT_P (count_exp))
20890 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
20891 (count / size_needed) * size_needed);
20892 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
20893 (count / size_needed) * size_needed);
20897 src = change_address (src, BLKmode, srcreg);
20898 dst = change_address (dst, BLKmode, destreg);
20901 /* Step 4: Epilogue to copy the remaining bytes. */
20905 /* When the main loop is done, COUNT_EXP might hold the original count,
20906 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
20907 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
20908 bytes. Compensate if needed. */
20910 if (size_needed < epilogue_size_needed)
20913 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
20914 GEN_INT (size_needed - 1), count_exp, 1,
20916 if (tmp != count_exp)
20917 emit_move_insn (count_exp, tmp);
20919 emit_label (label);
20920 LABEL_NUSES (label) = 1;
20923 if (count_exp != const0_rtx && epilogue_size_needed > 1)
20924 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
20925 epilogue_size_needed);
20926 if (jump_around_label)
20927 emit_label (jump_around_label);
20931 /* Helper function for memset. For the QImode value 0xXY produce
20932 0xXYXYXYXY of the width specified by MODE. This is essentially
20933 a * 0x01010101, but we can do slightly better than
20934 synth_mult by unwinding the sequence by hand on CPUs with
20935 slow multiply. */
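/* For example, for val = 0xAB and SImode the shift/or sequence below
   computes 0xAB -> 0xABAB -> 0xABABABAB (reg |= reg << 8, then
   reg |= reg << 16), with DImode adding one more reg |= reg << 32 step.  */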
20937 promote_duplicated_reg (enum machine_mode mode, rtx val)
20939 enum machine_mode valmode = GET_MODE (val);
20941 int nops = mode == DImode ? 3 : 2;
20943 gcc_assert (mode == SImode || mode == DImode);
20944 if (val == const0_rtx)
20945 return copy_to_mode_reg (mode, const0_rtx);
20946 if (CONST_INT_P (val))
20948 HOST_WIDE_INT v = INTVAL (val) & 255;
20952 if (mode == DImode)
20953 v |= (v << 16) << 16;
20954 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
20957 if (valmode == VOIDmode)
20959 if (valmode != QImode)
20960 val = gen_lowpart (QImode, val);
20961 if (mode == QImode)
20963 if (!TARGET_PARTIAL_REG_STALL)
20965 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
20966 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
20967 <= (ix86_cost->shift_const + ix86_cost->add) * nops
20968 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
20970 rtx reg = convert_modes (mode, QImode, val, true);
20971 tmp = promote_duplicated_reg (mode, const1_rtx);
20972 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
20977 rtx reg = convert_modes (mode, QImode, val, true);
20979 if (!TARGET_PARTIAL_REG_STALL)
20980 if (mode == SImode)
20981 emit_insn (gen_movsi_insv_1 (reg, reg));
20983 emit_insn (gen_movdi_insv_1 (reg, reg));
20986 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
20987 NULL, 1, OPTAB_DIRECT);
20989 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
20991 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
20992 NULL, 1, OPTAB_DIRECT);
20993 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
20994 if (mode == SImode)
20996 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
20997 NULL, 1, OPTAB_DIRECT);
20998 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21003 /* Duplicate value VAL using promote_duplicated_reg into the maximal size
21004 that will be needed by the main loop copying SIZE_NEEDED chunks and by
21005 the prologue getting alignment from ALIGN to DESIRED_ALIGN. */
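/* For example, on x86-64 a main loop with size_needed = 8 gets a DImode
   0xXYXYXYXYXYXYXYXY, while size_needed = 2 with no extra alignment work
   only needs an HImode 0xXYXY.  */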
21007 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
21012 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
21013 promoted_val = promote_duplicated_reg (DImode, val);
21014 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
21015 promoted_val = promote_duplicated_reg (SImode, val);
21016 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
21017 promoted_val = promote_duplicated_reg (HImode, val);
21019 promoted_val = val;
21021 return promoted_val;
21024 /* Expand string set (memset) operation. Use i386 string operations when
21025 profitable. See the expand_movmem comment for an explanation of the
21026 individual steps performed. */
21028 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
21029 rtx expected_align_exp, rtx expected_size_exp)
21034 rtx jump_around_label = NULL;
21035 HOST_WIDE_INT align = 1;
21036 unsigned HOST_WIDE_INT count = 0;
21037 HOST_WIDE_INT expected_size = -1;
21038 int size_needed = 0, epilogue_size_needed;
21039 int desired_align = 0, align_bytes = 0;
21040 enum stringop_alg alg;
21041 rtx promoted_val = NULL;
21042 bool force_loopy_epilogue = false;
21044 bool need_zero_guard = false;
21046 if (CONST_INT_P (align_exp))
21047 align = INTVAL (align_exp);
21048 /* i386 can do misaligned access at a reasonably increased cost. */
21049 if (CONST_INT_P (expected_align_exp)
21050 && INTVAL (expected_align_exp) > align)
21051 align = INTVAL (expected_align_exp);
21052 if (CONST_INT_P (count_exp))
21053 count = expected_size = INTVAL (count_exp);
21054 if (CONST_INT_P (expected_size_exp) && count == 0)
21055 expected_size = INTVAL (expected_size_exp);
21057 /* Make sure we don't need to care about overflow later on. */
21058 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21061 /* Step 0: Decide on preferred algorithm, desired alignment and
21062 size of chunks to be copied by main loop. */
21064 alg = decide_alg (count, expected_size, true, &dynamic_check);
21065 desired_align = decide_alignment (align, alg, expected_size);
21067 if (!TARGET_ALIGN_STRINGOPS)
21068 align = desired_align;
21070 if (alg == libcall)
21072 gcc_assert (alg != no_stringop);
21074 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
21075 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21080 gcc_unreachable ();
21082 need_zero_guard = true;
21083 size_needed = GET_MODE_SIZE (Pmode);
21085 case unrolled_loop:
21086 need_zero_guard = true;
21087 size_needed = GET_MODE_SIZE (Pmode) * 4;
21089 case rep_prefix_8_byte:
21092 case rep_prefix_4_byte:
21095 case rep_prefix_1_byte:
21099 need_zero_guard = true;
21103 epilogue_size_needed = size_needed;
21105 /* Step 1: Prologue guard. */
21107 /* Alignment code needs count to be in register. */
21108 if (CONST_INT_P (count_exp) && desired_align > align)
21110 if (INTVAL (count_exp) > desired_align
21111 && INTVAL (count_exp) > size_needed)
21114 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21115 if (align_bytes <= 0)
21118 align_bytes = desired_align - align_bytes;
21120 if (align_bytes == 0)
21122 enum machine_mode mode = SImode;
21123 if (TARGET_64BIT && (count & ~0xffffffff))
21125 count_exp = force_reg (mode, count_exp);
21128 /* Do the cheap promotion to allow better CSE across the
21129 main loop and epilogue (i.e. one load of the big constant in
21130 front of all the code). */
21131 if (CONST_INT_P (val_exp))
21132 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21133 desired_align, align);
21134 /* Ensure that alignment prologue won't copy past end of block. */
21135 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21137 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21138 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21139 Make sure it is a power of 2. */
21140 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21142 /* To improve performance of small blocks, we jump around the VAL
21143 promoting code. This means that if the promoted VAL is not constant,
21144 we might not use it in the epilogue and have to use the byte
21145 loop variant instead. */
21146 if (epilogue_size_needed > 2 && !promoted_val)
21147 force_loopy_epilogue = true;
21150 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21152 /* If main algorithm works on QImode, no epilogue is needed.
21153 For small sizes just don't align anything. */
21154 if (size_needed == 1)
21155 desired_align = align;
21162 label = gen_label_rtx ();
21163 emit_cmp_and_jump_insns (count_exp,
21164 GEN_INT (epilogue_size_needed),
21165 LTU, 0, counter_mode (count_exp), 1, label);
21166 if (expected_size == -1 || expected_size <= epilogue_size_needed)
21167 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21169 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21172 if (dynamic_check != -1)
21174 rtx hot_label = gen_label_rtx ();
21175 jump_around_label = gen_label_rtx ();
21176 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21177 LEU, 0, counter_mode (count_exp), 1, hot_label);
21178 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21179 set_storage_via_libcall (dst, count_exp, val_exp, false);
21180 emit_jump (jump_around_label);
21181 emit_label (hot_label);
21184 /* Step 2: Alignment prologue. */
21186 /* Do the expensive promotion once we branched off the small blocks. */
21188 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21189 desired_align, align);
21190 gcc_assert (desired_align >= 1 && align >= 1);
21192 if (desired_align > align)
21194 if (align_bytes == 0)
21196 /* Except for the first move in the epilogue, we no longer know
21197 the constant offset in aliasing info. It doesn't seem worth
21198 the pain to maintain it for the first move, so throw away
21199 the info early. */
21200 dst = change_address (dst, BLKmode, destreg);
21201 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
21206 /* If we know how many bytes need to be stored before dst is
21207 sufficiently aligned, maintain aliasing info accurately. */
21208 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
21209 desired_align, align_bytes);
21210 count_exp = plus_constant (count_exp, -align_bytes);
21211 count -= align_bytes;
21213 if (need_zero_guard
21214 && (count < (unsigned HOST_WIDE_INT) size_needed
21215 || (align_bytes == 0
21216 && count < ((unsigned HOST_WIDE_INT) size_needed
21217 + desired_align - align))))
21219 /* It is possible that we copied enough so the main loop will not
21220 execute. */
21221 gcc_assert (size_needed > 1);
21222 if (label == NULL_RTX)
21223 label = gen_label_rtx ();
21224 emit_cmp_and_jump_insns (count_exp,
21225 GEN_INT (size_needed),
21226 LTU, 0, counter_mode (count_exp), 1, label);
21227 if (expected_size == -1
21228 || expected_size < (desired_align - align) / 2 + size_needed)
21229 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21231 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21234 if (label && size_needed == 1)
21236 emit_label (label);
21237 LABEL_NUSES (label) = 1;
21239 promoted_val = val_exp;
21240 epilogue_size_needed = 1;
21242 else if (label == NULL_RTX)
21243 epilogue_size_needed = size_needed;
21245 /* Step 3: Main loop. */
21251 gcc_unreachable ();
21253 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21254 count_exp, QImode, 1, expected_size);
21257 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21258 count_exp, Pmode, 1, expected_size);
21260 case unrolled_loop:
21261 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21262 count_exp, Pmode, 4, expected_size);
21264 case rep_prefix_8_byte:
21265 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21268 case rep_prefix_4_byte:
21269 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21272 case rep_prefix_1_byte:
21273 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21277 /* Properly adjust the offset of the dest memory for aliasing. */
21278 if (CONST_INT_P (count_exp))
21279 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21280 (count / size_needed) * size_needed);
21282 dst = change_address (dst, BLKmode, destreg);
21284 /* Step 4: Epilogue to copy the remaining bytes. */
21288 /* When the main loop is done, COUNT_EXP might hold the original count,
21289 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
21290 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
21291 bytes. Compensate if needed. */
21293 if (size_needed < epilogue_size_needed)
21296 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21297 GEN_INT (size_needed - 1), count_exp, 1,
21299 if (tmp != count_exp)
21300 emit_move_insn (count_exp, tmp);
21302 emit_label (label);
21303 LABEL_NUSES (label) = 1;
21306 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21308 if (force_loopy_epilogue)
21309 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
21310 epilogue_size_needed);
21312 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
21313 epilogue_size_needed);
21315 if (jump_around_label)
21316 emit_label (jump_around_label);
21320 /* Expand the appropriate insns for doing strlen if not just doing
21321 repnz; scasb
21323 out = result, initialized with the start address
21324 align_rtx = alignment of the address.
21325 scratch = scratch register, initialized with the start address when
21326 not aligned, otherwise undefined
21328 This is just the body. It needs the initializations mentioned above and
21329 some address computing at the end. These things are done in i386.md. */
21332 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
21336 rtx align_2_label = NULL_RTX;
21337 rtx align_3_label = NULL_RTX;
21338 rtx align_4_label = gen_label_rtx ();
21339 rtx end_0_label = gen_label_rtx ();
21341 rtx tmpreg = gen_reg_rtx (SImode);
21342 rtx scratch = gen_reg_rtx (SImode);
21346 if (CONST_INT_P (align_rtx))
21347 align = INTVAL (align_rtx);
21349 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
21351 /* Is there a known alignment and is it less than 4? */
21354 rtx scratch1 = gen_reg_rtx (Pmode);
21355 emit_move_insn (scratch1, out);
21356 /* Is there a known alignment and is it not 2? */
21359 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
21360 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
21362 /* Leave just the 3 lower bits. */
21363 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
21364 NULL_RTX, 0, OPTAB_WIDEN);
21366 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21367 Pmode, 1, align_4_label);
21368 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
21369 Pmode, 1, align_2_label);
21370 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
21371 Pmode, 1, align_3_label);
21375 /* Since the alignment is 2, we have to check 2 or 0 bytes;
21376 check whether the pointer is aligned to a 4-byte boundary. */
21378 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
21379 NULL_RTX, 0, OPTAB_WIDEN);
21381 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21382 Pmode, 1, align_4_label);
21385 mem = change_address (src, QImode, out);
21387 /* Now compare the bytes. */
21389 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
21390 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
21391 QImode, 1, end_0_label);
21393 /* Increment the address. */
21394 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21396 /* Not needed with an alignment of 2 */
21399 emit_label (align_2_label);
21401 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21404 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21406 emit_label (align_3_label);
21409 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21412 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21415 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
21416 align this loop; that only enlarges the code and does not help
21417 performance. */
21418 emit_label (align_4_label);
21420 mem = change_address (src, SImode, out);
21421 emit_move_insn (scratch, mem);
21422 emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));
21424 /* This formula yields a nonzero result iff one of the bytes is zero.
21425 This saves three branches inside the loop and many cycles. */
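/* Concretely, the sequence below computes
     tmpreg = (scratch - 0x01010101) & ~scratch & 0x80808080;
   the standard has-zero-byte test: a zero byte becomes 0xFF after the
   subtraction and its sign bit survives the ~scratch mask, while no
   nonzero byte can pass both masks, so tmpreg != 0 iff some byte of
   SCRATCH is zero.  */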
21427 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
21428 emit_insn (gen_one_cmplsi2 (scratch, scratch));
21429 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
21430 emit_insn (gen_andsi3 (tmpreg, tmpreg,
21431 gen_int_mode (0x80808080, SImode)));
21432 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
21437 rtx reg = gen_reg_rtx (SImode);
21438 rtx reg2 = gen_reg_rtx (Pmode);
21439 emit_move_insn (reg, tmpreg);
21440 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
21442 /* If zero is not in the first two bytes, move two bytes forward. */
21443 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21444 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21445 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21446 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
21447 gen_rtx_IF_THEN_ELSE (SImode, tmp,
21450 /* Emit lea manually to avoid clobbering of flags. */
21451 emit_insn (gen_rtx_SET (SImode, reg2,
21452 gen_rtx_PLUS (Pmode, out, const2_rtx)));
21454 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21455 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21456 emit_insn (gen_rtx_SET (VOIDmode, out,
21457 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
21463 rtx end_2_label = gen_label_rtx ();
21464 /* Is zero in the first two bytes? */
21466 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21467 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21468 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
21469 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
21470 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
21472 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
21473 JUMP_LABEL (tmp) = end_2_label;
21475 /* Not in the first two. Move two bytes forward. */
21476 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
21477 emit_insn (ix86_gen_add3 (out, out, const2_rtx));
21479 emit_label (end_2_label);
21483 /* Avoid branch in fixing the byte. */
21484 tmpreg = gen_lowpart (QImode, tmpreg);
21485 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
21486 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
21487 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
21488 emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
21490 emit_label (end_0_label);
21493 /* Expand strlen. */
21496 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
21498 rtx addr, scratch1, scratch2, scratch3, scratch4;
21500 /* The generic case of the strlen expander is long. Avoid expanding it
21501 unless TARGET_INLINE_ALL_STRINGOPS. */
21503 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21504 && !TARGET_INLINE_ALL_STRINGOPS
21505 && !optimize_insn_for_size_p ()
21506 && (!CONST_INT_P (align) || INTVAL (align) < 4))
21509 addr = force_reg (Pmode, XEXP (src, 0));
21510 scratch1 = gen_reg_rtx (Pmode);
21512 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21513 && !optimize_insn_for_size_p ())
21515 /* Well, it seems that some optimizers do not combine a call like
21516 foo (strlen (bar), strlen (bar));
21517 when the move and the subtraction are done here. The length is
21518 calculated just once when these instructions are done inside
21519 output_strlen_unroll (). But since &bar[strlen (bar)] is often
21520 used, and this uses one fewer register for the lifetime of
21521 output_strlen_unroll (), this is better. */
21523 emit_move_insn (out, addr);
21525 ix86_expand_strlensi_unroll_1 (out, src, align);
21527 /* strlensi_unroll_1 returns the address of the zero at the end of
21528 the string, like memchr(), so compute the length by subtracting
21529 the start address. */
21530 emit_insn (ix86_gen_sub3 (out, out, addr));
21536 /* Can't use this if the user has appropriated eax, ecx, or edi. */
21537 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
21540 scratch2 = gen_reg_rtx (Pmode);
21541 scratch3 = gen_reg_rtx (Pmode);
21542 scratch4 = force_reg (Pmode, constm1_rtx);
21544 emit_move_insn (scratch3, addr);
21545 eoschar = force_reg (QImode, eoschar);
21547 src = replace_equiv_address_nv (src, scratch3);
21549 /* If .md starts supporting :P, this can be done in .md. */
21550 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
21551 scratch4), UNSPEC_SCAS);
21552 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
21553 emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
21554 emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
21559 /* For a given symbol (function), construct code to compute the address
21560 of its PLT entry in the large x86-64 PIC model. */
21562 construct_plt_address (rtx symbol)
21564 rtx tmp = gen_reg_rtx (Pmode);
21565 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
21567 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
21568 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
21570 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
21571 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
21576 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
21578 rtx pop, int sibcall)
21580 rtx use = NULL, call;
21582 if (pop == const0_rtx)
21584 gcc_assert (!TARGET_64BIT || !pop);
21586 if (TARGET_MACHO && !TARGET_64BIT)
21589 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
21590 fnaddr = machopic_indirect_call_target (fnaddr);
21595 /* Static functions and indirect calls don't need the pic register. */
21596 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
21597 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21598 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
21599 use_reg (&use, pic_offset_table_rtx);
21602 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
21604 rtx al = gen_rtx_REG (QImode, AX_REG);
21605 emit_move_insn (al, callarg2);
21606 use_reg (&use, al);
21609 if (ix86_cmodel == CM_LARGE_PIC
21611 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21612 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
21613 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
21615 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
21616 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
21618 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
21619 fnaddr = gen_rtx_MEM (QImode, fnaddr);
21622 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
21624 call = gen_rtx_SET (VOIDmode, retval, call);
21627 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
21628 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
21629 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
21632 && ix86_cfun_abi () == MS_ABI
21633 && (!callarg2 || INTVAL (callarg2) != -2))
21635 /* We need to represent that the SI and DI registers are clobbered
21636 by SYSV calls. */
21637 static int clobbered_registers[] = {
21638 XMM6_REG, XMM7_REG, XMM8_REG,
21639 XMM9_REG, XMM10_REG, XMM11_REG,
21640 XMM12_REG, XMM13_REG, XMM14_REG,
21641 XMM15_REG, SI_REG, DI_REG
21644 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
21645 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
21646 UNSPEC_MS_TO_SYSV_CALL);
21650 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
21651 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
21654 (SSE_REGNO_P (clobbered_registers[i])
21656 clobbered_registers[i]));
21658 call = gen_rtx_PARALLEL (VOIDmode,
21659 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
21663 /* Add UNSPEC_CALL_NEEDS_VZEROUPPER decoration. */
21664 if (TARGET_VZEROUPPER && cfun->machine->use_avx256_p)
21669 cfun->machine->use_vzeroupper_p = 1;
21670 if (cfun->machine->callee_pass_avx256_p)
21672 if (cfun->machine->callee_return_avx256_p)
21673 avx256 = callee_return_pass_avx256;
21675 avx256 = callee_pass_avx256;
21677 else if (cfun->machine->callee_return_avx256_p)
21678 avx256 = callee_return_avx256;
21680 avx256 = call_no_avx256;
21682 if (reload_completed)
21683 emit_insn (gen_avx_vzeroupper (GEN_INT (avx256)));
21686 unspec = gen_rtx_UNSPEC (VOIDmode,
21687 gen_rtvec (1, GEN_INT (avx256)),
21688 UNSPEC_CALL_NEEDS_VZEROUPPER);
21689 call = gen_rtx_PARALLEL (VOIDmode,
21690 gen_rtvec (2, call, unspec));
21694 call = emit_call_insn (call);
21696 CALL_INSN_FUNCTION_USAGE (call) = use;
21702 ix86_split_call_vzeroupper (rtx insn, rtx vzeroupper)
21704 rtx call = XVECEXP (PATTERN (insn), 0, 0);
21705 emit_insn (gen_avx_vzeroupper (vzeroupper));
21706 emit_call_insn (call);
21709 /* Output the assembly for a call instruction. */
21712 ix86_output_call_insn (rtx insn, rtx call_op, int addr_op)
21714 bool direct_p = constant_call_address_operand (call_op, Pmode);
21715 bool seh_nop_p = false;
21717 gcc_assert (addr_op == 0 || addr_op == 1);
21719 if (SIBLING_CALL_P (insn))
21722 return addr_op ? "jmp\t%P1" : "jmp\t%P0";
21723 /* SEH epilogue detection requires the indirect branch case
21724 to include REX.W. */
21725 else if (TARGET_SEH)
21726 return addr_op ? "rex.W jmp %A1" : "rex.W jmp %A0";
21728 return addr_op ? "jmp\t%A1" : "jmp\t%A0";
21731 /* SEH unwinding can require an extra nop to be emitted in several
21732 circumstances. Determine if we have one of those. */
21737 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
21739 /* If we get to another real insn, we don't need the nop. */
21743 /* If we get to the epilogue note, prevent a catch region from
21744 being adjacent to the standard epilogue sequence. With non-call
21745 exceptions, we'll have done this during epilogue emission. */
21746 if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
21747 && !flag_non_call_exceptions
21748 && !can_throw_internal (insn))
21755 /* If we didn't find a real insn following the call, prevent the
21756 unwinder from looking into the next function. */
21764 return addr_op ? "call\t%P1\n\tnop" : "call\t%P0\n\tnop";
21766 return addr_op ? "call\t%P1" : "call\t%P0";
21771 return addr_op ? "call\t%A1\n\tnop" : "call\t%A0\n\tnop";
21773 return addr_op ? "call\t%A1" : "call\t%A0";
21777 /* Clear stack slot assignments remembered from previous functions.
21778 This is called from INIT_EXPANDERS once before RTL is emitted for each
21779 function. */
21781 static struct machine_function *
21782 ix86_init_machine_status (void)
21784 struct machine_function *f;
21786 f = ggc_alloc_cleared_machine_function ();
21787 f->use_fast_prologue_epilogue_nregs = -1;
21788 f->tls_descriptor_call_expanded_p = 0;
21789 f->call_abi = ix86_abi;
21794 /* Return a MEM corresponding to a stack slot with mode MODE.
21795 Allocate a new slot if necessary.
21797 The RTL for a function can have several slots available: N is
21798 which slot to use. */
21801 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
21803 struct stack_local_entry *s;
21805 gcc_assert (n < MAX_386_STACK_LOCALS);
21807 /* Virtual slot is valid only before vregs are instantiated. */
21808 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
21810 for (s = ix86_stack_locals; s; s = s->next)
21811 if (s->mode == mode && s->n == n)
21812 return copy_rtx (s->rtl);
21814 s = ggc_alloc_stack_local_entry ();
21817 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
21819 s->next = ix86_stack_locals;
21820 ix86_stack_locals = s;
21824 /* Construct the SYMBOL_REF for the tls_get_addr function. */
21826 static GTY(()) rtx ix86_tls_symbol;
21828 ix86_tls_get_addr (void)
21831 if (!ix86_tls_symbol)
21833 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
21834 (TARGET_ANY_GNU_TLS
21836 ? "___tls_get_addr"
21837 : "__tls_get_addr");
21840 return ix86_tls_symbol;
21843 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
21845 static GTY(()) rtx ix86_tls_module_base_symbol;
21847 ix86_tls_module_base (void)
21850 if (!ix86_tls_module_base_symbol)
21852 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
21853 "_TLS_MODULE_BASE_");
21854 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
21855 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
21858 return ix86_tls_module_base_symbol;
21861 /* Calculate the length of the memory address in the instruction
21862 encoding. Does not include the one-byte modrm, opcode, or prefix. */
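/* For example, (%eax) adds 0 bytes; (%esp) adds 1 (SIB byte);
   4(%ebp) adds 1 (disp8, since ebp as a base always carries a
   displacement); and a bare disp32 adds 4, plus the extra SIB byte
   required in 64-bit mode as described below.  */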
21865 memory_address_length (rtx addr)
21867 struct ix86_address parts;
21868 rtx base, index, disp;
21872 if (GET_CODE (addr) == PRE_DEC
21873 || GET_CODE (addr) == POST_INC
21874 || GET_CODE (addr) == PRE_MODIFY
21875 || GET_CODE (addr) == POST_MODIFY)
21878 ok = ix86_decompose_address (addr, &parts);
21881 if (parts.base && GET_CODE (parts.base) == SUBREG)
21882 parts.base = SUBREG_REG (parts.base);
21883 if (parts.index && GET_CODE (parts.index) == SUBREG)
21884 parts.index = SUBREG_REG (parts.index);
21887 index = parts.index;
21892 - esp as the base always wants an index,
21893 - ebp as the base always wants a displacement,
21894 - r12 as the base always wants an index,
21895 - r13 as the base always wants a displacement. */
21897 /* Register Indirect. */
21898 if (base && !index && !disp)
21900 /* esp (for its index) and ebp (for its displacement) need
21901 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
21902 mode. */
21904 && (addr == arg_pointer_rtx
21905 || addr == frame_pointer_rtx
21906 || REGNO (addr) == SP_REG
21907 || REGNO (addr) == BP_REG
21908 || REGNO (addr) == R12_REG
21909 || REGNO (addr) == R13_REG))
21913 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
21914 is not disp32, but disp32(%rip), so for disp32
21915 SIB byte is needed, unless print_operand_address
21916 optimizes it into disp32(%rip) or (%rip) is implied
21917 by UNSPEC. */
21918 else if (disp && !base && !index)
21925 if (GET_CODE (disp) == CONST)
21926 symbol = XEXP (disp, 0);
21927 if (GET_CODE (symbol) == PLUS
21928 && CONST_INT_P (XEXP (symbol, 1)))
21929 symbol = XEXP (symbol, 0);
21931 if (GET_CODE (symbol) != LABEL_REF
21932 && (GET_CODE (symbol) != SYMBOL_REF
21933 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
21934 && (GET_CODE (symbol) != UNSPEC
21935 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
21936 && XINT (symbol, 1) != UNSPEC_PCREL
21937 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
21944 /* Find the length of the displacement constant. */
21947 if (base && satisfies_constraint_K (disp))
21952 /* ebp always wants a displacement. Similarly r13. */
21953 else if (base && REG_P (base)
21954 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
21957 /* An index requires the two-byte modrm form.... */
21959 /* ...like esp (or r12), which always wants an index. */
21960 || base == arg_pointer_rtx
21961 || base == frame_pointer_rtx
21962 || (base && REG_P (base)
21963 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
21980 /* Compute the default value for the "length_immediate" attribute. When
21981 SHORTFORM is set, expect that the insn has an 8-bit immediate alternative. */
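/* For example, "add $3, %eax" in a pattern with an imm8 alternative
   (SHORTFORM) yields length_immediate 1, while "add $300, %eax" needs
   the full 32-bit immediate and yields 4.  */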
21983 ix86_attr_length_immediate_default (rtx insn, int shortform)
21987 extract_insn_cached (insn);
21988 for (i = recog_data.n_operands - 1; i >= 0; --i)
21989 if (CONSTANT_P (recog_data.operand[i]))
21991 enum attr_mode mode = get_attr_mode (insn);
21994 if (shortform && CONST_INT_P (recog_data.operand[i]))
21996 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
22003 ival = trunc_int_for_mode (ival, HImode);
22006 ival = trunc_int_for_mode (ival, SImode);
22011 if (IN_RANGE (ival, -128, 127))
22028 /* Immediates for DImode instructions are encoded as 32bit sign extended values. */
22033 fatal_insn ("unknown insn mode", insn);
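/* Illustrative note (not from the original source): the shortform case
   matters because most ALU instructions have two immediate encodings, e.g.

       addl $5, %eax       -> 83 /0 ib   (sign-extended imm8, 1 byte)
       addl $500, %eax     -> 81 /0 id   (imm32, 4 bytes)

   so when SHORTFORM is set and the constant fits in [-128, 127] after
   truncation to the operand mode, the attribute is 1 rather than the full
   immediate width.  */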
22038 /* Compute default value for "length_address" attribute. */
22040 ix86_attr_length_address_default (rtx insn)
22044 if (get_attr_type (insn) == TYPE_LEA)
22046 rtx set = PATTERN (insn), addr;
22048 if (GET_CODE (set) == PARALLEL)
22049 set = XVECEXP (set, 0, 0);
22051 gcc_assert (GET_CODE (set) == SET);
22053 addr = SET_SRC (set);
22054 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
22056 if (GET_CODE (addr) == ZERO_EXTEND)
22057 addr = XEXP (addr, 0);
22058 if (GET_CODE (addr) == SUBREG)
22059 addr = SUBREG_REG (addr);
22062 return memory_address_length (addr);
22065 extract_insn_cached (insn);
22066 for (i = recog_data.n_operands - 1; i >= 0; --i)
22067 if (MEM_P (recog_data.operand[i]))
22069 constrain_operands_cached (reload_completed);
22070 if (which_alternative != -1)
22072 const char *constraints = recog_data.constraints[i];
22073 int alt = which_alternative;
22075 while (*constraints == '=' || *constraints == '+')
22078 while (*constraints++ != ',')
22080 /* Skip ignored operands. */
22081 if (*constraints == 'X')
22084 return memory_address_length (XEXP (recog_data.operand[i], 0));
22089 /* Compute default value for "length_vex" attribute. It includes
22090 2 or 3 byte VEX prefix and 1 opcode byte. */
22093 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
22098 /* Only the 0f opcode can use the 2 byte VEX prefix, and the VEX W bit
22099 requires the 3 byte VEX prefix. */
22100 if (!has_0f_opcode || has_vex_w)
22103 /* We can always use 2 byte VEX prefix in 32bit. */
22107 extract_insn_cached (insn);
22109 for (i = recog_data.n_operands - 1; i >= 0; --i)
22110 if (REG_P (recog_data.operand[i]))
22112 /* REX.W bit uses 3 byte VEX prefix. */
22113 if (GET_MODE (recog_data.operand[i]) == DImode
22114 && GENERAL_REG_P (recog_data.operand[i]))
22119 /* REX.X or REX.B bits use 3 byte VEX prefix. */
22120 if (MEM_P (recog_data.operand[i])
22121 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
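/* Illustrative note (not from the original source): the 2 byte VEX prefix
   (0xc5) can only encode the equivalent of REX.R, so anything needing
   REX.W/X/B or a non-0f opcode map falls back to the 3 byte form (0xc4).
   For example:

       vaddps %xmm2, %xmm1, %xmm0     -> c5 f0 58 c2     (2 byte VEX)
       vaddps (%r8), %xmm1, %xmm0     -> needs REX.B, so a 3 byte VEX

   which is exactly what the register and memory operand scans above are
   checking for.  */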
22128 /* Return the maximum number of instructions a cpu can issue. */
22131 ix86_issue_rate (void)
22135 case PROCESSOR_PENTIUM:
22136 case PROCESSOR_ATOM:
22140 case PROCESSOR_PENTIUMPRO:
22141 case PROCESSOR_PENTIUM4:
22142 case PROCESSOR_ATHLON:
22144 case PROCESSOR_AMDFAM10:
22145 case PROCESSOR_NOCONA:
22146 case PROCESSOR_GENERIC32:
22147 case PROCESSOR_GENERIC64:
22148 case PROCESSOR_BDVER1:
22151 case PROCESSOR_CORE2:
22159 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
22160 set by DEP_INSN and nothing else set by DEP_INSN. */
22163 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
22167 /* Simplify the test for uninteresting insns. */
22168 if (insn_type != TYPE_SETCC
22169 && insn_type != TYPE_ICMOV
22170 && insn_type != TYPE_FCMOV
22171 && insn_type != TYPE_IBR)
22174 if ((set = single_set (dep_insn)) != 0)
22176 set = SET_DEST (set);
22179 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
22180 && XVECLEN (PATTERN (dep_insn), 0) == 2
22181 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
22182 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
22184 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
22185 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
22190 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
22193 /* This test is true if the dependent insn reads the flags but
22194 not any other potentially set register. */
22195 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
22198 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
22204 /* Return true iff USE_INSN has a memory address with operands set by SET_INSN. */
22208 ix86_agi_dependent (rtx set_insn, rtx use_insn)
22211 extract_insn_cached (use_insn);
22212 for (i = recog_data.n_operands - 1; i >= 0; --i)
22213 if (MEM_P (recog_data.operand[i]))
22215 rtx addr = XEXP (recog_data.operand[i], 0);
22216 return modified_in_p (addr, set_insn) != 0;
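/* Illustrative note (not from the original source): a classic AGI pair on
   the Pentium would be

       addl $4, %eax        ; SET_INSN writes %eax
       movl (%eax), %ebx    ; USE_INSN needs %eax to form its address

   where address generation for the second insn must wait for the first,
   adding a cycle of latency beyond the plain data dependence.  */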
22222 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22224 enum attr_type insn_type, dep_insn_type;
22225 enum attr_memory memory;
22227 int dep_insn_code_number;
22229 /* Anti and output dependencies have zero cost on all CPUs. */
22230 if (REG_NOTE_KIND (link) != 0)
22233 dep_insn_code_number = recog_memoized (dep_insn);
22235 /* If we can't recognize the insns, we can't really do anything. */
22236 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
22239 insn_type = get_attr_type (insn);
22240 dep_insn_type = get_attr_type (dep_insn);
22244 case PROCESSOR_PENTIUM:
22245 /* Address Generation Interlock adds a cycle of latency. */
22246 if (insn_type == TYPE_LEA)
22248 rtx addr = PATTERN (insn);
22250 if (GET_CODE (addr) == PARALLEL)
22251 addr = XVECEXP (addr, 0, 0);
22253 gcc_assert (GET_CODE (addr) == SET);
22255 addr = SET_SRC (addr);
22256 if (modified_in_p (addr, dep_insn))
22259 else if (ix86_agi_dependent (dep_insn, insn))
22262 /* ??? Compares pair with jump/setcc. */
22263 if (ix86_flags_dependent (insn, dep_insn, insn_type))
22266 /* Floating point stores require the value to be ready one cycle earlier. */
22267 if (insn_type == TYPE_FMOV
22268 && get_attr_memory (insn) == MEMORY_STORE
22269 && !ix86_agi_dependent (dep_insn, insn))
22273 case PROCESSOR_PENTIUMPRO:
22274 memory = get_attr_memory (insn);
22276 /* INT->FP conversion is expensive. */
22277 if (get_attr_fp_int_src (dep_insn))
22280 /* There is one extra cycle of latency between an FP op and a store. */
22281 if (insn_type == TYPE_FMOV
22282 && (set = single_set (dep_insn)) != NULL_RTX
22283 && (set2 = single_set (insn)) != NULL_RTX
22284 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
22285 && MEM_P (SET_DEST (set2)))
22288 /* Model the reorder buffer's ability to hide the latency of a load by
22289 executing it in parallel with the previous instruction, in case the
22290 previous instruction is not needed to compute the address. */
22291 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22292 && !ix86_agi_dependent (dep_insn, insn))
22294 /* Claim moves to take one cycle, as the core can issue one load
22295 at a time and the next load can start a cycle later. */
22296 if (dep_insn_type == TYPE_IMOV
22297 || dep_insn_type == TYPE_FMOV)
22305 memory = get_attr_memory (insn);
22307 /* The esp dependency is resolved before the instruction is really finished. */
22309 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
22310 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
22313 /* INT->FP conversion is expensive. */
22314 if (get_attr_fp_int_src (dep_insn))
22317 /* Model the reorder buffer's ability to hide the latency of a load by
22318 executing it in parallel with the previous instruction, in case the
22319 previous instruction is not needed to compute the address. */
22320 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22321 && !ix86_agi_dependent (dep_insn, insn))
22323 /* Claim moves to take one cycle, as the core can issue one load
22324 at a time and the next load can start a cycle later. */
22325 if (dep_insn_type == TYPE_IMOV
22326 || dep_insn_type == TYPE_FMOV)
22335 case PROCESSOR_ATHLON:
22337 case PROCESSOR_AMDFAM10:
22338 case PROCESSOR_BDVER1:
22339 case PROCESSOR_ATOM:
22340 case PROCESSOR_GENERIC32:
22341 case PROCESSOR_GENERIC64:
22342 memory = get_attr_memory (insn);
22344 /* Model the reorder buffer's ability to hide the latency of a load by
22345 executing it in parallel with the previous instruction, in case the
22346 previous instruction is not needed to compute the address. */
22347 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22348 && !ix86_agi_dependent (dep_insn, insn))
22350 enum attr_unit unit = get_attr_unit (insn);
22353 /* Because of the difference between the length of integer and
22354 floating unit pipeline preparation stages, the memory operands
22355 for floating point are cheaper.
22357 ??? For Athlon the difference is most probably 2. */
22358 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
22361 loadcost = TARGET_ATHLON ? 2 : 0;
22363 if (cost >= loadcost)
22376 /* How many alternative schedules to try. This should be as wide as the
22377 scheduling freedom in the DFA, but no wider. Making this value too
22378 large results in extra work for the scheduler. */
22381 ia32_multipass_dfa_lookahead (void)
22385 case PROCESSOR_PENTIUM:
22388 case PROCESSOR_PENTIUMPRO:
22392 case PROCESSOR_CORE2:
22393 case PROCESSOR_COREI7_32:
22394 case PROCESSOR_COREI7_64:
22395 /* Generally, we want haifa-sched:max_issue() to look ahead as far as the
22396 number of instructions that can be executed in one cycle, i.e.,
22397 issue_rate. I wonder why the tuning for many CPUs does not do this. */
22398 return ix86_issue_rate ();
22407 /* Model decoder of Core 2/i7.
22408 The hooks below for multipass scheduling (see haifa-sched.c:max_issue)
22409 track the instruction fetch block boundaries and make sure that long
22410 (9+ bytes) instructions are assigned to D0. */
22412 /* Maximum length of an insn that can be handled by
22413 a secondary decoder unit. '8' for Core 2/i7. */
22414 static int core2i7_secondary_decoder_max_insn_size;
22416 /* Ifetch block size, i.e., number of bytes decoder reads per cycle.
22417 '16' for Core 2/i7. */
22418 static int core2i7_ifetch_block_size;
22420 /* Maximum number of instructions decoder can handle per cycle.
22421 '6' for Core 2/i7. */
22422 static int core2i7_ifetch_block_max_insns;
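/* Illustrative note (not from the original source): with a 16 byte ifetch
   block, issuing insns of 7, 6 and 5 bytes in that order fills the block to
   13 bytes after two insns; the third would push it to 18 > 16, so the
   filter below masks it out for the current cycle even though only two of
   the six decoder slots are in use.  */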
22424 typedef struct ix86_first_cycle_multipass_data_ *
22425 ix86_first_cycle_multipass_data_t;
22426 typedef const struct ix86_first_cycle_multipass_data_ *
22427 const_ix86_first_cycle_multipass_data_t;
22429 /* A variable to store target state across calls to max_issue within one cycle. */
22431 static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
22432 *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
22434 /* Initialize DATA. */
22436 core2i7_first_cycle_multipass_init (void *_data)
22438 ix86_first_cycle_multipass_data_t data
22439 = (ix86_first_cycle_multipass_data_t) _data;
22441 data->ifetch_block_len = 0;
22442 data->ifetch_block_n_insns = 0;
22443 data->ready_try_change = NULL;
22444 data->ready_try_change_size = 0;
22447 /* Advancing the cycle; reset ifetch block counts. */
22449 core2i7_dfa_post_advance_cycle (void)
22451 ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;
22453 gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22455 data->ifetch_block_len = 0;
22456 data->ifetch_block_n_insns = 0;
22459 static int min_insn_size (rtx);
22461 /* Filter out insns from ready_try that the core will not be able to issue
22462 on the current cycle due to decoder restrictions. */
22464 core2i7_first_cycle_multipass_filter_ready_try
22465 (const_ix86_first_cycle_multipass_data_t data,
22466 char *ready_try, int n_ready, bool first_cycle_insn_p)
22473 if (ready_try[n_ready])
22476 insn = get_ready_element (n_ready);
22477 insn_size = min_insn_size (insn);
22479 if (/* If this insn is too long for a secondary decoder ... */
22480 (!first_cycle_insn_p
22481 && insn_size > core2i7_secondary_decoder_max_insn_size)
22482 /* ... or it would not fit into the ifetch block ... */
22483 || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
22484 /* ... or the decoder is full already ... */
22485 || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
22486 /* ... mask the insn out. */
22488 ready_try[n_ready] = 1;
22490 if (data->ready_try_change)
22491 SET_BIT (data->ready_try_change, n_ready);
22496 /* Prepare for a new round of multipass lookahead scheduling. */
22498 core2i7_first_cycle_multipass_begin (void *_data, char *ready_try, int n_ready,
22499 bool first_cycle_insn_p)
22501 ix86_first_cycle_multipass_data_t data
22502 = (ix86_first_cycle_multipass_data_t) _data;
22503 const_ix86_first_cycle_multipass_data_t prev_data
22504 = ix86_first_cycle_multipass_data;
22506 /* Restore the state from the end of the previous round. */
22507 data->ifetch_block_len = prev_data->ifetch_block_len;
22508 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns;
22510 /* Filter instructions that cannot be issued on the current cycle due to
22511 decoder restrictions. */
22512 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22513 first_cycle_insn_p);
22516 /* INSN is being issued in the current solution. Account for its impact on
22517 the decoder model. */
22519 core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
22520 rtx insn, const void *_prev_data)
22522 ix86_first_cycle_multipass_data_t data
22523 = (ix86_first_cycle_multipass_data_t) _data;
22524 const_ix86_first_cycle_multipass_data_t prev_data
22525 = (const_ix86_first_cycle_multipass_data_t) _prev_data;
22527 int insn_size = min_insn_size (insn);
22529 data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
22530 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
22531 gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
22532 && data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22534 /* Allocate or resize the bitmap for storing INSN's effect on ready_try. */
22535 if (!data->ready_try_change)
22537 data->ready_try_change = sbitmap_alloc (n_ready);
22538 data->ready_try_change_size = n_ready;
22540 else if (data->ready_try_change_size < n_ready)
22542 data->ready_try_change = sbitmap_resize (data->ready_try_change,
22544 data->ready_try_change_size = n_ready;
22546 sbitmap_zero (data->ready_try_change);
22548 /* Filter out insns from ready_try that the core will not be able to issue
22549 on the current cycle due to decoder restrictions. */
22550 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22554 /* Revert the effect on ready_try. */
22556 core2i7_first_cycle_multipass_backtrack (const void *_data,
22558 int n_ready ATTRIBUTE_UNUSED)
22560 const_ix86_first_cycle_multipass_data_t data
22561 = (const_ix86_first_cycle_multipass_data_t) _data;
22562 unsigned int i = 0;
22563 sbitmap_iterator sbi;
22565 gcc_assert (sbitmap_last_set_bit (data->ready_try_change) < n_ready);
22566 EXECUTE_IF_SET_IN_SBITMAP (data->ready_try_change, 0, i, sbi)
22572 /* Save the result of multipass lookahead scheduling for the next round. */
22574 core2i7_first_cycle_multipass_end (const void *_data)
22576 const_ix86_first_cycle_multipass_data_t data
22577 = (const_ix86_first_cycle_multipass_data_t) _data;
22578 ix86_first_cycle_multipass_data_t next_data
22579 = ix86_first_cycle_multipass_data;
22583 next_data->ifetch_block_len = data->ifetch_block_len;
22584 next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
22588 /* Deallocate target data. */
22590 core2i7_first_cycle_multipass_fini (void *_data)
22592 ix86_first_cycle_multipass_data_t data
22593 = (ix86_first_cycle_multipass_data_t) _data;
22595 if (data->ready_try_change)
22597 sbitmap_free (data->ready_try_change);
22598 data->ready_try_change = NULL;
22599 data->ready_try_change_size = 0;
22603 /* Prepare for scheduling pass. */
22605 ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
22606 int verbose ATTRIBUTE_UNUSED,
22607 int max_uid ATTRIBUTE_UNUSED)
22609 /* Install scheduling hooks for current CPU. Some of these hooks are used
22610 in time-critical parts of the scheduler, so we only set them up when
22611 they are actually used. */
22614 case PROCESSOR_CORE2:
22615 case PROCESSOR_COREI7_32:
22616 case PROCESSOR_COREI7_64:
22617 targetm.sched.dfa_post_advance_cycle
22618 = core2i7_dfa_post_advance_cycle;
22619 targetm.sched.first_cycle_multipass_init
22620 = core2i7_first_cycle_multipass_init;
22621 targetm.sched.first_cycle_multipass_begin
22622 = core2i7_first_cycle_multipass_begin;
22623 targetm.sched.first_cycle_multipass_issue
22624 = core2i7_first_cycle_multipass_issue;
22625 targetm.sched.first_cycle_multipass_backtrack
22626 = core2i7_first_cycle_multipass_backtrack;
22627 targetm.sched.first_cycle_multipass_end
22628 = core2i7_first_cycle_multipass_end;
22629 targetm.sched.first_cycle_multipass_fini
22630 = core2i7_first_cycle_multipass_fini;
22632 /* Set decoder parameters. */
22633 core2i7_secondary_decoder_max_insn_size = 8;
22634 core2i7_ifetch_block_size = 16;
22635 core2i7_ifetch_block_max_insns = 6;
22639 targetm.sched.dfa_post_advance_cycle = NULL;
22640 targetm.sched.first_cycle_multipass_init = NULL;
22641 targetm.sched.first_cycle_multipass_begin = NULL;
22642 targetm.sched.first_cycle_multipass_issue = NULL;
22643 targetm.sched.first_cycle_multipass_backtrack = NULL;
22644 targetm.sched.first_cycle_multipass_end = NULL;
22645 targetm.sched.first_cycle_multipass_fini = NULL;
22651 /* Compute the alignment given to a constant that is being placed in memory.
22652 EXP is the constant and ALIGN is the alignment that the object would ordinarily have.
22654 The value of this function is used instead of that alignment to align the object. */
22658 ix86_constant_alignment (tree exp, int align)
22660 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
22661 || TREE_CODE (exp) == INTEGER_CST)
22663 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
22665 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
22668 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
22669 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
22670 return BITS_PER_WORD;
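/* Illustrative note (not from the original source): e.g. a DFmode constant
   such as 3.14159 placed in the constant pool is given 64-bit alignment here
   even though ia32 only guarantees 32-bit alignment for doubles in general,
   and a long string constant is word-aligned so that block moves of it can
   use word accesses.  */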
22675 /* Compute the alignment for a static variable.
22676 TYPE is the data type, and ALIGN is the alignment that
22677 the object would ordinarily have. The value of this function is used
22678 instead of that alignment to align the object. */
22681 ix86_data_alignment (tree type, int align)
22683 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
22685 if (AGGREGATE_TYPE_P (type)
22686 && TYPE_SIZE (type)
22687 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
22688 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
22689 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
22690 && align < max_align)
22693 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
22694 to 16byte boundary. */
22697 if (AGGREGATE_TYPE_P (type)
22698 && TYPE_SIZE (type)
22699 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
22700 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
22701 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
22705 if (TREE_CODE (type) == ARRAY_TYPE)
22707 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
22709 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
22712 else if (TREE_CODE (type) == COMPLEX_TYPE)
22715 if (TYPE_MODE (type) == DCmode && align < 64)
22717 if ((TYPE_MODE (type) == XCmode
22718 || TYPE_MODE (type) == TCmode) && align < 128)
22721 else if ((TREE_CODE (type) == RECORD_TYPE
22722 || TREE_CODE (type) == UNION_TYPE
22723 || TREE_CODE (type) == QUAL_UNION_TYPE)
22724 && TYPE_FIELDS (type))
22726 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
22728 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
22731 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
22732 || TREE_CODE (type) == INTEGER_TYPE)
22734 if (TYPE_MODE (type) == DFmode && align < 64)
22736 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
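/* Illustrative note (not from the original source): e.g. on x86-64 a static
   "char buf[32]" is an aggregate of at least 16 bytes and so is bumped to at
   least 128-bit alignment by the ABI rule above, while a static "double d"
   only gets the 64-bit alignment needed for cheap DFmode access.  */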
22743 /* Compute the alignment for a local variable or a stack slot. EXP is
22744 the data type or decl itself, MODE is the widest mode available and
22745 ALIGN is the alignment that the object would ordinarily have. The
22746 value of this macro is used instead of that alignment to align the object. */
22750 ix86_local_alignment (tree exp, enum machine_mode mode,
22751 unsigned int align)
22755 if (exp && DECL_P (exp))
22757 type = TREE_TYPE (exp);
22766 if (use_avx256_p (mode, type))
22767 cfun->machine->use_avx256_p = true;
22769 /* Don't do dynamic stack realignment for long long objects with
22770 -mpreferred-stack-boundary=2. */
22773 && ix86_preferred_stack_boundary < 64
22774 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
22775 && (!type || !TYPE_USER_ALIGN (type))
22776 && (!decl || !DECL_USER_ALIGN (decl)))
22779 /* If TYPE is NULL, we are allocating a stack slot for caller-save
22780 register in MODE. We will return the largest alignment of XF and DF. */
22784 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
22785 align = GET_MODE_ALIGNMENT (DFmode);
22789 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
22790 to 16byte boundary. Exact wording is:
22792 An array uses the same alignment as its elements, except that a local or
22793 global array variable of length at least 16 bytes or
22794 a C99 variable-length array variable always has alignment of at least 16 bytes.
22796 This was added to allow use of aligned SSE instructions on arrays. This
22797 rule is meant for static storage (where the compiler can not do the
22798 analysis by itself). We follow it for automatic variables only when
22799 convenient. We fully control everything in the function being compiled,
22800 and functions from other units can not rely on the alignment.
22802 Exclude the va_list type. It is the common case of a local array where
22803 we can not benefit from the alignment. */
22804 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
22807 if (AGGREGATE_TYPE_P (type)
22808 && (TYPE_MAIN_VARIANT (type)
22809 != TYPE_MAIN_VARIANT (va_list_type_node))
22810 && TYPE_SIZE (type)
22811 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
22812 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
22813 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
22816 if (TREE_CODE (type) == ARRAY_TYPE)
22818 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
22820 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
22823 else if (TREE_CODE (type) == COMPLEX_TYPE)
22825 if (TYPE_MODE (type) == DCmode && align < 64)
22827 if ((TYPE_MODE (type) == XCmode
22828 || TYPE_MODE (type) == TCmode) && align < 128)
22831 else if ((TREE_CODE (type) == RECORD_TYPE
22832 || TREE_CODE (type) == UNION_TYPE
22833 || TREE_CODE (type) == QUAL_UNION_TYPE)
22834 && TYPE_FIELDS (type))
22836 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
22838 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
22841 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
22842 || TREE_CODE (type) == INTEGER_TYPE)
22845 if (TYPE_MODE (type) == DFmode && align < 64)
22847 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
22853 /* Compute the minimum required alignment for dynamic stack realignment
22854 purposes for a local variable, parameter or a stack slot. EXP is
22855 the data type or decl itself, MODE is its mode and ALIGN is the
22856 alignment that the object would ordinarily have. */
22859 ix86_minimum_alignment (tree exp, enum machine_mode mode,
22860 unsigned int align)
22864 if (exp && DECL_P (exp))
22866 type = TREE_TYPE (exp);
22875 if (use_avx256_p (mode, type))
22876 cfun->machine->use_avx256_p = true;
22878 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
22881 /* Don't do dynamic stack realignment for long long objects with
22882 -mpreferred-stack-boundary=2. */
22883 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
22884 && (!type || !TYPE_USER_ALIGN (type))
22885 && (!decl || !DECL_USER_ALIGN (decl)))
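/* Illustrative note (not from the original source): e.g. with
   -m32 -mpreferred-stack-boundary=2, a "long long" local would normally
   demand 64-bit alignment and hence dynamic stack realignment; the hook
   above instead reports 32 bits for it, trading object alignment for not
   realigning the frame, unless the user requested the alignment
   explicitly.  */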
22891 /* Find a location for the static chain incoming to a nested function.
22892 This is a register, unless all free registers are used by arguments. */
22895 ix86_static_chain (const_tree fndecl, bool incoming_p)
22899 if (!DECL_STATIC_CHAIN (fndecl))
22904 /* We always use R10 in 64-bit mode. */
22910 /* By default in 32-bit mode we use ECX to pass the static chain. */
22913 fntype = TREE_TYPE (fndecl);
22914 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
22916 /* Fastcall functions use ecx/edx for arguments, which leaves
22917 us with EAX for the static chain. */
22920 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
22922 /* Thiscall functions use ecx for arguments, which leaves
22923 us with EAX for the static chain. */
22926 else if (ix86_function_regparm (fntype, fndecl) == 3)
22928 /* For regparm 3, we have no free call-clobbered registers in
22929 which to store the static chain. In order to implement this,
22930 we have the trampoline push the static chain to the stack.
22931 However, we can't push a value below the return address when
22932 we call the nested function directly, so we have to use an
22933 alternate entry point. For this we use ESI, and have the
22934 alternate entry point push ESI, so that things appear the
22935 same once we're executing the nested function. */
22938 if (fndecl == current_function_decl)
22939 ix86_static_chain_on_stack = true;
22940 return gen_frame_mem (SImode,
22941 plus_constant (arg_pointer_rtx, -8));
22947 return gen_rtx_REG (Pmode, regno);
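/* Illustrative note (not from the original source): static chains only come
   into play for GNU C nested functions that refer to their enclosing frame,
   e.g.

       int outer (int x)
       {
         int inner (int y) { return x + y; }   // reads outer's frame
         return inner (1);
       }

   When the address of "inner" escapes, a trampoline (see below) is built
   that loads the static chain register chosen here before jumping to the
   real code.  */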
22950 /* Emit RTL insns to initialize the variable parts of a trampoline.
22951 FNDECL is the decl of the target address; M_TRAMP is a MEM for
22952 the trampoline, and CHAIN_VALUE is an RTX for the static chain
22953 to be passed to the target function. */
22956 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
22960 fnaddr = XEXP (DECL_RTL (fndecl), 0);
22967 /* Depending on the static chain location, either load a register
22968 with a constant, or push the constant to the stack. All of the
22969 instructions are the same size. */
22970 chain = ix86_static_chain (fndecl, true);
22973 if (REGNO (chain) == CX_REG)
22975 else if (REGNO (chain) == AX_REG)
22978 gcc_unreachable ();
22983 mem = adjust_address (m_tramp, QImode, 0);
22984 emit_move_insn (mem, gen_int_mode (opcode, QImode));
22986 mem = adjust_address (m_tramp, SImode, 1);
22987 emit_move_insn (mem, chain_value);
22989 /* Compute offset from the end of the jmp to the target function.
22990 In the case in which the trampoline stores the static chain on
22991 the stack, we need to skip the first insn which pushes the
22992 (call-saved) register static chain; this push is 1 byte. */
22993 disp = expand_binop (SImode, sub_optab, fnaddr,
22994 plus_constant (XEXP (m_tramp, 0),
22995 MEM_P (chain) ? 9 : 10),
22996 NULL_RTX, 1, OPTAB_DIRECT);
22998 mem = adjust_address (m_tramp, QImode, 5);
22999 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
23001 mem = adjust_address (m_tramp, SImode, 6);
23002 emit_move_insn (mem, disp);
23008 /* Load the function address to r11. Try to load address using
23009 the shorter movl instead of movabs. We may want to support
23010 movq for kernel mode, but the kernel does not use trampolines at the moment. */
23012 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
23014 fnaddr = copy_to_mode_reg (DImode, fnaddr);
23016 mem = adjust_address (m_tramp, HImode, offset);
23017 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
23019 mem = adjust_address (m_tramp, SImode, offset + 2);
23020 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
23025 mem = adjust_address (m_tramp, HImode, offset);
23026 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
23028 mem = adjust_address (m_tramp, DImode, offset + 2);
23029 emit_move_insn (mem, fnaddr);
23033 /* Load static chain using movabs to r10. */
23034 mem = adjust_address (m_tramp, HImode, offset);
23035 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
23037 mem = adjust_address (m_tramp, DImode, offset + 2);
23038 emit_move_insn (mem, chain_value);
23041 /* Jump to r11; the last (unused) byte is a nop, only there to
23042 pad the write out to a single 32-bit store. */
23043 mem = adjust_address (m_tramp, SImode, offset);
23044 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
23047 gcc_assert (offset <= TRAMPOLINE_SIZE);
23050 #ifdef ENABLE_EXECUTE_STACK
23051 #ifdef CHECK_EXECUTE_STACK_ENABLED
23052 if (CHECK_EXECUTE_STACK_ENABLED)
23054 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
23055 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
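/* Illustrative note (not from the original source): in the common 32-bit
   case with the chain in %ecx, the code above emits this 10 byte sequence:

       b9 <imm32>    movl $chain_value, %ecx
       e9 <rel32>    jmp  <function>

   where rel32 is relative to the end of the jmp, i.e. offset 10 from the
   trampoline start.  The regparm-3 variant pushes the chain value instead
   (opcode 0x68) and aims one byte past the nominal entry, skipping the
   function's own 1 byte push; hence the 9-vs-10 in the displacement
   computation above.  */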
23059 /* The following file contains several enumerations and data structures
23060 built from the definitions in i386-builtin-types.def. */
23062 #include "i386-builtin-types.inc"
23064 /* Table for the ix86 builtin non-function types. */
23065 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
23067 /* Retrieve an element from the above table, building some of
23068 the types lazily. */
23071 ix86_get_builtin_type (enum ix86_builtin_type tcode)
23073 unsigned int index;
23076 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
23078 type = ix86_builtin_type_tab[(int) tcode];
23082 gcc_assert (tcode > IX86_BT_LAST_PRIM);
23083 if (tcode <= IX86_BT_LAST_VECT)
23085 enum machine_mode mode;
23087 index = tcode - IX86_BT_LAST_PRIM - 1;
23088 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
23089 mode = ix86_builtin_type_vect_mode[index];
23091 type = build_vector_type_for_mode (itype, mode);
23097 index = tcode - IX86_BT_LAST_VECT - 1;
23098 if (tcode <= IX86_BT_LAST_PTR)
23099 quals = TYPE_UNQUALIFIED;
23101 quals = TYPE_QUAL_CONST;
23103 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
23104 if (quals != TYPE_UNQUALIFIED)
23105 itype = build_qualified_type (itype, quals);
23107 type = build_pointer_type (itype);
23110 ix86_builtin_type_tab[(int) tcode] = type;
23114 /* Table for the ix86 builtin function types. */
23115 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
23117 /* Retrieve an element from the above table, building some of
23118 the types lazily. */
23121 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
23125 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
23127 type = ix86_builtin_func_type_tab[(int) tcode];
23131 if (tcode <= IX86_BT_LAST_FUNC)
23133 unsigned start = ix86_builtin_func_start[(int) tcode];
23134 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
23135 tree rtype, atype, args = void_list_node;
23138 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
23139 for (i = after - 1; i > start; --i)
23141 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
23142 args = tree_cons (NULL, atype, args);
23145 type = build_function_type (rtype, args);
23149 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
23150 enum ix86_builtin_func_type icode;
23152 icode = ix86_builtin_func_alias_base[index];
23153 type = ix86_get_builtin_func_type (icode);
23156 ix86_builtin_func_type_tab[(int) tcode] = type;
23161 /* Codes for all the SSE/MMX builtins. */
23164 IX86_BUILTIN_ADDPS,
23165 IX86_BUILTIN_ADDSS,
23166 IX86_BUILTIN_DIVPS,
23167 IX86_BUILTIN_DIVSS,
23168 IX86_BUILTIN_MULPS,
23169 IX86_BUILTIN_MULSS,
23170 IX86_BUILTIN_SUBPS,
23171 IX86_BUILTIN_SUBSS,
23173 IX86_BUILTIN_CMPEQPS,
23174 IX86_BUILTIN_CMPLTPS,
23175 IX86_BUILTIN_CMPLEPS,
23176 IX86_BUILTIN_CMPGTPS,
23177 IX86_BUILTIN_CMPGEPS,
23178 IX86_BUILTIN_CMPNEQPS,
23179 IX86_BUILTIN_CMPNLTPS,
23180 IX86_BUILTIN_CMPNLEPS,
23181 IX86_BUILTIN_CMPNGTPS,
23182 IX86_BUILTIN_CMPNGEPS,
23183 IX86_BUILTIN_CMPORDPS,
23184 IX86_BUILTIN_CMPUNORDPS,
23185 IX86_BUILTIN_CMPEQSS,
23186 IX86_BUILTIN_CMPLTSS,
23187 IX86_BUILTIN_CMPLESS,
23188 IX86_BUILTIN_CMPNEQSS,
23189 IX86_BUILTIN_CMPNLTSS,
23190 IX86_BUILTIN_CMPNLESS,
23191 IX86_BUILTIN_CMPNGTSS,
23192 IX86_BUILTIN_CMPNGESS,
23193 IX86_BUILTIN_CMPORDSS,
23194 IX86_BUILTIN_CMPUNORDSS,
23196 IX86_BUILTIN_COMIEQSS,
23197 IX86_BUILTIN_COMILTSS,
23198 IX86_BUILTIN_COMILESS,
23199 IX86_BUILTIN_COMIGTSS,
23200 IX86_BUILTIN_COMIGESS,
23201 IX86_BUILTIN_COMINEQSS,
23202 IX86_BUILTIN_UCOMIEQSS,
23203 IX86_BUILTIN_UCOMILTSS,
23204 IX86_BUILTIN_UCOMILESS,
23205 IX86_BUILTIN_UCOMIGTSS,
23206 IX86_BUILTIN_UCOMIGESS,
23207 IX86_BUILTIN_UCOMINEQSS,
23209 IX86_BUILTIN_CVTPI2PS,
23210 IX86_BUILTIN_CVTPS2PI,
23211 IX86_BUILTIN_CVTSI2SS,
23212 IX86_BUILTIN_CVTSI642SS,
23213 IX86_BUILTIN_CVTSS2SI,
23214 IX86_BUILTIN_CVTSS2SI64,
23215 IX86_BUILTIN_CVTTPS2PI,
23216 IX86_BUILTIN_CVTTSS2SI,
23217 IX86_BUILTIN_CVTTSS2SI64,
23219 IX86_BUILTIN_MAXPS,
23220 IX86_BUILTIN_MAXSS,
23221 IX86_BUILTIN_MINPS,
23222 IX86_BUILTIN_MINSS,
23224 IX86_BUILTIN_LOADUPS,
23225 IX86_BUILTIN_STOREUPS,
23226 IX86_BUILTIN_MOVSS,
23228 IX86_BUILTIN_MOVHLPS,
23229 IX86_BUILTIN_MOVLHPS,
23230 IX86_BUILTIN_LOADHPS,
23231 IX86_BUILTIN_LOADLPS,
23232 IX86_BUILTIN_STOREHPS,
23233 IX86_BUILTIN_STORELPS,
23235 IX86_BUILTIN_MASKMOVQ,
23236 IX86_BUILTIN_MOVMSKPS,
23237 IX86_BUILTIN_PMOVMSKB,
23239 IX86_BUILTIN_MOVNTPS,
23240 IX86_BUILTIN_MOVNTQ,
23242 IX86_BUILTIN_LOADDQU,
23243 IX86_BUILTIN_STOREDQU,
23245 IX86_BUILTIN_PACKSSWB,
23246 IX86_BUILTIN_PACKSSDW,
23247 IX86_BUILTIN_PACKUSWB,
23249 IX86_BUILTIN_PADDB,
23250 IX86_BUILTIN_PADDW,
23251 IX86_BUILTIN_PADDD,
23252 IX86_BUILTIN_PADDQ,
23253 IX86_BUILTIN_PADDSB,
23254 IX86_BUILTIN_PADDSW,
23255 IX86_BUILTIN_PADDUSB,
23256 IX86_BUILTIN_PADDUSW,
23257 IX86_BUILTIN_PSUBB,
23258 IX86_BUILTIN_PSUBW,
23259 IX86_BUILTIN_PSUBD,
23260 IX86_BUILTIN_PSUBQ,
23261 IX86_BUILTIN_PSUBSB,
23262 IX86_BUILTIN_PSUBSW,
23263 IX86_BUILTIN_PSUBUSB,
23264 IX86_BUILTIN_PSUBUSW,
23267 IX86_BUILTIN_PANDN,
23271 IX86_BUILTIN_PAVGB,
23272 IX86_BUILTIN_PAVGW,
23274 IX86_BUILTIN_PCMPEQB,
23275 IX86_BUILTIN_PCMPEQW,
23276 IX86_BUILTIN_PCMPEQD,
23277 IX86_BUILTIN_PCMPGTB,
23278 IX86_BUILTIN_PCMPGTW,
23279 IX86_BUILTIN_PCMPGTD,
23281 IX86_BUILTIN_PMADDWD,
23283 IX86_BUILTIN_PMAXSW,
23284 IX86_BUILTIN_PMAXUB,
23285 IX86_BUILTIN_PMINSW,
23286 IX86_BUILTIN_PMINUB,
23288 IX86_BUILTIN_PMULHUW,
23289 IX86_BUILTIN_PMULHW,
23290 IX86_BUILTIN_PMULLW,
23292 IX86_BUILTIN_PSADBW,
23293 IX86_BUILTIN_PSHUFW,
23295 IX86_BUILTIN_PSLLW,
23296 IX86_BUILTIN_PSLLD,
23297 IX86_BUILTIN_PSLLQ,
23298 IX86_BUILTIN_PSRAW,
23299 IX86_BUILTIN_PSRAD,
23300 IX86_BUILTIN_PSRLW,
23301 IX86_BUILTIN_PSRLD,
23302 IX86_BUILTIN_PSRLQ,
23303 IX86_BUILTIN_PSLLWI,
23304 IX86_BUILTIN_PSLLDI,
23305 IX86_BUILTIN_PSLLQI,
23306 IX86_BUILTIN_PSRAWI,
23307 IX86_BUILTIN_PSRADI,
23308 IX86_BUILTIN_PSRLWI,
23309 IX86_BUILTIN_PSRLDI,
23310 IX86_BUILTIN_PSRLQI,
23312 IX86_BUILTIN_PUNPCKHBW,
23313 IX86_BUILTIN_PUNPCKHWD,
23314 IX86_BUILTIN_PUNPCKHDQ,
23315 IX86_BUILTIN_PUNPCKLBW,
23316 IX86_BUILTIN_PUNPCKLWD,
23317 IX86_BUILTIN_PUNPCKLDQ,
23319 IX86_BUILTIN_SHUFPS,
23321 IX86_BUILTIN_RCPPS,
23322 IX86_BUILTIN_RCPSS,
23323 IX86_BUILTIN_RSQRTPS,
23324 IX86_BUILTIN_RSQRTPS_NR,
23325 IX86_BUILTIN_RSQRTSS,
23326 IX86_BUILTIN_RSQRTF,
23327 IX86_BUILTIN_SQRTPS,
23328 IX86_BUILTIN_SQRTPS_NR,
23329 IX86_BUILTIN_SQRTSS,
23331 IX86_BUILTIN_UNPCKHPS,
23332 IX86_BUILTIN_UNPCKLPS,
23334 IX86_BUILTIN_ANDPS,
23335 IX86_BUILTIN_ANDNPS,
23337 IX86_BUILTIN_XORPS,
23340 IX86_BUILTIN_LDMXCSR,
23341 IX86_BUILTIN_STMXCSR,
23342 IX86_BUILTIN_SFENCE,
23344 /* 3DNow! Original */
23345 IX86_BUILTIN_FEMMS,
23346 IX86_BUILTIN_PAVGUSB,
23347 IX86_BUILTIN_PF2ID,
23348 IX86_BUILTIN_PFACC,
23349 IX86_BUILTIN_PFADD,
23350 IX86_BUILTIN_PFCMPEQ,
23351 IX86_BUILTIN_PFCMPGE,
23352 IX86_BUILTIN_PFCMPGT,
23353 IX86_BUILTIN_PFMAX,
23354 IX86_BUILTIN_PFMIN,
23355 IX86_BUILTIN_PFMUL,
23356 IX86_BUILTIN_PFRCP,
23357 IX86_BUILTIN_PFRCPIT1,
23358 IX86_BUILTIN_PFRCPIT2,
23359 IX86_BUILTIN_PFRSQIT1,
23360 IX86_BUILTIN_PFRSQRT,
23361 IX86_BUILTIN_PFSUB,
23362 IX86_BUILTIN_PFSUBR,
23363 IX86_BUILTIN_PI2FD,
23364 IX86_BUILTIN_PMULHRW,
23366 /* 3DNow! Athlon Extensions */
23367 IX86_BUILTIN_PF2IW,
23368 IX86_BUILTIN_PFNACC,
23369 IX86_BUILTIN_PFPNACC,
23370 IX86_BUILTIN_PI2FW,
23371 IX86_BUILTIN_PSWAPDSI,
23372 IX86_BUILTIN_PSWAPDSF,
23375 IX86_BUILTIN_ADDPD,
23376 IX86_BUILTIN_ADDSD,
23377 IX86_BUILTIN_DIVPD,
23378 IX86_BUILTIN_DIVSD,
23379 IX86_BUILTIN_MULPD,
23380 IX86_BUILTIN_MULSD,
23381 IX86_BUILTIN_SUBPD,
23382 IX86_BUILTIN_SUBSD,
23384 IX86_BUILTIN_CMPEQPD,
23385 IX86_BUILTIN_CMPLTPD,
23386 IX86_BUILTIN_CMPLEPD,
23387 IX86_BUILTIN_CMPGTPD,
23388 IX86_BUILTIN_CMPGEPD,
23389 IX86_BUILTIN_CMPNEQPD,
23390 IX86_BUILTIN_CMPNLTPD,
23391 IX86_BUILTIN_CMPNLEPD,
23392 IX86_BUILTIN_CMPNGTPD,
23393 IX86_BUILTIN_CMPNGEPD,
23394 IX86_BUILTIN_CMPORDPD,
23395 IX86_BUILTIN_CMPUNORDPD,
23396 IX86_BUILTIN_CMPEQSD,
23397 IX86_BUILTIN_CMPLTSD,
23398 IX86_BUILTIN_CMPLESD,
23399 IX86_BUILTIN_CMPNEQSD,
23400 IX86_BUILTIN_CMPNLTSD,
23401 IX86_BUILTIN_CMPNLESD,
23402 IX86_BUILTIN_CMPORDSD,
23403 IX86_BUILTIN_CMPUNORDSD,
23405 IX86_BUILTIN_COMIEQSD,
23406 IX86_BUILTIN_COMILTSD,
23407 IX86_BUILTIN_COMILESD,
23408 IX86_BUILTIN_COMIGTSD,
23409 IX86_BUILTIN_COMIGESD,
23410 IX86_BUILTIN_COMINEQSD,
23411 IX86_BUILTIN_UCOMIEQSD,
23412 IX86_BUILTIN_UCOMILTSD,
23413 IX86_BUILTIN_UCOMILESD,
23414 IX86_BUILTIN_UCOMIGTSD,
23415 IX86_BUILTIN_UCOMIGESD,
23416 IX86_BUILTIN_UCOMINEQSD,
23418 IX86_BUILTIN_MAXPD,
23419 IX86_BUILTIN_MAXSD,
23420 IX86_BUILTIN_MINPD,
23421 IX86_BUILTIN_MINSD,
23423 IX86_BUILTIN_ANDPD,
23424 IX86_BUILTIN_ANDNPD,
23426 IX86_BUILTIN_XORPD,
23428 IX86_BUILTIN_SQRTPD,
23429 IX86_BUILTIN_SQRTSD,
23431 IX86_BUILTIN_UNPCKHPD,
23432 IX86_BUILTIN_UNPCKLPD,
23434 IX86_BUILTIN_SHUFPD,
23436 IX86_BUILTIN_LOADUPD,
23437 IX86_BUILTIN_STOREUPD,
23438 IX86_BUILTIN_MOVSD,
23440 IX86_BUILTIN_LOADHPD,
23441 IX86_BUILTIN_LOADLPD,
23443 IX86_BUILTIN_CVTDQ2PD,
23444 IX86_BUILTIN_CVTDQ2PS,
23446 IX86_BUILTIN_CVTPD2DQ,
23447 IX86_BUILTIN_CVTPD2PI,
23448 IX86_BUILTIN_CVTPD2PS,
23449 IX86_BUILTIN_CVTTPD2DQ,
23450 IX86_BUILTIN_CVTTPD2PI,
23452 IX86_BUILTIN_CVTPI2PD,
23453 IX86_BUILTIN_CVTSI2SD,
23454 IX86_BUILTIN_CVTSI642SD,
23456 IX86_BUILTIN_CVTSD2SI,
23457 IX86_BUILTIN_CVTSD2SI64,
23458 IX86_BUILTIN_CVTSD2SS,
23459 IX86_BUILTIN_CVTSS2SD,
23460 IX86_BUILTIN_CVTTSD2SI,
23461 IX86_BUILTIN_CVTTSD2SI64,
23463 IX86_BUILTIN_CVTPS2DQ,
23464 IX86_BUILTIN_CVTPS2PD,
23465 IX86_BUILTIN_CVTTPS2DQ,
23467 IX86_BUILTIN_MOVNTI,
23468 IX86_BUILTIN_MOVNTPD,
23469 IX86_BUILTIN_MOVNTDQ,
23471 IX86_BUILTIN_MOVQ128,
23474 IX86_BUILTIN_MASKMOVDQU,
23475 IX86_BUILTIN_MOVMSKPD,
23476 IX86_BUILTIN_PMOVMSKB128,
23478 IX86_BUILTIN_PACKSSWB128,
23479 IX86_BUILTIN_PACKSSDW128,
23480 IX86_BUILTIN_PACKUSWB128,
23482 IX86_BUILTIN_PADDB128,
23483 IX86_BUILTIN_PADDW128,
23484 IX86_BUILTIN_PADDD128,
23485 IX86_BUILTIN_PADDQ128,
23486 IX86_BUILTIN_PADDSB128,
23487 IX86_BUILTIN_PADDSW128,
23488 IX86_BUILTIN_PADDUSB128,
23489 IX86_BUILTIN_PADDUSW128,
23490 IX86_BUILTIN_PSUBB128,
23491 IX86_BUILTIN_PSUBW128,
23492 IX86_BUILTIN_PSUBD128,
23493 IX86_BUILTIN_PSUBQ128,
23494 IX86_BUILTIN_PSUBSB128,
23495 IX86_BUILTIN_PSUBSW128,
23496 IX86_BUILTIN_PSUBUSB128,
23497 IX86_BUILTIN_PSUBUSW128,
23499 IX86_BUILTIN_PAND128,
23500 IX86_BUILTIN_PANDN128,
23501 IX86_BUILTIN_POR128,
23502 IX86_BUILTIN_PXOR128,
23504 IX86_BUILTIN_PAVGB128,
23505 IX86_BUILTIN_PAVGW128,
23507 IX86_BUILTIN_PCMPEQB128,
23508 IX86_BUILTIN_PCMPEQW128,
23509 IX86_BUILTIN_PCMPEQD128,
23510 IX86_BUILTIN_PCMPGTB128,
23511 IX86_BUILTIN_PCMPGTW128,
23512 IX86_BUILTIN_PCMPGTD128,
23514 IX86_BUILTIN_PMADDWD128,
23516 IX86_BUILTIN_PMAXSW128,
23517 IX86_BUILTIN_PMAXUB128,
23518 IX86_BUILTIN_PMINSW128,
23519 IX86_BUILTIN_PMINUB128,
23521 IX86_BUILTIN_PMULUDQ,
23522 IX86_BUILTIN_PMULUDQ128,
23523 IX86_BUILTIN_PMULHUW128,
23524 IX86_BUILTIN_PMULHW128,
23525 IX86_BUILTIN_PMULLW128,
23527 IX86_BUILTIN_PSADBW128,
23528 IX86_BUILTIN_PSHUFHW,
23529 IX86_BUILTIN_PSHUFLW,
23530 IX86_BUILTIN_PSHUFD,
23532 IX86_BUILTIN_PSLLDQI128,
23533 IX86_BUILTIN_PSLLWI128,
23534 IX86_BUILTIN_PSLLDI128,
23535 IX86_BUILTIN_PSLLQI128,
23536 IX86_BUILTIN_PSRAWI128,
23537 IX86_BUILTIN_PSRADI128,
23538 IX86_BUILTIN_PSRLDQI128,
23539 IX86_BUILTIN_PSRLWI128,
23540 IX86_BUILTIN_PSRLDI128,
23541 IX86_BUILTIN_PSRLQI128,
23543 IX86_BUILTIN_PSLLDQ128,
23544 IX86_BUILTIN_PSLLW128,
23545 IX86_BUILTIN_PSLLD128,
23546 IX86_BUILTIN_PSLLQ128,
23547 IX86_BUILTIN_PSRAW128,
23548 IX86_BUILTIN_PSRAD128,
23549 IX86_BUILTIN_PSRLW128,
23550 IX86_BUILTIN_PSRLD128,
23551 IX86_BUILTIN_PSRLQ128,
23553 IX86_BUILTIN_PUNPCKHBW128,
23554 IX86_BUILTIN_PUNPCKHWD128,
23555 IX86_BUILTIN_PUNPCKHDQ128,
23556 IX86_BUILTIN_PUNPCKHQDQ128,
23557 IX86_BUILTIN_PUNPCKLBW128,
23558 IX86_BUILTIN_PUNPCKLWD128,
23559 IX86_BUILTIN_PUNPCKLDQ128,
23560 IX86_BUILTIN_PUNPCKLQDQ128,
23562 IX86_BUILTIN_CLFLUSH,
23563 IX86_BUILTIN_MFENCE,
23564 IX86_BUILTIN_LFENCE,
23566 IX86_BUILTIN_BSRSI,
23567 IX86_BUILTIN_BSRDI,
23568 IX86_BUILTIN_RDPMC,
23569 IX86_BUILTIN_RDTSC,
23570 IX86_BUILTIN_RDTSCP,
23571 IX86_BUILTIN_ROLQI,
23572 IX86_BUILTIN_ROLHI,
23573 IX86_BUILTIN_RORQI,
23574 IX86_BUILTIN_RORHI,
23577 IX86_BUILTIN_ADDSUBPS,
23578 IX86_BUILTIN_HADDPS,
23579 IX86_BUILTIN_HSUBPS,
23580 IX86_BUILTIN_MOVSHDUP,
23581 IX86_BUILTIN_MOVSLDUP,
23582 IX86_BUILTIN_ADDSUBPD,
23583 IX86_BUILTIN_HADDPD,
23584 IX86_BUILTIN_HSUBPD,
23585 IX86_BUILTIN_LDDQU,
23587 IX86_BUILTIN_MONITOR,
23588 IX86_BUILTIN_MWAIT,
23591 IX86_BUILTIN_PHADDW,
23592 IX86_BUILTIN_PHADDD,
23593 IX86_BUILTIN_PHADDSW,
23594 IX86_BUILTIN_PHSUBW,
23595 IX86_BUILTIN_PHSUBD,
23596 IX86_BUILTIN_PHSUBSW,
23597 IX86_BUILTIN_PMADDUBSW,
23598 IX86_BUILTIN_PMULHRSW,
23599 IX86_BUILTIN_PSHUFB,
23600 IX86_BUILTIN_PSIGNB,
23601 IX86_BUILTIN_PSIGNW,
23602 IX86_BUILTIN_PSIGND,
23603 IX86_BUILTIN_PALIGNR,
23604 IX86_BUILTIN_PABSB,
23605 IX86_BUILTIN_PABSW,
23606 IX86_BUILTIN_PABSD,
23608 IX86_BUILTIN_PHADDW128,
23609 IX86_BUILTIN_PHADDD128,
23610 IX86_BUILTIN_PHADDSW128,
23611 IX86_BUILTIN_PHSUBW128,
23612 IX86_BUILTIN_PHSUBD128,
23613 IX86_BUILTIN_PHSUBSW128,
23614 IX86_BUILTIN_PMADDUBSW128,
23615 IX86_BUILTIN_PMULHRSW128,
23616 IX86_BUILTIN_PSHUFB128,
23617 IX86_BUILTIN_PSIGNB128,
23618 IX86_BUILTIN_PSIGNW128,
23619 IX86_BUILTIN_PSIGND128,
23620 IX86_BUILTIN_PALIGNR128,
23621 IX86_BUILTIN_PABSB128,
23622 IX86_BUILTIN_PABSW128,
23623 IX86_BUILTIN_PABSD128,
23625 /* AMDFAM10 - SSE4A New Instructions. */
23626 IX86_BUILTIN_MOVNTSD,
23627 IX86_BUILTIN_MOVNTSS,
23628 IX86_BUILTIN_EXTRQI,
23629 IX86_BUILTIN_EXTRQ,
23630 IX86_BUILTIN_INSERTQI,
23631 IX86_BUILTIN_INSERTQ,
23634 IX86_BUILTIN_BLENDPD,
23635 IX86_BUILTIN_BLENDPS,
23636 IX86_BUILTIN_BLENDVPD,
23637 IX86_BUILTIN_BLENDVPS,
23638 IX86_BUILTIN_PBLENDVB128,
23639 IX86_BUILTIN_PBLENDW128,
23644 IX86_BUILTIN_INSERTPS128,
23646 IX86_BUILTIN_MOVNTDQA,
23647 IX86_BUILTIN_MPSADBW128,
23648 IX86_BUILTIN_PACKUSDW128,
23649 IX86_BUILTIN_PCMPEQQ,
23650 IX86_BUILTIN_PHMINPOSUW128,
23652 IX86_BUILTIN_PMAXSB128,
23653 IX86_BUILTIN_PMAXSD128,
23654 IX86_BUILTIN_PMAXUD128,
23655 IX86_BUILTIN_PMAXUW128,
23657 IX86_BUILTIN_PMINSB128,
23658 IX86_BUILTIN_PMINSD128,
23659 IX86_BUILTIN_PMINUD128,
23660 IX86_BUILTIN_PMINUW128,
23662 IX86_BUILTIN_PMOVSXBW128,
23663 IX86_BUILTIN_PMOVSXBD128,
23664 IX86_BUILTIN_PMOVSXBQ128,
23665 IX86_BUILTIN_PMOVSXWD128,
23666 IX86_BUILTIN_PMOVSXWQ128,
23667 IX86_BUILTIN_PMOVSXDQ128,
23669 IX86_BUILTIN_PMOVZXBW128,
23670 IX86_BUILTIN_PMOVZXBD128,
23671 IX86_BUILTIN_PMOVZXBQ128,
23672 IX86_BUILTIN_PMOVZXWD128,
23673 IX86_BUILTIN_PMOVZXWQ128,
23674 IX86_BUILTIN_PMOVZXDQ128,
23676 IX86_BUILTIN_PMULDQ128,
23677 IX86_BUILTIN_PMULLD128,
23679 IX86_BUILTIN_ROUNDPD,
23680 IX86_BUILTIN_ROUNDPS,
23681 IX86_BUILTIN_ROUNDSD,
23682 IX86_BUILTIN_ROUNDSS,
23684 IX86_BUILTIN_PTESTZ,
23685 IX86_BUILTIN_PTESTC,
23686 IX86_BUILTIN_PTESTNZC,
23688 IX86_BUILTIN_VEC_INIT_V2SI,
23689 IX86_BUILTIN_VEC_INIT_V4HI,
23690 IX86_BUILTIN_VEC_INIT_V8QI,
23691 IX86_BUILTIN_VEC_EXT_V2DF,
23692 IX86_BUILTIN_VEC_EXT_V2DI,
23693 IX86_BUILTIN_VEC_EXT_V4SF,
23694 IX86_BUILTIN_VEC_EXT_V4SI,
23695 IX86_BUILTIN_VEC_EXT_V8HI,
23696 IX86_BUILTIN_VEC_EXT_V2SI,
23697 IX86_BUILTIN_VEC_EXT_V4HI,
23698 IX86_BUILTIN_VEC_EXT_V16QI,
23699 IX86_BUILTIN_VEC_SET_V2DI,
23700 IX86_BUILTIN_VEC_SET_V4SF,
23701 IX86_BUILTIN_VEC_SET_V4SI,
23702 IX86_BUILTIN_VEC_SET_V8HI,
23703 IX86_BUILTIN_VEC_SET_V4HI,
23704 IX86_BUILTIN_VEC_SET_V16QI,
23706 IX86_BUILTIN_VEC_PACK_SFIX,
23709 IX86_BUILTIN_CRC32QI,
23710 IX86_BUILTIN_CRC32HI,
23711 IX86_BUILTIN_CRC32SI,
23712 IX86_BUILTIN_CRC32DI,
23714 IX86_BUILTIN_PCMPESTRI128,
23715 IX86_BUILTIN_PCMPESTRM128,
23716 IX86_BUILTIN_PCMPESTRA128,
23717 IX86_BUILTIN_PCMPESTRC128,
23718 IX86_BUILTIN_PCMPESTRO128,
23719 IX86_BUILTIN_PCMPESTRS128,
23720 IX86_BUILTIN_PCMPESTRZ128,
23721 IX86_BUILTIN_PCMPISTRI128,
23722 IX86_BUILTIN_PCMPISTRM128,
23723 IX86_BUILTIN_PCMPISTRA128,
23724 IX86_BUILTIN_PCMPISTRC128,
23725 IX86_BUILTIN_PCMPISTRO128,
23726 IX86_BUILTIN_PCMPISTRS128,
23727 IX86_BUILTIN_PCMPISTRZ128,
23729 IX86_BUILTIN_PCMPGTQ,
23731 /* AES instructions */
23732 IX86_BUILTIN_AESENC128,
23733 IX86_BUILTIN_AESENCLAST128,
23734 IX86_BUILTIN_AESDEC128,
23735 IX86_BUILTIN_AESDECLAST128,
23736 IX86_BUILTIN_AESIMC128,
23737 IX86_BUILTIN_AESKEYGENASSIST128,
23739 /* PCLMUL instruction */
23740 IX86_BUILTIN_PCLMULQDQ128,
23743 IX86_BUILTIN_ADDPD256,
23744 IX86_BUILTIN_ADDPS256,
23745 IX86_BUILTIN_ADDSUBPD256,
23746 IX86_BUILTIN_ADDSUBPS256,
23747 IX86_BUILTIN_ANDPD256,
23748 IX86_BUILTIN_ANDPS256,
23749 IX86_BUILTIN_ANDNPD256,
23750 IX86_BUILTIN_ANDNPS256,
23751 IX86_BUILTIN_BLENDPD256,
23752 IX86_BUILTIN_BLENDPS256,
23753 IX86_BUILTIN_BLENDVPD256,
23754 IX86_BUILTIN_BLENDVPS256,
23755 IX86_BUILTIN_DIVPD256,
23756 IX86_BUILTIN_DIVPS256,
23757 IX86_BUILTIN_DPPS256,
23758 IX86_BUILTIN_HADDPD256,
23759 IX86_BUILTIN_HADDPS256,
23760 IX86_BUILTIN_HSUBPD256,
23761 IX86_BUILTIN_HSUBPS256,
23762 IX86_BUILTIN_MAXPD256,
23763 IX86_BUILTIN_MAXPS256,
23764 IX86_BUILTIN_MINPD256,
23765 IX86_BUILTIN_MINPS256,
23766 IX86_BUILTIN_MULPD256,
23767 IX86_BUILTIN_MULPS256,
23768 IX86_BUILTIN_ORPD256,
23769 IX86_BUILTIN_ORPS256,
23770 IX86_BUILTIN_SHUFPD256,
23771 IX86_BUILTIN_SHUFPS256,
23772 IX86_BUILTIN_SUBPD256,
23773 IX86_BUILTIN_SUBPS256,
23774 IX86_BUILTIN_XORPD256,
23775 IX86_BUILTIN_XORPS256,
23776 IX86_BUILTIN_CMPSD,
23777 IX86_BUILTIN_CMPSS,
23778 IX86_BUILTIN_CMPPD,
23779 IX86_BUILTIN_CMPPS,
23780 IX86_BUILTIN_CMPPD256,
23781 IX86_BUILTIN_CMPPS256,
23782 IX86_BUILTIN_CVTDQ2PD256,
23783 IX86_BUILTIN_CVTDQ2PS256,
23784 IX86_BUILTIN_CVTPD2PS256,
23785 IX86_BUILTIN_CVTPS2DQ256,
23786 IX86_BUILTIN_CVTPS2PD256,
23787 IX86_BUILTIN_CVTTPD2DQ256,
23788 IX86_BUILTIN_CVTPD2DQ256,
23789 IX86_BUILTIN_CVTTPS2DQ256,
23790 IX86_BUILTIN_EXTRACTF128PD256,
23791 IX86_BUILTIN_EXTRACTF128PS256,
23792 IX86_BUILTIN_EXTRACTF128SI256,
23793 IX86_BUILTIN_VZEROALL,
23794 IX86_BUILTIN_VZEROUPPER,
23795 IX86_BUILTIN_VPERMILVARPD,
23796 IX86_BUILTIN_VPERMILVARPS,
23797 IX86_BUILTIN_VPERMILVARPD256,
23798 IX86_BUILTIN_VPERMILVARPS256,
23799 IX86_BUILTIN_VPERMILPD,
23800 IX86_BUILTIN_VPERMILPS,
23801 IX86_BUILTIN_VPERMILPD256,
23802 IX86_BUILTIN_VPERMILPS256,
23803 IX86_BUILTIN_VPERMIL2PD,
23804 IX86_BUILTIN_VPERMIL2PS,
23805 IX86_BUILTIN_VPERMIL2PD256,
23806 IX86_BUILTIN_VPERMIL2PS256,
23807 IX86_BUILTIN_VPERM2F128PD256,
23808 IX86_BUILTIN_VPERM2F128PS256,
23809 IX86_BUILTIN_VPERM2F128SI256,
23810 IX86_BUILTIN_VBROADCASTSS,
23811 IX86_BUILTIN_VBROADCASTSD256,
23812 IX86_BUILTIN_VBROADCASTSS256,
23813 IX86_BUILTIN_VBROADCASTPD256,
23814 IX86_BUILTIN_VBROADCASTPS256,
23815 IX86_BUILTIN_VINSERTF128PD256,
23816 IX86_BUILTIN_VINSERTF128PS256,
23817 IX86_BUILTIN_VINSERTF128SI256,
23818 IX86_BUILTIN_LOADUPD256,
23819 IX86_BUILTIN_LOADUPS256,
23820 IX86_BUILTIN_STOREUPD256,
23821 IX86_BUILTIN_STOREUPS256,
23822 IX86_BUILTIN_LDDQU256,
23823 IX86_BUILTIN_MOVNTDQ256,
23824 IX86_BUILTIN_MOVNTPD256,
23825 IX86_BUILTIN_MOVNTPS256,
23826 IX86_BUILTIN_LOADDQU256,
23827 IX86_BUILTIN_STOREDQU256,
23828 IX86_BUILTIN_MASKLOADPD,
23829 IX86_BUILTIN_MASKLOADPS,
23830 IX86_BUILTIN_MASKSTOREPD,
23831 IX86_BUILTIN_MASKSTOREPS,
23832 IX86_BUILTIN_MASKLOADPD256,
23833 IX86_BUILTIN_MASKLOADPS256,
23834 IX86_BUILTIN_MASKSTOREPD256,
23835 IX86_BUILTIN_MASKSTOREPS256,
23836 IX86_BUILTIN_MOVSHDUP256,
23837 IX86_BUILTIN_MOVSLDUP256,
23838 IX86_BUILTIN_MOVDDUP256,
23840 IX86_BUILTIN_SQRTPD256,
23841 IX86_BUILTIN_SQRTPS256,
23842 IX86_BUILTIN_SQRTPS_NR256,
23843 IX86_BUILTIN_RSQRTPS256,
23844 IX86_BUILTIN_RSQRTPS_NR256,
23846 IX86_BUILTIN_RCPPS256,
23848 IX86_BUILTIN_ROUNDPD256,
23849 IX86_BUILTIN_ROUNDPS256,
23851 IX86_BUILTIN_UNPCKHPD256,
23852 IX86_BUILTIN_UNPCKLPD256,
23853 IX86_BUILTIN_UNPCKHPS256,
23854 IX86_BUILTIN_UNPCKLPS256,
23856 IX86_BUILTIN_SI256_SI,
23857 IX86_BUILTIN_PS256_PS,
23858 IX86_BUILTIN_PD256_PD,
23859 IX86_BUILTIN_SI_SI256,
23860 IX86_BUILTIN_PS_PS256,
23861 IX86_BUILTIN_PD_PD256,
23863 IX86_BUILTIN_VTESTZPD,
23864 IX86_BUILTIN_VTESTCPD,
23865 IX86_BUILTIN_VTESTNZCPD,
23866 IX86_BUILTIN_VTESTZPS,
23867 IX86_BUILTIN_VTESTCPS,
23868 IX86_BUILTIN_VTESTNZCPS,
23869 IX86_BUILTIN_VTESTZPD256,
23870 IX86_BUILTIN_VTESTCPD256,
23871 IX86_BUILTIN_VTESTNZCPD256,
23872 IX86_BUILTIN_VTESTZPS256,
23873 IX86_BUILTIN_VTESTCPS256,
23874 IX86_BUILTIN_VTESTNZCPS256,
23875 IX86_BUILTIN_PTESTZ256,
23876 IX86_BUILTIN_PTESTC256,
23877 IX86_BUILTIN_PTESTNZC256,
23879 IX86_BUILTIN_MOVMSKPD256,
23880 IX86_BUILTIN_MOVMSKPS256,
23882 /* TFmode support builtins. */
23884 IX86_BUILTIN_HUGE_VALQ,
23885 IX86_BUILTIN_FABSQ,
23886 IX86_BUILTIN_COPYSIGNQ,
23888 /* Vectorizer support builtins. */
23889 IX86_BUILTIN_CPYSGNPS,
23890 IX86_BUILTIN_CPYSGNPD,
23891 IX86_BUILTIN_CPYSGNPS256,
23892 IX86_BUILTIN_CPYSGNPD256,
23894 IX86_BUILTIN_CVTUDQ2PS,
23896 IX86_BUILTIN_VEC_PERM_V2DF,
23897 IX86_BUILTIN_VEC_PERM_V4SF,
23898 IX86_BUILTIN_VEC_PERM_V2DI,
23899 IX86_BUILTIN_VEC_PERM_V4SI,
23900 IX86_BUILTIN_VEC_PERM_V8HI,
23901 IX86_BUILTIN_VEC_PERM_V16QI,
23902 IX86_BUILTIN_VEC_PERM_V2DI_U,
23903 IX86_BUILTIN_VEC_PERM_V4SI_U,
23904 IX86_BUILTIN_VEC_PERM_V8HI_U,
23905 IX86_BUILTIN_VEC_PERM_V16QI_U,
23906 IX86_BUILTIN_VEC_PERM_V4DF,
23907 IX86_BUILTIN_VEC_PERM_V8SF,
23909 /* FMA4 and XOP instructions. */
23910 IX86_BUILTIN_VFMADDSS,
23911 IX86_BUILTIN_VFMADDSD,
23912 IX86_BUILTIN_VFMADDPS,
23913 IX86_BUILTIN_VFMADDPD,
23914 IX86_BUILTIN_VFMADDPS256,
23915 IX86_BUILTIN_VFMADDPD256,
23916 IX86_BUILTIN_VFMADDSUBPS,
23917 IX86_BUILTIN_VFMADDSUBPD,
23918 IX86_BUILTIN_VFMADDSUBPS256,
23919 IX86_BUILTIN_VFMADDSUBPD256,
23921 IX86_BUILTIN_VPCMOV,
23922 IX86_BUILTIN_VPCMOV_V2DI,
23923 IX86_BUILTIN_VPCMOV_V4SI,
23924 IX86_BUILTIN_VPCMOV_V8HI,
23925 IX86_BUILTIN_VPCMOV_V16QI,
23926 IX86_BUILTIN_VPCMOV_V4SF,
23927 IX86_BUILTIN_VPCMOV_V2DF,
23928 IX86_BUILTIN_VPCMOV256,
23929 IX86_BUILTIN_VPCMOV_V4DI256,
23930 IX86_BUILTIN_VPCMOV_V8SI256,
23931 IX86_BUILTIN_VPCMOV_V16HI256,
23932 IX86_BUILTIN_VPCMOV_V32QI256,
23933 IX86_BUILTIN_VPCMOV_V8SF256,
23934 IX86_BUILTIN_VPCMOV_V4DF256,
23936 IX86_BUILTIN_VPPERM,
23938 IX86_BUILTIN_VPMACSSWW,
23939 IX86_BUILTIN_VPMACSWW,
23940 IX86_BUILTIN_VPMACSSWD,
23941 IX86_BUILTIN_VPMACSWD,
23942 IX86_BUILTIN_VPMACSSDD,
23943 IX86_BUILTIN_VPMACSDD,
23944 IX86_BUILTIN_VPMACSSDQL,
23945 IX86_BUILTIN_VPMACSSDQH,
23946 IX86_BUILTIN_VPMACSDQL,
23947 IX86_BUILTIN_VPMACSDQH,
23948 IX86_BUILTIN_VPMADCSSWD,
23949 IX86_BUILTIN_VPMADCSWD,
23951 IX86_BUILTIN_VPHADDBW,
23952 IX86_BUILTIN_VPHADDBD,
23953 IX86_BUILTIN_VPHADDBQ,
23954 IX86_BUILTIN_VPHADDWD,
23955 IX86_BUILTIN_VPHADDWQ,
23956 IX86_BUILTIN_VPHADDDQ,
23957 IX86_BUILTIN_VPHADDUBW,
23958 IX86_BUILTIN_VPHADDUBD,
23959 IX86_BUILTIN_VPHADDUBQ,
23960 IX86_BUILTIN_VPHADDUWD,
23961 IX86_BUILTIN_VPHADDUWQ,
23962 IX86_BUILTIN_VPHADDUDQ,
23963 IX86_BUILTIN_VPHSUBBW,
23964 IX86_BUILTIN_VPHSUBWD,
23965 IX86_BUILTIN_VPHSUBDQ,
23967 IX86_BUILTIN_VPROTB,
23968 IX86_BUILTIN_VPROTW,
23969 IX86_BUILTIN_VPROTD,
23970 IX86_BUILTIN_VPROTQ,
23971 IX86_BUILTIN_VPROTB_IMM,
23972 IX86_BUILTIN_VPROTW_IMM,
23973 IX86_BUILTIN_VPROTD_IMM,
23974 IX86_BUILTIN_VPROTQ_IMM,
23976 IX86_BUILTIN_VPSHLB,
23977 IX86_BUILTIN_VPSHLW,
23978 IX86_BUILTIN_VPSHLD,
23979 IX86_BUILTIN_VPSHLQ,
23980 IX86_BUILTIN_VPSHAB,
23981 IX86_BUILTIN_VPSHAW,
23982 IX86_BUILTIN_VPSHAD,
23983 IX86_BUILTIN_VPSHAQ,
23985 IX86_BUILTIN_VFRCZSS,
23986 IX86_BUILTIN_VFRCZSD,
23987 IX86_BUILTIN_VFRCZPS,
23988 IX86_BUILTIN_VFRCZPD,
23989 IX86_BUILTIN_VFRCZPS256,
23990 IX86_BUILTIN_VFRCZPD256,
23992 IX86_BUILTIN_VPCOMEQUB,
23993 IX86_BUILTIN_VPCOMNEUB,
23994 IX86_BUILTIN_VPCOMLTUB,
23995 IX86_BUILTIN_VPCOMLEUB,
23996 IX86_BUILTIN_VPCOMGTUB,
23997 IX86_BUILTIN_VPCOMGEUB,
23998 IX86_BUILTIN_VPCOMFALSEUB,
23999 IX86_BUILTIN_VPCOMTRUEUB,
24001 IX86_BUILTIN_VPCOMEQUW,
24002 IX86_BUILTIN_VPCOMNEUW,
24003 IX86_BUILTIN_VPCOMLTUW,
24004 IX86_BUILTIN_VPCOMLEUW,
24005 IX86_BUILTIN_VPCOMGTUW,
24006 IX86_BUILTIN_VPCOMGEUW,
24007 IX86_BUILTIN_VPCOMFALSEUW,
24008 IX86_BUILTIN_VPCOMTRUEUW,
24010 IX86_BUILTIN_VPCOMEQUD,
24011 IX86_BUILTIN_VPCOMNEUD,
24012 IX86_BUILTIN_VPCOMLTUD,
24013 IX86_BUILTIN_VPCOMLEUD,
24014 IX86_BUILTIN_VPCOMGTUD,
24015 IX86_BUILTIN_VPCOMGEUD,
24016 IX86_BUILTIN_VPCOMFALSEUD,
24017 IX86_BUILTIN_VPCOMTRUEUD,
24019 IX86_BUILTIN_VPCOMEQUQ,
24020 IX86_BUILTIN_VPCOMNEUQ,
24021 IX86_BUILTIN_VPCOMLTUQ,
24022 IX86_BUILTIN_VPCOMLEUQ,
24023 IX86_BUILTIN_VPCOMGTUQ,
24024 IX86_BUILTIN_VPCOMGEUQ,
24025 IX86_BUILTIN_VPCOMFALSEUQ,
24026 IX86_BUILTIN_VPCOMTRUEUQ,
24028 IX86_BUILTIN_VPCOMEQB,
24029 IX86_BUILTIN_VPCOMNEB,
24030 IX86_BUILTIN_VPCOMLTB,
24031 IX86_BUILTIN_VPCOMLEB,
24032 IX86_BUILTIN_VPCOMGTB,
24033 IX86_BUILTIN_VPCOMGEB,
24034 IX86_BUILTIN_VPCOMFALSEB,
24035 IX86_BUILTIN_VPCOMTRUEB,
24037 IX86_BUILTIN_VPCOMEQW,
24038 IX86_BUILTIN_VPCOMNEW,
24039 IX86_BUILTIN_VPCOMLTW,
24040 IX86_BUILTIN_VPCOMLEW,
24041 IX86_BUILTIN_VPCOMGTW,
24042 IX86_BUILTIN_VPCOMGEW,
24043 IX86_BUILTIN_VPCOMFALSEW,
24044 IX86_BUILTIN_VPCOMTRUEW,
24046 IX86_BUILTIN_VPCOMEQD,
24047 IX86_BUILTIN_VPCOMNED,
24048 IX86_BUILTIN_VPCOMLTD,
24049 IX86_BUILTIN_VPCOMLED,
24050 IX86_BUILTIN_VPCOMGTD,
24051 IX86_BUILTIN_VPCOMGED,
24052 IX86_BUILTIN_VPCOMFALSED,
24053 IX86_BUILTIN_VPCOMTRUED,
24055 IX86_BUILTIN_VPCOMEQQ,
24056 IX86_BUILTIN_VPCOMNEQ,
24057 IX86_BUILTIN_VPCOMLTQ,
24058 IX86_BUILTIN_VPCOMLEQ,
24059 IX86_BUILTIN_VPCOMGTQ,
24060 IX86_BUILTIN_VPCOMGEQ,
24061 IX86_BUILTIN_VPCOMFALSEQ,
24062 IX86_BUILTIN_VPCOMTRUEQ,
24064 /* LWP instructions. */
24065 IX86_BUILTIN_LLWPCB,
24066 IX86_BUILTIN_SLWPCB,
24067 IX86_BUILTIN_LWPVAL32,
24068 IX86_BUILTIN_LWPVAL64,
24069 IX86_BUILTIN_LWPINS32,
24070 IX86_BUILTIN_LWPINS64,
24074 /* BMI instructions. */
24075 IX86_BUILTIN_BEXTR32,
24076 IX86_BUILTIN_BEXTR64,
24079 /* TBM instructions. */
24080 IX86_BUILTIN_BEXTRI32,
24081 IX86_BUILTIN_BEXTRI64,
24084 /* FSGSBASE instructions. */
24085 IX86_BUILTIN_RDFSBASE32,
24086 IX86_BUILTIN_RDFSBASE64,
24087 IX86_BUILTIN_RDGSBASE32,
24088 IX86_BUILTIN_RDGSBASE64,
24089 IX86_BUILTIN_WRFSBASE32,
24090 IX86_BUILTIN_WRFSBASE64,
24091 IX86_BUILTIN_WRGSBASE32,
24092 IX86_BUILTIN_WRGSBASE64,
24094 /* RDRND instructions. */
24095 IX86_BUILTIN_RDRAND16,
24096 IX86_BUILTIN_RDRAND32,
24097 IX86_BUILTIN_RDRAND64,
24099 /* F16C instructions. */
24100 IX86_BUILTIN_CVTPH2PS,
24101 IX86_BUILTIN_CVTPH2PS256,
24102 IX86_BUILTIN_CVTPS2PH,
24103 IX86_BUILTIN_CVTPS2PH256,
24108 /* Table for the ix86 builtin decls. */
24109 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
24111 /* Table of all of the builtin functions that are possible with different ISAs,
24112 but are waiting to be built until a function is declared to use that ISA. */
24114 struct builtin_isa {
24115 const char *name; /* function name */
24116 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
24117 int isa; /* isa_flags this builtin is defined for */
24118 bool const_p; /* true if the declaration is constant */
24119 bool set_and_not_built_p;
24122 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
24125 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save MASK,
24126 the set of isa_flags this builtin requires, in the ix86_builtins_isa array.
24127 Store the function decl in the ix86_builtins array. Return the function
24128 decl, or NULL_TREE if the builtin was not added.
24130 If the front end has a special hook for builtin functions, delay adding
24131 builtin functions that aren't in the current ISA until the ISA is changed
24132 with function specific optimization. Doing so can save about 300K for the
24133 default compiler. When the builtin is expanded, check at that time whether it is valid.
24136 If the front end doesn't have a special hook, record all builtins, even if
24137 they aren't in the current ISA, in case the user uses function-specific
24138 options for a different ISA, so that we don't get scope errors if a builtin
24139 is added in the middle of a function scope. */
24142 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
24143 enum ix86_builtins code)
24145 tree decl = NULL_TREE;
24147 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
24149 ix86_builtins_isa[(int) code].isa = mask;
24151 mask &= ~OPTION_MASK_ISA_64BIT;
24153 || (mask & ix86_isa_flags) != 0
24154 || (lang_hooks.builtin_function
24155 == lang_hooks.builtin_function_ext_scope))
24158 tree type = ix86_get_builtin_func_type (tcode);
24159 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
24161 ix86_builtins[(int) code] = decl;
24162 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
24166 ix86_builtins[(int) code] = NULL_TREE;
24167 ix86_builtins_isa[(int) code].tcode = tcode;
24168 ix86_builtins_isa[(int) code].name = name;
24169 ix86_builtins_isa[(int) code].const_p = false;
24170 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
/* Like def_builtin, but also marks the function decl "const".  */

static inline tree
def_builtin_const (int mask, const char *name,
                   enum ix86_builtin_func_type tcode, enum ix86_builtins code)
{
  tree decl = def_builtin (mask, name, tcode, code);
  if (decl)
    TREE_READONLY (decl) = 1;
  else
    ix86_builtins_isa[(int) code].const_p = true;

  return decl;
}
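
/* Illustrative example (a hypothetical sketch of the table walk): the
   bdesc_* tables below are consumed during target initialization by loops
   along the lines of

     const struct builtin_description *d;
     size_t i;

     for (i = 0, d = bdesc_args; i < ARRAY_SIZE (bdesc_args); i++, d++)
       if (d->name)
         def_builtin_const (d->mask, d->name,
                            (enum ix86_builtin_func_type) d->flag,
                            (enum ix86_builtins) d->code);

   so every table row turns into one (possibly deferred) builtin decl.  */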
/* Add any new builtin functions for a given ISA that may not have been
   declared.  This saves a bit of space compared to adding all of the
   declarations to the tree, even if we didn't use them.  */

static void
ix86_add_new_builtins (int isa)
{
  int i;

  for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
    {
      if ((ix86_builtins_isa[i].isa & isa) != 0
          && ix86_builtins_isa[i].set_and_not_built_p)
        {
          tree decl, type;

          /* Don't define the builtin again.  */
          ix86_builtins_isa[i].set_and_not_built_p = false;

          type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
          decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
                                                 type, i, BUILT_IN_MD, NULL,
                                                 NULL_TREE);

          ix86_builtins[i] = decl;
          if (ix86_builtins_isa[i].const_p)
            TREE_READONLY (decl) = 1;
        }
    }
}
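
/* Illustrative example (a sketch of the intended call site): when
   function-specific target options enable extra ISA bits, a call such as

     ix86_add_new_builtins (ix86_isa_flags);

   builds, at extended scope, every builtin that def_builtin deferred for
   one of the newly enabled ISAs.  */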
/* Bits for builtin_description.flag.  */

/* Set when we don't support the comparison natively, and should
   swap_comparison in order to support it.  */
#define BUILTIN_DESC_SWAP_OPERANDS 1
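
/* Illustrative note (not in the original source): operand swapping shows
   up in two forms below.  For bdesc_comi, BUILTIN_DESC_SWAP_OPERANDS can
   be ORed into the flag field; in bdesc_args, rows such as
   __builtin_ia32_cmpgtps instead encode it in the type, pairing the LT
   comparison with V4SF_FTYPE_V4SF_V4SF_SWAP so that "a > b" is emitted
   as "b < a".  */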
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const int flag;
};
static const struct builtin_description bdesc_comi[] =
{
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
};
static const struct builtin_description bdesc_pcmpestr[] =
{
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
};
static const struct builtin_description bdesc_pcmpistr[] =
{
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
};
/* Special builtins with variable number of arguments.  */
static const struct builtin_description bdesc_special_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },

  /* MMX */
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },

  /* SSE or 3DNow!A  */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },

  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },

  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },

  /* FSGSBASE */
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },

  /* RDRND */
  { OPTION_MASK_ISA_RDRND, CODE_FOR_rdrandhi, "__builtin_ia32_rdrand16", IX86_BUILTIN_RDRAND16, UNKNOWN, (int) UINT16_FTYPE_VOID },
  { OPTION_MASK_ISA_RDRND, CODE_FOR_rdrandsi, "__builtin_ia32_rdrand32", IX86_BUILTIN_RDRAND32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT, CODE_FOR_rdranddi, "__builtin_ia32_rdrand64", IX86_BUILTIN_RDRAND64, UNKNOWN, (int) UINT64_FTYPE_VOID },
};
/* Builtins with variable number of arguments.  */
static const struct builtin_description bdesc_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
  { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },

  /* MMX */
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },

  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  /* 3DNow!A */
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },

  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },

  /* SSE MMX or 3Dnow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },

  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
24769 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
24771 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
24773 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
24774 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
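
  /* SSE2 MMX */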
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
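
  /* SSE3 */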
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
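
  /* SSSE3 */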
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
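
  /* SSE4.1 */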
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
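
  /* Note: for the ptest entries below, the rtx code field picks the flag
     that the builtin materializes: EQ reads ZF (ptestz), LTU reads CF
     (ptestc), and GTU tests that both ZF and CF are clear (ptestnzc).  */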
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
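
  /* SSE4.2 */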
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
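
  /* SSE4A */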
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
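
  /* AES */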
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
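
  /* PCLMUL */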
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
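
  /* AVX */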
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },

  { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
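
  /* BMI */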
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
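
  /* TBM */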
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
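
  /* F16C */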
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
};

/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I	V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1	V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I	V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1	V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF		V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF		V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2		V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2		V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI		V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI		V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI	V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI		V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI	V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI		V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2		V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2		V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2		V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2		V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF		V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF		V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI		V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI		V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI		V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI		V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM	V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM	V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM	V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM	V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP	V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP	V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP	V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP	V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF	V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF	V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF	V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF	V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF	V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF	V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF		V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF		V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2		V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2		V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI		V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI		V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI		V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI		V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI	V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI	V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI	V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI	V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI	V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI	V8HI_FTYPE_V16QI
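
/* Reading aid: each MULTI_ARG_<n>_<kind> macro names a function type with
   <n> vector operands, e.g. MULTI_ARG_3_SF is V4SF_FTYPE_V4SF_V4SF_V4SF,
   a three-operand form over 128-bit single-float vectors; a "2" suffix
   (MULTI_ARG_3_SF2) selects the 256-bit V8SF variant, and the _CMP/_TF/_IMM
   suffixes mark the comparison, pcom-true/false, and immediate forms used
   by the table below.  */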

static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
    "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
    "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
    UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
    "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
    "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
    "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
    "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
    "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
    "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
    "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
    "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi",IX86_BUILTIN_VPCMOV_V16QI,UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
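
  /* XOP vpcom* comparisons: the rtx code field selects the condition, and
     both the "ne" and "neq" spellings map to the same builtin.  The *_TF
     entries further below use the PCOM_FALSE/PCOM_TRUE pseudo codes for the
     always-false and always-true forms.  */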
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25281 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
25282 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
25283 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
25284 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
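/* Added note (not in the original sources): each row of this table binds a
   user-visible builtin to an insn pattern, an rtx comparison code, and an
   argument layout.  For example, with -mxop in effect, a wrapper of the
   xopintrin.h kind such as

       r = (__m128i) __builtin_ia32_vpcomltub ((__v16qi) a, (__v16qi) b);

   is routed through CODE_FOR_xop_maskcmp_unsv16qi3 with the LTU code and
   the MULTI_ARG_2_QI_CMP layout by ix86_expand_multi_arg_builtin below.  */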
25288 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
25289 in the current target ISA to allow the user to compile particular modules
25290 with different target specific options that differ from the command line
25291 options.  */
25292 static void
25293 ix86_init_mmx_sse_builtins (void)
25295 const struct builtin_description * d;
25296 enum ix86_builtin_func_type ftype;
25299 /* Add all special builtins with variable number of operands. */
25300 for (i = 0, d = bdesc_special_args;
25301 i < ARRAY_SIZE (bdesc_special_args);
25307 ftype = (enum ix86_builtin_func_type) d->flag;
25308 def_builtin (d->mask, d->name, ftype, d->code);
25311 /* Add all builtins with variable number of operands. */
25312 for (i = 0, d = bdesc_args;
25313 i < ARRAY_SIZE (bdesc_args);
25319 ftype = (enum ix86_builtin_func_type) d->flag;
25320 def_builtin_const (d->mask, d->name, ftype, d->code);
25323 /* pcmpestr[im] insns. */
25324 for (i = 0, d = bdesc_pcmpestr;
25325 i < ARRAY_SIZE (bdesc_pcmpestr);
25328 if (d->code == IX86_BUILTIN_PCMPESTRM128)
25329 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
25331 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
25332 def_builtin_const (d->mask, d->name, ftype, d->code);
25335 /* pcmpistr[im] insns. */
25336 for (i = 0, d = bdesc_pcmpistr;
25337 i < ARRAY_SIZE (bdesc_pcmpistr);
25340 if (d->code == IX86_BUILTIN_PCMPISTRM128)
25341 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
25343 ftype = INT_FTYPE_V16QI_V16QI_INT;
25344 def_builtin_const (d->mask, d->name, ftype, d->code);
25347 /* comi/ucomi insns. */
25348 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
25350 if (d->mask == OPTION_MASK_ISA_SSE2)
25351 ftype = INT_FTYPE_V2DF_V2DF;
25353 ftype = INT_FTYPE_V4SF_V4SF;
25354 def_builtin_const (d->mask, d->name, ftype, d->code);
25358 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
25359 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
25360 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
25361 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
25363 /* SSE or 3DNow!A */
25364 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25365 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
25366 IX86_BUILTIN_MASKMOVQ);
25369 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
25370 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
25372 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
25373 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
25374 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
25375 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
25378 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
25379 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
25380 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
25381 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
25384 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
25385 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
25386 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
25387 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
25388 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
25389 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
25390 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
25391 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
25392 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
25393 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
25394 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
25395 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
25398 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
25399 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
25401 /* MMX access to the vec_init patterns. */
25402 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
25403 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
25405 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
25406 V4HI_FTYPE_HI_HI_HI_HI,
25407 IX86_BUILTIN_VEC_INIT_V4HI);
25409 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
25410 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
25411 IX86_BUILTIN_VEC_INIT_V8QI);
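/* Illustrative use (an assumption, mirroring what the mmintrin.h wrappers
   do): with MMX enabled, a wrapper like

       __m64 _mm_set_pi32 (int __i1, int __i0)
       { return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1); }

   reaches IX86_BUILTIN_VEC_INIT_V2SI; element 0 of the vector comes from
   the first builtin argument.  */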
25413 /* Access to the vec_extract patterns. */
25414 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
25415 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
25416 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
25417 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
25418 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
25419 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
25420 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
25421 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
25422 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
25423 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
25425 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25426 "__builtin_ia32_vec_ext_v4hi",
25427 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
25429 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
25430 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
25432 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
25433 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
25435 /* Access to the vec_set patterns. */
25436 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
25437 "__builtin_ia32_vec_set_v2di",
25438 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
25440 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
25441 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
25443 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
25444 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
25446 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
25447 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
25449 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25450 "__builtin_ia32_vec_set_v4hi",
25451 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
25453 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
25454 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
25456 /* Add all the FMA4 and XOP multi-arg builtins.  */
25457 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
25462 ftype = (enum ix86_builtin_func_type) d->flag;
25463 def_builtin_const (d->mask, d->name, ftype, d->code);
25467 /* Internal method for ix86_init_builtins. */
25469 static void
25470 ix86_init_builtins_va_builtins_abi (void)
25472 tree ms_va_ref, sysv_va_ref;
25473 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
25474 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
25475 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
25476 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
25480 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
25481 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
25482 ms_va_ref = build_reference_type (ms_va_list_type_node);
25484 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
25487 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25488 fnvoid_va_start_ms =
25489 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25490 fnvoid_va_end_sysv =
25491 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
25492 fnvoid_va_start_sysv =
25493 build_varargs_function_type_list (void_type_node, sysv_va_ref,
25495 fnvoid_va_copy_ms =
25496 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
25498 fnvoid_va_copy_sysv =
25499 build_function_type_list (void_type_node, sysv_va_ref,
25500 sysv_va_ref, NULL_TREE);
25502 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
25503 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
25504 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
25505 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
25506 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
25507 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
25508 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
25509 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25510 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
25511 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25512 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
25513 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
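/* Illustrative use (assumption): these let 64-bit code that follows one
   ABI walk the variadic arguments of a function declared for the other,
   e.g.

       void f (int n, ...) __attribute__((ms_abi));

   whose body would use __builtin_ms_va_list together with the
   __builtin_ms_va_* functions registered above instead of the plain
   va_* family.  */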
25516 static void
25517 ix86_init_builtin_types (void)
25519 tree float128_type_node, float80_type_node;
25521 /* The __float80 type. */
25522 float80_type_node = long_double_type_node;
25523 if (TYPE_MODE (float80_type_node) != XFmode)
25525 /* long double is not XFmode here, so make a distinct 80-bit type for __float80.  */
25526 float80_type_node = make_node (REAL_TYPE);
25528 TYPE_PRECISION (float80_type_node) = 80;
25529 layout_type (float80_type_node);
25531 lang_hooks.types.register_builtin_type (float80_type_node, "__float80");
25533 /* The __float128 type. */
25534 float128_type_node = make_node (REAL_TYPE);
25535 TYPE_PRECISION (float128_type_node) = 128;
25536 layout_type (float128_type_node);
25537 lang_hooks.types.register_builtin_type (float128_type_node, "__float128");
25539 /* This macro is built by i386-builtin-types.awk. */
25540 DEFINE_BUILTIN_PRIMITIVE_TYPES;
25543 static void
25544 ix86_init_builtins (void)
25548 ix86_init_builtin_types ();
25550 /* TFmode support builtins. */
25551 def_builtin_const (0, "__builtin_infq",
25552 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
25553 def_builtin_const (0, "__builtin_huge_valq",
25554 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
25556 /* We will expand them to a normal call if SSE2 isn't available, since
25557 they are provided by libgcc.  */
25558 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
25559 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
25560 BUILT_IN_MD, "__fabstf2", NULL_TREE);
25561 TREE_READONLY (t) = 1;
25562 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
25564 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
25565 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
25566 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
25567 TREE_READONLY (t) = 1;
25568 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
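/* Illustrative use (assumption): after the declarations above,

       __float128 q = __builtin_copysignq (__builtin_infq (), -1.0q);

   yields negative infinity; without SSE2, __builtin_fabsq and
   __builtin_copysignq expand to calls to the libgcc routines __fabstf2
   and __copysigntf3 named above.  */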
25570 ix86_init_mmx_sse_builtins ();
25573 ix86_init_builtins_va_builtins_abi ();
25575 #ifdef SUBTARGET_INIT_BUILTINS
25576 SUBTARGET_INIT_BUILTINS;
25580 /* Return the ix86 builtin for CODE. */
25582 static tree
25583 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
25585 if (code >= IX86_BUILTIN_MAX)
25586 return error_mark_node;
25588 return ix86_builtins[code];
25591 /* Errors in the source file can cause expand_expr to return const0_rtx
25592 where we expect a vector. To avoid crashing, use one of the vector
25593 clear instructions. */
25594 static rtx
25595 safe_vector_operand (rtx x, enum machine_mode mode)
25597 if (x == const0_rtx)
25598 x = CONST0_RTX (mode);
25599 return x;
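/* Worked example (added, illustrative): if the front end handed us
   const0_rtx where a V4SF value was expected, a call such as

       op0 = safe_vector_operand (op0, V4SFmode);

   rewrites op0 as CONST0_RTX (V4SFmode), a zero vector of the right
   mode, so the expanders below never see a scalar zero in its place.  */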
25602 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
25604 static rtx
25605 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
25608 tree arg0 = CALL_EXPR_ARG (exp, 0);
25609 tree arg1 = CALL_EXPR_ARG (exp, 1);
25610 rtx op0 = expand_normal (arg0);
25611 rtx op1 = expand_normal (arg1);
25612 enum machine_mode tmode = insn_data[icode].operand[0].mode;
25613 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
25614 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
25616 if (VECTOR_MODE_P (mode0))
25617 op0 = safe_vector_operand (op0, mode0);
25618 if (VECTOR_MODE_P (mode1))
25619 op1 = safe_vector_operand (op1, mode1);
25621 if (optimize || !target
25622 || GET_MODE (target) != tmode
25623 || !insn_data[icode].operand[0].predicate (target, tmode))
25624 target = gen_reg_rtx (tmode);
25626 if (GET_MODE (op1) == SImode && mode1 == TImode)
25628 rtx x = gen_reg_rtx (V4SImode);
25629 emit_insn (gen_sse2_loadd (x, op1));
25630 op1 = gen_lowpart (TImode, x);
25633 if (!insn_data[icode].operand[1].predicate (op0, mode0))
25634 op0 = copy_to_mode_reg (mode0, op0);
25635 if (!insn_data[icode].operand[2].predicate (op1, mode1))
25636 op1 = copy_to_mode_reg (mode1, op1);
25638 pat = GEN_FCN (icode) (target, op0, op1);
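/* Illustrative walk-through (assumption): for a two-operand builtin such
   as __builtin_ia32_paddw128, icode names the addv8hi3 pattern; both
   operands were forced into V8HImode registers above, so the PAT built
   here emits a single paddw instruction when it passes the checks.  */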
25647 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
25649 static rtx
25650 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
25651 enum ix86_builtin_func_type m_type,
25652 enum rtx_code sub_code)
25657 bool comparison_p = false;
25658 bool tf_p = false;
25659 bool last_arg_constant = false;
25660 int num_memory = 0;
25663 enum machine_mode mode;
25666 enum machine_mode tmode = insn_data[icode].operand[0].mode;
25670 case MULTI_ARG_4_DF2_DI_I:
25671 case MULTI_ARG_4_DF2_DI_I1:
25672 case MULTI_ARG_4_SF2_SI_I:
25673 case MULTI_ARG_4_SF2_SI_I1:
25674 nargs = 4;
25675 last_arg_constant = true;
25678 case MULTI_ARG_3_SF:
25679 case MULTI_ARG_3_DF:
25680 case MULTI_ARG_3_SF2:
25681 case MULTI_ARG_3_DF2:
25682 case MULTI_ARG_3_DI:
25683 case MULTI_ARG_3_SI:
25684 case MULTI_ARG_3_SI_DI:
25685 case MULTI_ARG_3_HI:
25686 case MULTI_ARG_3_HI_SI:
25687 case MULTI_ARG_3_QI:
25688 case MULTI_ARG_3_DI2:
25689 case MULTI_ARG_3_SI2:
25690 case MULTI_ARG_3_HI2:
25691 case MULTI_ARG_3_QI2:
25692 nargs = 3;
25695 case MULTI_ARG_2_SF:
25696 case MULTI_ARG_2_DF:
25697 case MULTI_ARG_2_DI:
25698 case MULTI_ARG_2_SI:
25699 case MULTI_ARG_2_HI:
25700 case MULTI_ARG_2_QI:
25701 nargs = 2;
25704 case MULTI_ARG_2_DI_IMM:
25705 case MULTI_ARG_2_SI_IMM:
25706 case MULTI_ARG_2_HI_IMM:
25707 case MULTI_ARG_2_QI_IMM:
25708 nargs = 2;
25709 last_arg_constant = true;
25712 case MULTI_ARG_1_SF:
25713 case MULTI_ARG_1_DF:
25714 case MULTI_ARG_1_SF2:
25715 case MULTI_ARG_1_DF2:
25716 case MULTI_ARG_1_DI:
25717 case MULTI_ARG_1_SI:
25718 case MULTI_ARG_1_HI:
25719 case MULTI_ARG_1_QI:
25720 case MULTI_ARG_1_SI_DI:
25721 case MULTI_ARG_1_HI_DI:
25722 case MULTI_ARG_1_HI_SI:
25723 case MULTI_ARG_1_QI_DI:
25724 case MULTI_ARG_1_QI_SI:
25725 case MULTI_ARG_1_QI_HI:
25726 nargs = 1;
25729 case MULTI_ARG_2_DI_CMP:
25730 case MULTI_ARG_2_SI_CMP:
25731 case MULTI_ARG_2_HI_CMP:
25732 case MULTI_ARG_2_QI_CMP:
25733 nargs = 2;
25734 comparison_p = true;
25737 case MULTI_ARG_2_SF_TF:
25738 case MULTI_ARG_2_DF_TF:
25739 case MULTI_ARG_2_DI_TF:
25740 case MULTI_ARG_2_SI_TF:
25741 case MULTI_ARG_2_HI_TF:
25742 case MULTI_ARG_2_QI_TF:
25743 nargs = 2;
25744 tf_p = true;
25747 default:
25748 gcc_unreachable ();
25751 if (optimize || !target
25752 || GET_MODE (target) != tmode
25753 || !insn_data[icode].operand[0].predicate (target, tmode))
25754 target = gen_reg_rtx (tmode);
25756 gcc_assert (nargs <= 4);
25758 for (i = 0; i < nargs; i++)
25760 tree arg = CALL_EXPR_ARG (exp, i);
25761 rtx op = expand_normal (arg);
25762 int adjust = (comparison_p) ? 1 : 0;
25763 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
25765 if (last_arg_constant && i == nargs-1)
25767 if (!CONST_INT_P (op))
25769 error ("last argument must be an immediate");
25770 return gen_reg_rtx (tmode);
25775 if (VECTOR_MODE_P (mode))
25776 op = safe_vector_operand (op, mode);
25778 /* If we aren't optimizing, only allow one memory operand to be
25779 generated.  */
25780 if (memory_operand (op, mode))
25783 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
25786 || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
25788 op = force_reg (mode, op);
25792 args[i].mode = mode;
25798 pat = GEN_FCN (icode) (target, args[0].op);
25801 case 2:
25802 if (tf_p)
25803 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
25804 GEN_INT ((int)sub_code));
25805 else if (! comparison_p)
25806 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
25809 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
25813 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
25817 case 3:
25818 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
25821 case 4:
25822 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
25825 default:
25826 gcc_unreachable ();
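/* Worked example (added): for the vpcom builtins tabulated earlier,
   comparison_p is true, so the case-2 arm above synthesizes e.g.

       cmp_op = gen_rtx_fmt_ee (LTU, V16QImode, args[0].op, args[1].op);

   and passes it to the maskcmp pattern ahead of the two vector inputs.  */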
25836 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
25837 insns with vec_merge. */
25839 static rtx
25840 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
25841 rtx target)
25844 tree arg0 = CALL_EXPR_ARG (exp, 0);
25845 rtx op1, op0 = expand_normal (arg0);
25846 enum machine_mode tmode = insn_data[icode].operand[0].mode;
25847 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
25849 if (optimize || !target
25850 || GET_MODE (target) != tmode
25851 || !insn_data[icode].operand[0].predicate (target, tmode))
25852 target = gen_reg_rtx (tmode);
25854 if (VECTOR_MODE_P (mode0))
25855 op0 = safe_vector_operand (op0, mode0);
25857 if ((optimize && !register_operand (op0, mode0))
25858 || !insn_data[icode].operand[1].predicate (op0, mode0))
25859 op0 = copy_to_mode_reg (mode0, op0);
25862 if (!insn_data[icode].operand[2].predicate (op1, mode0))
25863 op1 = copy_to_mode_reg (mode0, op1);
25865 pat = GEN_FCN (icode) (target, op0, op1);
25872 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
25874 static rtx
25875 ix86_expand_sse_compare (const struct builtin_description *d,
25876 tree exp, rtx target, bool swap)
25879 tree arg0 = CALL_EXPR_ARG (exp, 0);
25880 tree arg1 = CALL_EXPR_ARG (exp, 1);
25881 rtx op0 = expand_normal (arg0);
25882 rtx op1 = expand_normal (arg1);
25884 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
25885 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
25886 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
25887 enum rtx_code comparison = d->comparison;
25889 if (VECTOR_MODE_P (mode0))
25890 op0 = safe_vector_operand (op0, mode0);
25891 if (VECTOR_MODE_P (mode1))
25892 op1 = safe_vector_operand (op1, mode1);
25894 /* Swap operands if we have a comparison that isn't available in
25895 hardware.  */
25896 if (swap)
25898 rtx tmp = gen_reg_rtx (mode1);
25899 emit_move_insn (tmp, op1);
25904 if (optimize || !target
25905 || GET_MODE (target) != tmode
25906 || !insn_data[d->icode].operand[0].predicate (target, tmode))
25907 target = gen_reg_rtx (tmode);
25909 if ((optimize && !register_operand (op0, mode0))
25910 || !insn_data[d->icode].operand[1].predicate (op0, mode0))
25911 op0 = copy_to_mode_reg (mode0, op0);
25912 if ((optimize && !register_operand (op1, mode1))
25913 || !insn_data[d->icode].operand[2].predicate (op1, mode1))
25914 op1 = copy_to_mode_reg (mode1, op1);
25916 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
25917 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
25924 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
25926 static rtx
25927 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
25928 rtx target)
25931 tree arg0 = CALL_EXPR_ARG (exp, 0);
25932 tree arg1 = CALL_EXPR_ARG (exp, 1);
25933 rtx op0 = expand_normal (arg0);
25934 rtx op1 = expand_normal (arg1);
25935 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
25936 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
25937 enum rtx_code comparison = d->comparison;
25939 if (VECTOR_MODE_P (mode0))
25940 op0 = safe_vector_operand (op0, mode0);
25941 if (VECTOR_MODE_P (mode1))
25942 op1 = safe_vector_operand (op1, mode1);
25944 /* Swap operands if we have a comparison that isn't available in
25945 hardware.  */
25946 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
25953 target = gen_reg_rtx (SImode);
25954 emit_move_insn (target, const0_rtx);
25955 target = gen_rtx_SUBREG (QImode, target, 0);
25957 if ((optimize && !register_operand (op0, mode0))
25958 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
25959 op0 = copy_to_mode_reg (mode0, op0);
25960 if ((optimize && !register_operand (op1, mode1))
25961 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
25962 op1 = copy_to_mode_reg (mode1, op1);
25964 pat = GEN_FCN (d->icode) (op0, op1);
25968 emit_insn (gen_rtx_SET (VOIDmode,
25969 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
25970 gen_rtx_fmt_ee (comparison, QImode,
25974 return SUBREG_REG (target);
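/* Summary example (added): for a comi-class builtin the sequence above
   emits the (v)comiss/(v)comisd pattern, which sets FLAGS_REG, then
   stores the comparison outcome into the low byte of a fresh SImode
   pseudo via STRICT_LOW_PART and returns that SImode register; e.g.
   __builtin_ia32_comieq yields an int that is 0 or 1.  */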
25977 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
25979 static rtx
25980 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
25981 rtx target)
25984 tree arg0 = CALL_EXPR_ARG (exp, 0);
25985 tree arg1 = CALL_EXPR_ARG (exp, 1);
25986 rtx op0 = expand_normal (arg0);
25987 rtx op1 = expand_normal (arg1);
25988 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
25989 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
25990 enum rtx_code comparison = d->comparison;
25992 if (VECTOR_MODE_P (mode0))
25993 op0 = safe_vector_operand (op0, mode0);
25994 if (VECTOR_MODE_P (mode1))
25995 op1 = safe_vector_operand (op1, mode1);
25997 target = gen_reg_rtx (SImode);
25998 emit_move_insn (target, const0_rtx);
25999 target = gen_rtx_SUBREG (QImode, target, 0);
26001 if ((optimize && !register_operand (op0, mode0))
26002 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26003 op0 = copy_to_mode_reg (mode0, op0);
26004 if ((optimize && !register_operand (op1, mode1))
26005 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
26006 op1 = copy_to_mode_reg (mode1, op1);
26008 pat = GEN_FCN (d->icode) (op0, op1);
26012 emit_insn (gen_rtx_SET (VOIDmode,
26013 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26014 gen_rtx_fmt_ee (comparison, QImode,
26018 return SUBREG_REG (target);
26021 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
26023 static rtx
26024 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
26025 tree exp, rtx target)
26028 tree arg0 = CALL_EXPR_ARG (exp, 0);
26029 tree arg1 = CALL_EXPR_ARG (exp, 1);
26030 tree arg2 = CALL_EXPR_ARG (exp, 2);
26031 tree arg3 = CALL_EXPR_ARG (exp, 3);
26032 tree arg4 = CALL_EXPR_ARG (exp, 4);
26033 rtx scratch0, scratch1;
26034 rtx op0 = expand_normal (arg0);
26035 rtx op1 = expand_normal (arg1);
26036 rtx op2 = expand_normal (arg2);
26037 rtx op3 = expand_normal (arg3);
26038 rtx op4 = expand_normal (arg4);
26039 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
26041 tmode0 = insn_data[d->icode].operand[0].mode;
26042 tmode1 = insn_data[d->icode].operand[1].mode;
26043 modev2 = insn_data[d->icode].operand[2].mode;
26044 modei3 = insn_data[d->icode].operand[3].mode;
26045 modev4 = insn_data[d->icode].operand[4].mode;
26046 modei5 = insn_data[d->icode].operand[5].mode;
26047 modeimm = insn_data[d->icode].operand[6].mode;
26049 if (VECTOR_MODE_P (modev2))
26050 op0 = safe_vector_operand (op0, modev2);
26051 if (VECTOR_MODE_P (modev4))
26052 op2 = safe_vector_operand (op2, modev4);
26054 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26055 op0 = copy_to_mode_reg (modev2, op0);
26056 if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
26057 op1 = copy_to_mode_reg (modei3, op1);
26058 if ((optimize && !register_operand (op2, modev4))
26059 || !insn_data[d->icode].operand[4].predicate (op2, modev4))
26060 op2 = copy_to_mode_reg (modev4, op2);
26061 if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
26062 op3 = copy_to_mode_reg (modei5, op3);
26064 if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
26066 error ("the fifth argument must be a 8-bit immediate");
26070 if (d->code == IX86_BUILTIN_PCMPESTRI128)
26072 if (optimize || !target
26073 || GET_MODE (target) != tmode0
26074 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26075 target = gen_reg_rtx (tmode0);
26077 scratch1 = gen_reg_rtx (tmode1);
26079 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
26081 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
26083 if (optimize || !target
26084 || GET_MODE (target) != tmode1
26085 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26086 target = gen_reg_rtx (tmode1);
26088 scratch0 = gen_reg_rtx (tmode0);
26090 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
26094 gcc_assert (d->flag);
26096 scratch0 = gen_reg_rtx (tmode0);
26097 scratch1 = gen_reg_rtx (tmode1);
26099 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
26109 target = gen_reg_rtx (SImode);
26110 emit_move_insn (target, const0_rtx);
26111 target = gen_rtx_SUBREG (QImode, target, 0);
26114 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26115 gen_rtx_fmt_ee (EQ, QImode,
26116 gen_rtx_REG ((enum machine_mode) d->flag,
26119 return SUBREG_REG (target);
26126 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
26128 static rtx
26129 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
26130 tree exp, rtx target)
26133 tree arg0 = CALL_EXPR_ARG (exp, 0);
26134 tree arg1 = CALL_EXPR_ARG (exp, 1);
26135 tree arg2 = CALL_EXPR_ARG (exp, 2);
26136 rtx scratch0, scratch1;
26137 rtx op0 = expand_normal (arg0);
26138 rtx op1 = expand_normal (arg1);
26139 rtx op2 = expand_normal (arg2);
26140 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
26142 tmode0 = insn_data[d->icode].operand[0].mode;
26143 tmode1 = insn_data[d->icode].operand[1].mode;
26144 modev2 = insn_data[d->icode].operand[2].mode;
26145 modev3 = insn_data[d->icode].operand[3].mode;
26146 modeimm = insn_data[d->icode].operand[4].mode;
26148 if (VECTOR_MODE_P (modev2))
26149 op0 = safe_vector_operand (op0, modev2);
26150 if (VECTOR_MODE_P (modev3))
26151 op1 = safe_vector_operand (op1, modev3);
26153 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26154 op0 = copy_to_mode_reg (modev2, op0);
26155 if ((optimize && !register_operand (op1, modev3))
26156 || !insn_data[d->icode].operand[3].predicate (op1, modev3))
26157 op1 = copy_to_mode_reg (modev3, op1);
26159 if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
26161 error ("the third argument must be a 8-bit immediate");
26165 if (d->code == IX86_BUILTIN_PCMPISTRI128)
26167 if (optimize || !target
26168 || GET_MODE (target) != tmode0
26169 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26170 target = gen_reg_rtx (tmode0);
26172 scratch1 = gen_reg_rtx (tmode1);
26174 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
26176 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
26178 if (optimize || !target
26179 || GET_MODE (target) != tmode1
26180 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26181 target = gen_reg_rtx (tmode1);
26183 scratch0 = gen_reg_rtx (tmode0);
26185 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
26189 gcc_assert (d->flag);
26191 scratch0 = gen_reg_rtx (tmode0);
26192 scratch1 = gen_reg_rtx (tmode1);
26194 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
26204 target = gen_reg_rtx (SImode);
26205 emit_move_insn (target, const0_rtx);
26206 target = gen_rtx_SUBREG (QImode, target, 0);
26209 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26210 gen_rtx_fmt_ee (EQ, QImode,
26211 gen_rtx_REG ((enum machine_mode) d->flag,
26214 return SUBREG_REG (target);
26220 /* Subroutine of ix86_expand_builtin to take care of insns with
26221 variable number of operands. */
26223 static rtx
26224 ix86_expand_args_builtin (const struct builtin_description *d,
26225 tree exp, rtx target)
26227 rtx pat, real_target;
26228 unsigned int i, nargs;
26229 unsigned int nargs_constant = 0;
26230 int num_memory = 0;
26234 enum machine_mode mode;
26236 bool last_arg_count = false;
26237 enum insn_code icode = d->icode;
26238 const struct insn_data_d *insn_p = &insn_data[icode];
26239 enum machine_mode tmode = insn_p->operand[0].mode;
26240 enum machine_mode rmode = VOIDmode;
26241 bool swap = false;
26242 enum rtx_code comparison = d->comparison;
26244 switch ((enum ix86_builtin_func_type) d->flag)
26246 case INT_FTYPE_V8SF_V8SF_PTEST:
26247 case INT_FTYPE_V4DI_V4DI_PTEST:
26248 case INT_FTYPE_V4DF_V4DF_PTEST:
26249 case INT_FTYPE_V4SF_V4SF_PTEST:
26250 case INT_FTYPE_V2DI_V2DI_PTEST:
26251 case INT_FTYPE_V2DF_V2DF_PTEST:
26252 return ix86_expand_sse_ptest (d, exp, target);
26253 case FLOAT128_FTYPE_FLOAT128:
26254 case FLOAT_FTYPE_FLOAT:
26255 case INT_FTYPE_INT:
26256 case UINT64_FTYPE_INT:
26257 case UINT16_FTYPE_UINT16:
26258 case INT64_FTYPE_INT64:
26259 case INT64_FTYPE_V4SF:
26260 case INT64_FTYPE_V2DF:
26261 case INT_FTYPE_V16QI:
26262 case INT_FTYPE_V8QI:
26263 case INT_FTYPE_V8SF:
26264 case INT_FTYPE_V4DF:
26265 case INT_FTYPE_V4SF:
26266 case INT_FTYPE_V2DF:
26267 case V16QI_FTYPE_V16QI:
26268 case V8SI_FTYPE_V8SF:
26269 case V8SI_FTYPE_V4SI:
26270 case V8HI_FTYPE_V8HI:
26271 case V8HI_FTYPE_V16QI:
26272 case V8QI_FTYPE_V8QI:
26273 case V8SF_FTYPE_V8SF:
26274 case V8SF_FTYPE_V8SI:
26275 case V8SF_FTYPE_V4SF:
26276 case V8SF_FTYPE_V8HI:
26277 case V4SI_FTYPE_V4SI:
26278 case V4SI_FTYPE_V16QI:
26279 case V4SI_FTYPE_V4SF:
26280 case V4SI_FTYPE_V8SI:
26281 case V4SI_FTYPE_V8HI:
26282 case V4SI_FTYPE_V4DF:
26283 case V4SI_FTYPE_V2DF:
26284 case V4HI_FTYPE_V4HI:
26285 case V4DF_FTYPE_V4DF:
26286 case V4DF_FTYPE_V4SI:
26287 case V4DF_FTYPE_V4SF:
26288 case V4DF_FTYPE_V2DF:
26289 case V4SF_FTYPE_V4SF:
26290 case V4SF_FTYPE_V4SI:
26291 case V4SF_FTYPE_V8SF:
26292 case V4SF_FTYPE_V4DF:
26293 case V4SF_FTYPE_V8HI:
26294 case V4SF_FTYPE_V2DF:
26295 case V2DI_FTYPE_V2DI:
26296 case V2DI_FTYPE_V16QI:
26297 case V2DI_FTYPE_V8HI:
26298 case V2DI_FTYPE_V4SI:
26299 case V2DF_FTYPE_V2DF:
26300 case V2DF_FTYPE_V4SI:
26301 case V2DF_FTYPE_V4DF:
26302 case V2DF_FTYPE_V4SF:
26303 case V2DF_FTYPE_V2SI:
26304 case V2SI_FTYPE_V2SI:
26305 case V2SI_FTYPE_V4SF:
26306 case V2SI_FTYPE_V2SF:
26307 case V2SI_FTYPE_V2DF:
26308 case V2SF_FTYPE_V2SF:
26309 case V2SF_FTYPE_V2SI:
26310 nargs = 1;
26312 case V4SF_FTYPE_V4SF_VEC_MERGE:
26313 case V2DF_FTYPE_V2DF_VEC_MERGE:
26314 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
26315 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
26316 case V16QI_FTYPE_V16QI_V16QI:
26317 case V16QI_FTYPE_V8HI_V8HI:
26318 case V8QI_FTYPE_V8QI_V8QI:
26319 case V8QI_FTYPE_V4HI_V4HI:
26320 case V8HI_FTYPE_V8HI_V8HI:
26321 case V8HI_FTYPE_V16QI_V16QI:
26322 case V8HI_FTYPE_V4SI_V4SI:
26323 case V8SF_FTYPE_V8SF_V8SF:
26324 case V8SF_FTYPE_V8SF_V8SI:
26325 case V4SI_FTYPE_V4SI_V4SI:
26326 case V4SI_FTYPE_V8HI_V8HI:
26327 case V4SI_FTYPE_V4SF_V4SF:
26328 case V4SI_FTYPE_V2DF_V2DF:
26329 case V4HI_FTYPE_V4HI_V4HI:
26330 case V4HI_FTYPE_V8QI_V8QI:
26331 case V4HI_FTYPE_V2SI_V2SI:
26332 case V4DF_FTYPE_V4DF_V4DF:
26333 case V4DF_FTYPE_V4DF_V4DI:
26334 case V4SF_FTYPE_V4SF_V4SF:
26335 case V4SF_FTYPE_V4SF_V4SI:
26336 case V4SF_FTYPE_V4SF_V2SI:
26337 case V4SF_FTYPE_V4SF_V2DF:
26338 case V4SF_FTYPE_V4SF_DI:
26339 case V4SF_FTYPE_V4SF_SI:
26340 case V2DI_FTYPE_V2DI_V2DI:
26341 case V2DI_FTYPE_V16QI_V16QI:
26342 case V2DI_FTYPE_V4SI_V4SI:
26343 case V2DI_FTYPE_V2DI_V16QI:
26344 case V2DI_FTYPE_V2DF_V2DF:
26345 case V2SI_FTYPE_V2SI_V2SI:
26346 case V2SI_FTYPE_V4HI_V4HI:
26347 case V2SI_FTYPE_V2SF_V2SF:
26348 case V2DF_FTYPE_V2DF_V2DF:
26349 case V2DF_FTYPE_V2DF_V4SF:
26350 case V2DF_FTYPE_V2DF_V2DI:
26351 case V2DF_FTYPE_V2DF_DI:
26352 case V2DF_FTYPE_V2DF_SI:
26353 case V2SF_FTYPE_V2SF_V2SF:
26354 case V1DI_FTYPE_V1DI_V1DI:
26355 case V1DI_FTYPE_V8QI_V8QI:
26356 case V1DI_FTYPE_V2SI_V2SI:
26357 if (comparison == UNKNOWN)
26358 return ix86_expand_binop_builtin (icode, exp, target);
26359 nargs = 2;
26361 case V4SF_FTYPE_V4SF_V4SF_SWAP:
26362 case V2DF_FTYPE_V2DF_V2DF_SWAP:
26363 gcc_assert (comparison != UNKNOWN);
26364 nargs = 2;
26365 swap = true;
26367 case V8HI_FTYPE_V8HI_V8HI_COUNT:
26368 case V8HI_FTYPE_V8HI_SI_COUNT:
26369 case V4SI_FTYPE_V4SI_V4SI_COUNT:
26370 case V4SI_FTYPE_V4SI_SI_COUNT:
26371 case V4HI_FTYPE_V4HI_V4HI_COUNT:
26372 case V4HI_FTYPE_V4HI_SI_COUNT:
26373 case V2DI_FTYPE_V2DI_V2DI_COUNT:
26374 case V2DI_FTYPE_V2DI_SI_COUNT:
26375 case V2SI_FTYPE_V2SI_V2SI_COUNT:
26376 case V2SI_FTYPE_V2SI_SI_COUNT:
26377 case V1DI_FTYPE_V1DI_V1DI_COUNT:
26378 case V1DI_FTYPE_V1DI_SI_COUNT:
26379 nargs = 2;
26380 last_arg_count = true;
26382 case UINT64_FTYPE_UINT64_UINT64:
26383 case UINT_FTYPE_UINT_UINT:
26384 case UINT_FTYPE_UINT_USHORT:
26385 case UINT_FTYPE_UINT_UCHAR:
26386 case UINT16_FTYPE_UINT16_INT:
26387 case UINT8_FTYPE_UINT8_INT:
26388 nargs = 2;
26390 case V2DI_FTYPE_V2DI_INT_CONVERT:
26391 nargs = 2;
26392 rmode = V1TImode;
26393 nargs_constant = 1;
26395 case V8HI_FTYPE_V8HI_INT:
26396 case V8HI_FTYPE_V8SF_INT:
26397 case V8HI_FTYPE_V4SF_INT:
26398 case V8SF_FTYPE_V8SF_INT:
26399 case V4SI_FTYPE_V4SI_INT:
26400 case V4SI_FTYPE_V8SI_INT:
26401 case V4HI_FTYPE_V4HI_INT:
26402 case V4DF_FTYPE_V4DF_INT:
26403 case V4SF_FTYPE_V4SF_INT:
26404 case V4SF_FTYPE_V8SF_INT:
26405 case V2DI_FTYPE_V2DI_INT:
26406 case V2DF_FTYPE_V2DF_INT:
26407 case V2DF_FTYPE_V4DF_INT:
26408 nargs = 2;
26409 nargs_constant = 1;
26411 case V16QI_FTYPE_V16QI_V16QI_V16QI:
26412 case V8SF_FTYPE_V8SF_V8SF_V8SF:
26413 case V4DF_FTYPE_V4DF_V4DF_V4DF:
26414 case V4SF_FTYPE_V4SF_V4SF_V4SF:
26415 case V2DF_FTYPE_V2DF_V2DF_V2DF:
26416 nargs = 3;
26418 case V16QI_FTYPE_V16QI_V16QI_INT:
26419 case V8HI_FTYPE_V8HI_V8HI_INT:
26420 case V8SI_FTYPE_V8SI_V8SI_INT:
26421 case V8SI_FTYPE_V8SI_V4SI_INT:
26422 case V8SF_FTYPE_V8SF_V8SF_INT:
26423 case V8SF_FTYPE_V8SF_V4SF_INT:
26424 case V4SI_FTYPE_V4SI_V4SI_INT:
26425 case V4DF_FTYPE_V4DF_V4DF_INT:
26426 case V4DF_FTYPE_V4DF_V2DF_INT:
26427 case V4SF_FTYPE_V4SF_V4SF_INT:
26428 case V2DI_FTYPE_V2DI_V2DI_INT:
26429 case V2DF_FTYPE_V2DF_V2DF_INT:
26430 nargs = 3;
26431 nargs_constant = 1;
26433 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
26436 nargs_constant = 1;
26438 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
26441 nargs_constant = 1;
26443 case V2DI_FTYPE_V2DI_UINT_UINT:
26444 nargs = 3;
26445 nargs_constant = 2;
26447 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
26448 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
26449 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
26450 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
26451 nargs = 4;
26452 nargs_constant = 1;
26454 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
26455 nargs = 4;
26456 nargs_constant = 2;
26459 gcc_unreachable ();
26462 gcc_assert (nargs <= ARRAY_SIZE (args));
26464 if (comparison != UNKNOWN)
26466 gcc_assert (nargs == 2);
26467 return ix86_expand_sse_compare (d, exp, target, swap);
26470 if (rmode == VOIDmode || rmode == tmode)
26472 if (optimize
26473 || target == 0
26474 || GET_MODE (target) != tmode
26475 || !insn_p->operand[0].predicate (target, tmode))
26476 target = gen_reg_rtx (tmode);
26477 real_target = target;
26481 target = gen_reg_rtx (rmode);
26482 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
26485 for (i = 0; i < nargs; i++)
26487 tree arg = CALL_EXPR_ARG (exp, i);
26488 rtx op = expand_normal (arg);
26489 enum machine_mode mode = insn_p->operand[i + 1].mode;
26490 bool match = insn_p->operand[i + 1].predicate (op, mode);
26492 if (last_arg_count && (i + 1) == nargs)
26494 /* SIMD shift insns take either an 8-bit immediate or a
26495 register as the count.  But the builtin functions take int as
26496 the count.  If the count doesn't match, we put it in a register.  */
26499 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
26500 if (!insn_p->operand[i + 1].predicate (op, mode))
26501 op = copy_to_reg (op);
26504 else if ((nargs - i) <= nargs_constant)
26509 case CODE_FOR_sse4_1_roundpd:
26510 case CODE_FOR_sse4_1_roundps:
26511 case CODE_FOR_sse4_1_roundsd:
26512 case CODE_FOR_sse4_1_roundss:
26513 case CODE_FOR_sse4_1_blendps:
26514 case CODE_FOR_avx_blendpd256:
26515 case CODE_FOR_avx_vpermilv4df:
26516 case CODE_FOR_avx_roundpd256:
26517 case CODE_FOR_avx_roundps256:
26518 error ("the last argument must be a 4-bit immediate");
26521 case CODE_FOR_sse4_1_blendpd:
26522 case CODE_FOR_avx_vpermilv2df:
26523 case CODE_FOR_xop_vpermil2v2df3:
26524 case CODE_FOR_xop_vpermil2v4sf3:
26525 case CODE_FOR_xop_vpermil2v4df3:
26526 case CODE_FOR_xop_vpermil2v8sf3:
26527 error ("the last argument must be a 2-bit immediate");
26530 case CODE_FOR_avx_vextractf128v4df:
26531 case CODE_FOR_avx_vextractf128v8sf:
26532 case CODE_FOR_avx_vextractf128v8si:
26533 case CODE_FOR_avx_vinsertf128v4df:
26534 case CODE_FOR_avx_vinsertf128v8sf:
26535 case CODE_FOR_avx_vinsertf128v8si:
26536 error ("the last argument must be a 1-bit immediate");
26539 case CODE_FOR_avx_cmpsdv2df3:
26540 case CODE_FOR_avx_cmpssv4sf3:
26541 case CODE_FOR_avx_cmppdv2df3:
26542 case CODE_FOR_avx_cmppsv4sf3:
26543 case CODE_FOR_avx_cmppdv4df3:
26544 case CODE_FOR_avx_cmppsv8sf3:
26545 error ("the last argument must be a 5-bit immediate");
26549 switch (nargs_constant)
26552 if ((nargs - i) == nargs_constant)
26554 error ("the next to last argument must be an 8-bit immediate");
26558 error ("the last argument must be an 8-bit immediate");
26561 gcc_unreachable ();
26568 if (VECTOR_MODE_P (mode))
26569 op = safe_vector_operand (op, mode);
26571 /* If we aren't optimizing, only allow one memory operand to
26572 be generated.  */
26573 if (memory_operand (op, mode))
26576 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
26578 if (optimize || !match || num_memory > 1)
26579 op = copy_to_mode_reg (mode, op);
26583 op = copy_to_reg (op);
26584 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
26589 args[i].mode = mode;
26595 pat = GEN_FCN (icode) (real_target, args[0].op);
26598 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
26601 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
26605 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
26606 args[2].op, args[3].op);
26609 gcc_unreachable ();
26619 /* Subroutine of ix86_expand_builtin to take care of special insns
26620 with variable number of operands. */
26622 static rtx
26623 ix86_expand_special_args_builtin (const struct builtin_description *d,
26624 tree exp, rtx target)
26628 unsigned int i, nargs, arg_adjust, memory;
26632 enum machine_mode mode;
26634 enum insn_code icode = d->icode;
26635 bool last_arg_constant = false;
26636 const struct insn_data_d *insn_p = &insn_data[icode];
26637 enum machine_mode tmode = insn_p->operand[0].mode;
26638 enum { load, store } klass;
26640 switch ((enum ix86_builtin_func_type) d->flag)
26642 case VOID_FTYPE_VOID:
26643 if (icode == CODE_FOR_avx_vzeroupper)
26644 target = GEN_INT (vzeroupper_intrinsic);
26645 emit_insn (GEN_FCN (icode) (target));
26646 return 0;
26647 case VOID_FTYPE_UINT64:
26648 case VOID_FTYPE_UNSIGNED:
26654 case UINT64_FTYPE_VOID:
26655 case UNSIGNED_FTYPE_VOID:
26656 case UINT16_FTYPE_VOID:
26661 case UINT64_FTYPE_PUNSIGNED:
26662 case V2DI_FTYPE_PV2DI:
26663 case V32QI_FTYPE_PCCHAR:
26664 case V16QI_FTYPE_PCCHAR:
26665 case V8SF_FTYPE_PCV4SF:
26666 case V8SF_FTYPE_PCFLOAT:
26667 case V4SF_FTYPE_PCFLOAT:
26668 case V4DF_FTYPE_PCV2DF:
26669 case V4DF_FTYPE_PCDOUBLE:
26670 case V2DF_FTYPE_PCDOUBLE:
26671 case VOID_FTYPE_PVOID:
26676 case VOID_FTYPE_PV2SF_V4SF:
26677 case VOID_FTYPE_PV4DI_V4DI:
26678 case VOID_FTYPE_PV2DI_V2DI:
26679 case VOID_FTYPE_PCHAR_V32QI:
26680 case VOID_FTYPE_PCHAR_V16QI:
26681 case VOID_FTYPE_PFLOAT_V8SF:
26682 case VOID_FTYPE_PFLOAT_V4SF:
26683 case VOID_FTYPE_PDOUBLE_V4DF:
26684 case VOID_FTYPE_PDOUBLE_V2DF:
26685 case VOID_FTYPE_PULONGLONG_ULONGLONG:
26686 case VOID_FTYPE_PINT_INT:
26689 /* Reserve memory operand for target. */
26690 memory = ARRAY_SIZE (args);
26692 case V4SF_FTYPE_V4SF_PCV2SF:
26693 case V2DF_FTYPE_V2DF_PCDOUBLE:
26698 case V8SF_FTYPE_PCV8SF_V8SF:
26699 case V4DF_FTYPE_PCV4DF_V4DF:
26700 case V4SF_FTYPE_PCV4SF_V4SF:
26701 case V2DF_FTYPE_PCV2DF_V2DF:
26706 case VOID_FTYPE_PV8SF_V8SF_V8SF:
26707 case VOID_FTYPE_PV4DF_V4DF_V4DF:
26708 case VOID_FTYPE_PV4SF_V4SF_V4SF:
26709 case VOID_FTYPE_PV2DF_V2DF_V2DF:
26712 /* Reserve memory operand for target. */
26713 memory = ARRAY_SIZE (args);
26715 case VOID_FTYPE_UINT_UINT_UINT:
26716 case VOID_FTYPE_UINT64_UINT_UINT:
26717 case UCHAR_FTYPE_UINT_UINT_UINT:
26718 case UCHAR_FTYPE_UINT64_UINT_UINT:
26721 memory = ARRAY_SIZE (args);
26722 last_arg_constant = true;
26725 gcc_unreachable ();
26728 gcc_assert (nargs <= ARRAY_SIZE (args));
26730 if (klass == store)
26732 arg = CALL_EXPR_ARG (exp, 0);
26733 op = expand_normal (arg);
26734 gcc_assert (target == 0);
26736 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
26738 target = force_reg (tmode, op);
26746 || GET_MODE (target) != tmode
26747 || !insn_p->operand[0].predicate (target, tmode))
26748 target = gen_reg_rtx (tmode);
26751 for (i = 0; i < nargs; i++)
26753 enum machine_mode mode = insn_p->operand[i + 1].mode;
26756 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
26757 op = expand_normal (arg);
26758 match = insn_p->operand[i + 1].predicate (op, mode);
26760 if (last_arg_constant && (i + 1) == nargs)
26764 if (icode == CODE_FOR_lwp_lwpvalsi3
26765 || icode == CODE_FOR_lwp_lwpinssi3
26766 || icode == CODE_FOR_lwp_lwpvaldi3
26767 || icode == CODE_FOR_lwp_lwpinsdi3)
26768 error ("the last argument must be a 32-bit immediate");
26770 error ("the last argument must be an 8-bit immediate");
26778 /* This must be the memory operand. */
26779 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
26780 gcc_assert (GET_MODE (op) == mode
26781 || GET_MODE (op) == VOIDmode);
26785 /* This must be a register.  */
26786 if (VECTOR_MODE_P (mode))
26787 op = safe_vector_operand (op, mode);
26789 gcc_assert (GET_MODE (op) == mode
26790 || GET_MODE (op) == VOIDmode);
26791 op = copy_to_mode_reg (mode, op);
26796 args[i].mode = mode;
26802 pat = GEN_FCN (icode) (target);
26805 pat = GEN_FCN (icode) (target, args[0].op);
26808 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
26811 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
26814 gcc_unreachable ();
26820 return klass == store ? 0 : target;
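/* Illustrative example (assumption): a store-class builtin such as
   __builtin_ia32_movntps (VOID_FTYPE_PFLOAT_V4SF above) takes its
   destination pointer as argument 0; that pointer becomes the MEM
   target, and 0 is returned here because the call produces no value.  */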
26823 /* Return the integer constant in ARG. Constrain it to be in the range
26824 of the subparts of VEC_TYPE; issue an error if not. */
26826 static unsigned HOST_WIDE_INT
26827 get_element_number (tree vec_type, tree arg)
26829 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
26831 if (!host_integerp (arg, 1)
26832 || (elt = tree_low_cst (arg, 1), elt > max))
26834 error ("selector must be an integer constant in the range 0..%wi", max);
26841 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
26842 ix86_expand_vector_init. We DO have language-level syntax for this, in
26843 the form of (type){ init-list }. Except that since we can't place emms
26844 instructions from inside the compiler, we can't allow the use of MMX
26845 registers unless the user explicitly asks for it. So we do *not* define
26846 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
26847 we have builtins invoked by mmintrin.h that give us license to emit
26848 these sorts of instructions. */
26850 static rtx
26851 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
26853 enum machine_mode tmode = TYPE_MODE (type);
26854 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
26855 int i, n_elt = GET_MODE_NUNITS (tmode);
26856 rtvec v = rtvec_alloc (n_elt);
26858 gcc_assert (VECTOR_MODE_P (tmode));
26859 gcc_assert (call_expr_nargs (exp) == n_elt);
26861 for (i = 0; i < n_elt; ++i)
26863 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
26864 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
26867 if (!target || !register_operand (target, tmode))
26868 target = gen_reg_rtx (tmode);
26870 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
26874 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
26875 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
26876 had a language-level syntax for referencing vector elements. */
26878 static rtx
26879 ix86_expand_vec_ext_builtin (tree exp, rtx target)
26881 enum machine_mode tmode, mode0;
26886 arg0 = CALL_EXPR_ARG (exp, 0);
26887 arg1 = CALL_EXPR_ARG (exp, 1);
26889 op0 = expand_normal (arg0);
26890 elt = get_element_number (TREE_TYPE (arg0), arg1);
26892 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
26893 mode0 = TYPE_MODE (TREE_TYPE (arg0));
26894 gcc_assert (VECTOR_MODE_P (mode0));
26896 op0 = force_reg (mode0, op0);
26898 if (optimize || !target || !register_operand (target, tmode))
26899 target = gen_reg_rtx (tmode);
26901 ix86_expand_vector_extract (true, target, op0, elt);
26906 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
26907 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
26908 a language-level syntax for referencing vector elements. */
26910 static rtx
26911 ix86_expand_vec_set_builtin (tree exp)
26913 enum machine_mode tmode, mode1;
26914 tree arg0, arg1, arg2;
26916 rtx op0, op1, target;
26918 arg0 = CALL_EXPR_ARG (exp, 0);
26919 arg1 = CALL_EXPR_ARG (exp, 1);
26920 arg2 = CALL_EXPR_ARG (exp, 2);
26922 tmode = TYPE_MODE (TREE_TYPE (arg0));
26923 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
26924 gcc_assert (VECTOR_MODE_P (tmode));
26926 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
26927 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
26928 elt = get_element_number (TREE_TYPE (arg0), arg2);
26930 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
26931 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
26933 op0 = force_reg (tmode, op0);
26934 op1 = force_reg (mode1, op1);
26936 /* OP0 is the source of these builtin functions and shouldn't be
26937 modified.  Create a copy, use it, and return it as the target.  */
26938 target = gen_reg_rtx (tmode);
26939 emit_move_insn (target, op0);
26940 ix86_expand_vector_set (true, target, op1, elt);
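/* Illustrative consequence (assumption): in

       __v4hi b = __builtin_ia32_vec_set_v4hi (a, 99, 1);

   element 1 of B is 99 while A is left untouched, precisely because of
   the copy made above.  */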
26945 /* Expand an expression EXP that calls a built-in function,
26946 with result going to TARGET if that's convenient
26947 (and in mode MODE if that's convenient).
26948 SUBTARGET may be used as the target for computing one of EXP's operands.
26949 IGNORE is nonzero if the value is to be ignored. */
26951 static rtx
26952 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
26953 enum machine_mode mode ATTRIBUTE_UNUSED,
26954 int ignore ATTRIBUTE_UNUSED)
26956 const struct builtin_description *d;
26958 enum insn_code icode;
26959 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
26960 tree arg0, arg1, arg2;
26961 rtx op0, op1, op2, pat;
26962 enum machine_mode mode0, mode1, mode2;
26963 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
26965 /* Determine whether the builtin function is available under the current ISA.
26966 Originally the builtin was not created if it wasn't applicable to the
26967 current ISA based on the command line switches. With function specific
26968 options, we need to check in the context of the function making the call
26969 whether it is supported. */
26970 if (ix86_builtins_isa[fcode].isa
26971 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
26973 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
26974 NULL, NULL, false);
26977 error ("%qE needs unknown isa option", fndecl);
26980 gcc_assert (opts != NULL);
26981 error ("%qE needs isa option %s", fndecl, opts);
26989 case IX86_BUILTIN_MASKMOVQ:
26990 case IX86_BUILTIN_MASKMOVDQU:
26991 icode = (fcode == IX86_BUILTIN_MASKMOVQ
26992 ? CODE_FOR_mmx_maskmovq
26993 : CODE_FOR_sse2_maskmovdqu);
26994 /* Note the arg order is different from the operand order. */
26995 arg1 = CALL_EXPR_ARG (exp, 0);
26996 arg2 = CALL_EXPR_ARG (exp, 1);
26997 arg0 = CALL_EXPR_ARG (exp, 2);
26998 op0 = expand_normal (arg0);
26999 op1 = expand_normal (arg1);
27000 op2 = expand_normal (arg2);
27001 mode0 = insn_data[icode].operand[0].mode;
27002 mode1 = insn_data[icode].operand[1].mode;
27003 mode2 = insn_data[icode].operand[2].mode;
27005 op0 = force_reg (Pmode, op0);
27006 op0 = gen_rtx_MEM (mode1, op0);
27008 if (!insn_data[icode].operand[0].predicate (op0, mode0))
27009 op0 = copy_to_mode_reg (mode0, op0);
27010 if (!insn_data[icode].operand[1].predicate (op1, mode1))
27011 op1 = copy_to_mode_reg (mode1, op1);
27012 if (!insn_data[icode].operand[2].predicate (op2, mode2))
27013 op2 = copy_to_mode_reg (mode2, op2);
27014 pat = GEN_FCN (icode) (op0, op1, op2);
27020 case IX86_BUILTIN_LDMXCSR:
27021 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
27022 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27023 emit_move_insn (target, op0);
27024 emit_insn (gen_sse_ldmxcsr (target));
27027 case IX86_BUILTIN_STMXCSR:
27028 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27029 emit_insn (gen_sse_stmxcsr (target));
27030 return copy_to_mode_reg (SImode, target);
27032 case IX86_BUILTIN_CLFLUSH:
27033 arg0 = CALL_EXPR_ARG (exp, 0);
27034 op0 = expand_normal (arg0);
27035 icode = CODE_FOR_sse2_clflush;
27036 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27037 op0 = copy_to_mode_reg (Pmode, op0);
27039 emit_insn (gen_sse2_clflush (op0));
27042 case IX86_BUILTIN_MONITOR:
27043 arg0 = CALL_EXPR_ARG (exp, 0);
27044 arg1 = CALL_EXPR_ARG (exp, 1);
27045 arg2 = CALL_EXPR_ARG (exp, 2);
27046 op0 = expand_normal (arg0);
27047 op1 = expand_normal (arg1);
27048 op2 = expand_normal (arg2);
27049 if (!REG_P (op0))
27050 op0 = copy_to_mode_reg (Pmode, op0);
27051 if (!REG_P (op1))
27052 op1 = copy_to_mode_reg (SImode, op1);
27053 if (!REG_P (op2))
27054 op2 = copy_to_mode_reg (SImode, op2);
27055 emit_insn (ix86_gen_monitor (op0, op1, op2));
27058 case IX86_BUILTIN_MWAIT:
27059 arg0 = CALL_EXPR_ARG (exp, 0);
27060 arg1 = CALL_EXPR_ARG (exp, 1);
27061 op0 = expand_normal (arg0);
27062 op1 = expand_normal (arg1);
27063 if (!REG_P (op0))
27064 op0 = copy_to_mode_reg (SImode, op0);
27065 if (!REG_P (op1))
27066 op1 = copy_to_mode_reg (SImode, op1);
27067 emit_insn (gen_sse3_mwait (op0, op1));
27070 case IX86_BUILTIN_VEC_INIT_V2SI:
27071 case IX86_BUILTIN_VEC_INIT_V4HI:
27072 case IX86_BUILTIN_VEC_INIT_V8QI:
27073 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
27075 case IX86_BUILTIN_VEC_EXT_V2DF:
27076 case IX86_BUILTIN_VEC_EXT_V2DI:
27077 case IX86_BUILTIN_VEC_EXT_V4SF:
27078 case IX86_BUILTIN_VEC_EXT_V4SI:
27079 case IX86_BUILTIN_VEC_EXT_V8HI:
27080 case IX86_BUILTIN_VEC_EXT_V2SI:
27081 case IX86_BUILTIN_VEC_EXT_V4HI:
27082 case IX86_BUILTIN_VEC_EXT_V16QI:
27083 return ix86_expand_vec_ext_builtin (exp, target);
27085 case IX86_BUILTIN_VEC_SET_V2DI:
27086 case IX86_BUILTIN_VEC_SET_V4SF:
27087 case IX86_BUILTIN_VEC_SET_V4SI:
27088 case IX86_BUILTIN_VEC_SET_V8HI:
27089 case IX86_BUILTIN_VEC_SET_V4HI:
27090 case IX86_BUILTIN_VEC_SET_V16QI:
27091 return ix86_expand_vec_set_builtin (exp);
27093 case IX86_BUILTIN_VEC_PERM_V2DF:
27094 case IX86_BUILTIN_VEC_PERM_V4SF:
27095 case IX86_BUILTIN_VEC_PERM_V2DI:
27096 case IX86_BUILTIN_VEC_PERM_V4SI:
27097 case IX86_BUILTIN_VEC_PERM_V8HI:
27098 case IX86_BUILTIN_VEC_PERM_V16QI:
27099 case IX86_BUILTIN_VEC_PERM_V2DI_U:
27100 case IX86_BUILTIN_VEC_PERM_V4SI_U:
27101 case IX86_BUILTIN_VEC_PERM_V8HI_U:
27102 case IX86_BUILTIN_VEC_PERM_V16QI_U:
27103 case IX86_BUILTIN_VEC_PERM_V4DF:
27104 case IX86_BUILTIN_VEC_PERM_V8SF:
27105 return ix86_expand_vec_perm_builtin (exp);
27107 case IX86_BUILTIN_INFQ:
27108 case IX86_BUILTIN_HUGE_VALQ:
27110 REAL_VALUE_TYPE inf;
27113 real_inf (&inf);
27114 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
27116 tmp = validize_mem (force_const_mem (mode, tmp));
27118 if (target == 0)
27119 target = gen_reg_rtx (mode);
27121 emit_move_insn (target, tmp);
27125 case IX86_BUILTIN_LLWPCB:
27126 arg0 = CALL_EXPR_ARG (exp, 0);
27127 op0 = expand_normal (arg0);
27128 icode = CODE_FOR_lwp_llwpcb;
27129 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27130 op0 = copy_to_mode_reg (Pmode, op0);
27131 emit_insn (gen_lwp_llwpcb (op0));
27134 case IX86_BUILTIN_SLWPCB:
27135 icode = CODE_FOR_lwp_slwpcb;
27136 if (!target
27137 || !insn_data[icode].operand[0].predicate (target, Pmode))
27138 target = gen_reg_rtx (Pmode);
27139 emit_insn (gen_lwp_slwpcb (target));
27142 case IX86_BUILTIN_BEXTRI32:
27143 case IX86_BUILTIN_BEXTRI64:
27144 arg0 = CALL_EXPR_ARG (exp, 0);
27145 arg1 = CALL_EXPR_ARG (exp, 1);
27146 op0 = expand_normal (arg0);
27147 op1 = expand_normal (arg1);
27148 icode = (fcode == IX86_BUILTIN_BEXTRI32
27149 ? CODE_FOR_tbm_bextri_si
27150 : CODE_FOR_tbm_bextri_di);
27151 if (!CONST_INT_P (op1))
27153 error ("last argument must be an immediate");
27158 unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
27159 unsigned char lsb_index = INTVAL (op1) & 0xFF;
27160 op1 = GEN_INT (length);
27161 op2 = GEN_INT (lsb_index);
27162 pat = GEN_FCN (icode) (target, op0, op1, op2);
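/* Worked example (added): the TBM control word packs the start bit in
   bits 0-7 and the field length in bits 8-15, so a bextri builtin call
   whose OP1 is 0x0804 extracts the 8-bit field that starts at bit 4 of
   OP0; the two GEN_INTs above split the word accordingly.  */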
27172 for (i = 0, d = bdesc_special_args;
27173 i < ARRAY_SIZE (bdesc_special_args);
27175 if (d->code == fcode)
27176 return ix86_expand_special_args_builtin (d, exp, target);
27178 for (i = 0, d = bdesc_args;
27179 i < ARRAY_SIZE (bdesc_args);
27181 if (d->code == fcode)
27184 case IX86_BUILTIN_FABSQ:
27185 case IX86_BUILTIN_COPYSIGNQ:
27187 /* Emit a normal call if SSE2 isn't available. */
27188 return expand_call (exp, target, ignore);
27190 return ix86_expand_args_builtin (d, exp, target);
27193 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
27194 if (d->code == fcode)
27195 return ix86_expand_sse_comi (d, exp, target);
27197 for (i = 0, d = bdesc_pcmpestr;
27198 i < ARRAY_SIZE (bdesc_pcmpestr);
27200 if (d->code == fcode)
27201 return ix86_expand_sse_pcmpestr (d, exp, target);
27203 for (i = 0, d = bdesc_pcmpistr;
27204 i < ARRAY_SIZE (bdesc_pcmpistr);
27206 if (d->code == fcode)
27207 return ix86_expand_sse_pcmpistr (d, exp, target);
27209 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
27210 if (d->code == fcode)
27211 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
27212 (enum ix86_builtin_func_type)
27213 d->flag, d->comparison);
27215 gcc_unreachable ();
27218 /* Returns a function decl for a vectorized version of the builtin function
27219 with builtin function code FN and the result vector type TYPE, or NULL_TREE
27220 if it is not available. */
27222 static tree
27223 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
27224 tree type_in)
27226 enum machine_mode in_mode, out_mode;
27228 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
27230 if (TREE_CODE (type_out) != VECTOR_TYPE
27231 || TREE_CODE (type_in) != VECTOR_TYPE
27232 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
27235 out_mode = TYPE_MODE (TREE_TYPE (type_out));
27236 out_n = TYPE_VECTOR_SUBPARTS (type_out);
27237 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27238 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27242 case BUILT_IN_SQRT:
27243 if (out_mode == DFmode && in_mode == DFmode)
27245 if (out_n == 2 && in_n == 2)
27246 return ix86_builtins[IX86_BUILTIN_SQRTPD];
27247 else if (out_n == 4 && in_n == 4)
27248 return ix86_builtins[IX86_BUILTIN_SQRTPD256];
27252 case BUILT_IN_SQRTF:
27253 if (out_mode == SFmode && in_mode == SFmode)
27255 if (out_n == 4 && in_n == 4)
27256 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
27257 else if (out_n == 8 && in_n == 8)
27258 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
27262 case BUILT_IN_LRINT:
27263 if (out_mode == SImode && out_n == 4
27264 && in_mode == DFmode && in_n == 2)
27265 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
27268 case BUILT_IN_LRINTF:
27269 if (out_mode == SImode && in_mode == SFmode)
27271 if (out_n == 4 && in_n == 4)
27272 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
27273 else if (out_n == 8 && in_n == 8)
27274 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
27278 case BUILT_IN_COPYSIGN:
27279 if (out_mode == DFmode && in_mode == DFmode)
27281 if (out_n == 2 && in_n == 2)
27282 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
27283 else if (out_n == 4 && in_n == 4)
27284 return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
27288 case BUILT_IN_COPYSIGNF:
27289 if (out_mode == SFmode && in_mode == SFmode)
27291 if (out_n == 4 && in_n == 4)
27292 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
27293 else if (out_n == 8 && in_n == 8)
27294 return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
27299 if (out_mode == DFmode && in_mode == DFmode)
27301 if (out_n == 2 && in_n == 2)
27302 return ix86_builtins[IX86_BUILTIN_VFMADDPD];
27303 if (out_n == 4 && in_n == 4)
27304 return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
27308 case BUILT_IN_FMAF:
27309 if (out_mode == SFmode && in_mode == SFmode)
27311 if (out_n == 4 && in_n == 4)
27312 return ix86_builtins[IX86_BUILTIN_VFMADDPS];
27313 if (out_n == 8 && in_n == 8)
27314 return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
27322 /* Dispatch to a handler for a vectorization library. */
27323 if (ix86_veclib_handler)
27324 return ix86_veclib_handler ((enum built_in_function) fn, type_out,
27330 /* Handler for an SVML-style interface to
27331 a library with vectorized intrinsics. */
27334 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
27337 tree fntype, new_fndecl, args;
27340 enum machine_mode el_mode, in_mode;
27343 /* The SVML is suitable for unsafe math only. */
27344 if (!flag_unsafe_math_optimizations)
27347 el_mode = TYPE_MODE (TREE_TYPE (type_out));
27348 n = TYPE_VECTOR_SUBPARTS (type_out);
27349 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27350 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27351 if (el_mode != in_mode
27359 case BUILT_IN_LOG10:
27361 case BUILT_IN_TANH:
27363 case BUILT_IN_ATAN:
27364 case BUILT_IN_ATAN2:
27365 case BUILT_IN_ATANH:
27366 case BUILT_IN_CBRT:
27367 case BUILT_IN_SINH:
27369 case BUILT_IN_ASINH:
27370 case BUILT_IN_ASIN:
27371 case BUILT_IN_COSH:
27373 case BUILT_IN_ACOSH:
27374 case BUILT_IN_ACOS:
27375 if (el_mode != DFmode || n != 2)
27379 case BUILT_IN_EXPF:
27380 case BUILT_IN_LOGF:
27381 case BUILT_IN_LOG10F:
27382 case BUILT_IN_POWF:
27383 case BUILT_IN_TANHF:
27384 case BUILT_IN_TANF:
27385 case BUILT_IN_ATANF:
27386 case BUILT_IN_ATAN2F:
27387 case BUILT_IN_ATANHF:
27388 case BUILT_IN_CBRTF:
27389 case BUILT_IN_SINHF:
27390 case BUILT_IN_SINF:
27391 case BUILT_IN_ASINHF:
27392 case BUILT_IN_ASINF:
27393 case BUILT_IN_COSHF:
27394 case BUILT_IN_COSF:
27395 case BUILT_IN_ACOSHF:
27396 case BUILT_IN_ACOSF:
27397 if (el_mode != SFmode || n != 4)
27405 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
27407 if (fn == BUILT_IN_LOGF)
27408 strcpy (name, "vmlsLn4");
27409 else if (fn == BUILT_IN_LOG)
27410 strcpy (name, "vmldLn2");
27413 sprintf (name, "vmls%s", bname+10);
27414 name[strlen (name)-1] = '4';
27417 sprintf (name, "vmld%s2", bname+10);
27419 /* Convert to uppercase. */
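/* The net result follows the SVML naming convention: e.g. BUILT_IN_SINF
   becomes "vmlsSin4" (4 floats) and BUILT_IN_SIN becomes "vmldSin2"
   (2 doubles), while logf/log were special-cased above as
   "vmlsLn4"/"vmldLn2".  */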
27423 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
27424 args = TREE_CHAIN (args))
27428 fntype = build_function_type_list (type_out, type_in, NULL);
27430 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
27432 /* Build a function declaration for the vectorized function. */
27433 new_fndecl = build_decl (BUILTINS_LOCATION,
27434 FUNCTION_DECL, get_identifier (name), fntype);
27435 TREE_PUBLIC (new_fndecl) = 1;
27436 DECL_EXTERNAL (new_fndecl) = 1;
27437 DECL_IS_NOVOPS (new_fndecl) = 1;
27438 TREE_READONLY (new_fndecl) = 1;
27443 /* Handler for an ACML-style interface to
27444 a library with vectorized intrinsics. */
27447 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
27449 char name[20] = "__vr.._";
27450 tree fntype, new_fndecl, args;
27453 enum machine_mode el_mode, in_mode;
/* The ACML is 64-bit only and suitable for unsafe math only, as it
   does not correctly support parts of IEEE, such as denormals, with
   the required precision.  */
27460 || !flag_unsafe_math_optimizations)
27463 el_mode = TYPE_MODE (TREE_TYPE (type_out));
27464 n = TYPE_VECTOR_SUBPARTS (type_out);
27465 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27466 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27467 if (el_mode != in_mode
27477 case BUILT_IN_LOG2:
27478 case BUILT_IN_LOG10:
27481 if (el_mode != DFmode
27486 case BUILT_IN_SINF:
27487 case BUILT_IN_COSF:
27488 case BUILT_IN_EXPF:
27489 case BUILT_IN_POWF:
27490 case BUILT_IN_LOGF:
27491 case BUILT_IN_LOG2F:
27492 case BUILT_IN_LOG10F:
27495 if (el_mode != SFmode
27504 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
27505 sprintf (name + 7, "%s", bname+10);
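/* E.g. BUILT_IN_SIN yields "__vrd2_sin" and BUILT_IN_SINF yields
   "__vrs4_sinf"; the "d2"/"s4" infix replaces the ".." placeholder in
   the template according to the mode checks above.  */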
27508 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
27509 args = TREE_CHAIN (args))
27513 fntype = build_function_type_list (type_out, type_in, NULL);
27515 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
27517 /* Build a function declaration for the vectorized function. */
27518 new_fndecl = build_decl (BUILTINS_LOCATION,
27519 FUNCTION_DECL, get_identifier (name), fntype);
27520 TREE_PUBLIC (new_fndecl) = 1;
27521 DECL_EXTERNAL (new_fndecl) = 1;
27522 DECL_IS_NOVOPS (new_fndecl) = 1;
27523 TREE_READONLY (new_fndecl) = 1;
27529 /* Returns a decl of a function that implements conversion of an integer vector
27530 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
27531 are the types involved when converting according to CODE.
27532 Return NULL_TREE if it is not available. */
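/* E.g. a FLOAT_EXPR converting V4SI to V4SF maps to
   IX86_BUILTIN_CVTDQ2PS (cvtdq2ps), while a FIX_TRUNC_EXPR from V4SF
   to V4SI maps to IX86_BUILTIN_CVTTPS2DQ (cvttps2dq).  */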
27535 ix86_vectorize_builtin_conversion (unsigned int code,
27536 tree dest_type, tree src_type)
27544 switch (TYPE_MODE (src_type))
27547 switch (TYPE_MODE (dest_type))
27550 return (TYPE_UNSIGNED (src_type)
27551 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
27552 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
27554 return (TYPE_UNSIGNED (src_type)
27556 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
27562 switch (TYPE_MODE (dest_type))
27565 return (TYPE_UNSIGNED (src_type)
27567 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS256]);
27576 case FIX_TRUNC_EXPR:
27577 switch (TYPE_MODE (dest_type))
27580 switch (TYPE_MODE (src_type))
27583 return (TYPE_UNSIGNED (dest_type)
27585 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
27587 return (TYPE_UNSIGNED (dest_type)
27589 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
27596 switch (TYPE_MODE (src_type))
27599 return (TYPE_UNSIGNED (dest_type)
27601 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
/* Returns a decl for a target-specific builtin that implements the
   reciprocal of the function, or NULL_TREE if not available.  */
27622 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
27623 bool sqrt ATTRIBUTE_UNUSED)
27625 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
27626 && flag_finite_math_only && !flag_trapping_math
27627 && flag_unsafe_math_optimizations))
27631 /* Machine dependent builtins. */
27634 /* Vectorized version of sqrt to rsqrt conversion. */
27635 case IX86_BUILTIN_SQRTPS_NR:
27636 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
27638 case IX86_BUILTIN_SQRTPS_NR256:
27639 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];
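/* The _NR variants wrap the raw rsqrtps estimate, which is only
   accurate to about 12 bits, in a Newton-Raphson refinement step;
   the unsafe-math checks above make that substitution legitimate.  */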
27645 /* Normal builtins. */
27648 /* Sqrt to rsqrt conversion. */
27649 case BUILT_IN_SQRTF:
27650 return ix86_builtins[IX86_BUILTIN_RSQRTF];
/* Helper for avx_vpermilps256_operand et al.  This is also used by
   the expansion functions to turn the parallel back into a mask.
   The return value is 0 for no match and imm8+1 for a match.  */
27662 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
27664 unsigned i, nelt = GET_MODE_NUNITS (mode);
27666 unsigned char ipar[8];
27668 if (XVECLEN (par, 0) != (int) nelt)
27671 /* Validate that all of the elements are constants, and not totally
27672 out of range. Copy the data into an integral array to make the
27673 subsequent checks easier. */
27674 for (i = 0; i < nelt; ++i)
27676 rtx er = XVECEXP (par, 0, i);
27677 unsigned HOST_WIDE_INT ei;
27679 if (!CONST_INT_P (er))
/* In the 256-bit DFmode case, we can only move elements within
   a 128-bit lane.  */
27692 for (i = 0; i < 2; ++i)
27696 mask |= ipar[i] << i;
27698 for (i = 2; i < 4; ++i)
27702 mask |= (ipar[i] - 2) << i;
27707 /* In the 256-bit SFmode case, we have full freedom of movement
27708 within the low 128-bit lane, but the high 128-bit lane must
27709 mirror the exact same pattern. */
27710 for (i = 0; i < 4; ++i)
27711 if (ipar[i] + 4 != ipar[i + 4])
/* In the 128-bit case, we have full freedom in the placement of
   the elements from the source operand.  */
27720 for (i = 0; i < nelt; ++i)
27721 mask |= ipar[i] << (i * (nelt / 2));
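/* Worked example for V4SFmode: the parallel (1 0 3 2), which swaps
   adjacent elements, packs two bits per element into
   mask = 1<<0 | 0<<2 | 3<<4 | 2<<6 = 0xb1, the vpermilps imm8.  */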
27725 gcc_unreachable ();
27728 /* Make sure success has a non-zero value by adding one. */
/* Helper for avx_vperm2f128_v4df_operand et al.  This is also used by
   the expansion functions to turn the parallel back into a mask.
   The return value is 0 for no match and imm8+1 for a match.  */
27737 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
27739 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
27741 unsigned char ipar[8];
27743 if (XVECLEN (par, 0) != (int) nelt)
27746 /* Validate that all of the elements are constants, and not totally
27747 out of range. Copy the data into an integral array to make the
27748 subsequent checks easier. */
27749 for (i = 0; i < nelt; ++i)
27751 rtx er = XVECEXP (par, 0, i);
27752 unsigned HOST_WIDE_INT ei;
27754 if (!CONST_INT_P (er))
27757 if (ei >= 2 * nelt)
/* Validate that each half of the permute selects consecutive
   elements, i.e. really is a half.  */
27763 for (i = 0; i < nelt2 - 1; ++i)
27764 if (ipar[i] + 1 != ipar[i + 1])
27766 for (i = nelt2; i < nelt - 1; ++i)
27767 if (ipar[i] + 1 != ipar[i + 1])
27770 /* Reconstruct the mask. */
27771 for (i = 0; i < 2; ++i)
27773 unsigned e = ipar[i * nelt2];
27777 mask |= e << (i * 4);
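/* Worked example for V4DFmode: the half-swapping parallel (2 3 0 1)
   selects lane 1 for the low half and lane 0 for the high half (after
   the element index is scaled down to a lane number), giving
   mask = 1 | (0 << 4) = 0x01, the familiar vperm2f128 $1 encoding.  */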
27780 /* Make sure success has a non-zero value by adding one. */
/* Store OPERAND to memory after reload is completed.  This means
   that we can't easily use assign_stack_local.  */
27788 ix86_force_to_memory (enum machine_mode mode, rtx operand)
27792 gcc_assert (reload_completed);
27793 if (ix86_using_red_zone ())
27795 result = gen_rtx_MEM (mode,
27796 gen_rtx_PLUS (Pmode,
27798 GEN_INT (-RED_ZONE_SIZE)));
27799 emit_move_insn (result, operand);
27801 else if (TARGET_64BIT)
27807 operand = gen_lowpart (DImode, operand);
27811 gen_rtx_SET (VOIDmode,
27812 gen_rtx_MEM (DImode,
27813 gen_rtx_PRE_DEC (DImode,
27814 stack_pointer_rtx)),
27818 gcc_unreachable ();
27820 result = gen_rtx_MEM (mode, stack_pointer_rtx);
27829 split_double_mode (mode, &operand, 1, operands, operands + 1);
27831 gen_rtx_SET (VOIDmode,
27832 gen_rtx_MEM (SImode,
27833 gen_rtx_PRE_DEC (Pmode,
27834 stack_pointer_rtx)),
27837 gen_rtx_SET (VOIDmode,
27838 gen_rtx_MEM (SImode,
27839 gen_rtx_PRE_DEC (Pmode,
27840 stack_pointer_rtx)),
27845 /* Store HImodes as SImodes. */
27846 operand = gen_lowpart (SImode, operand);
27850 gen_rtx_SET (VOIDmode,
27851 gen_rtx_MEM (GET_MODE (operand),
27852 gen_rtx_PRE_DEC (SImode,
27853 stack_pointer_rtx)),
27857 gcc_unreachable ();
27859 result = gen_rtx_MEM (mode, stack_pointer_rtx);
27864 /* Free operand from the memory. */
27866 ix86_free_from_memory (enum machine_mode mode)
27868 if (!ix86_using_red_zone ())
27872 if (mode == DImode || TARGET_64BIT)
/* Use LEA to deallocate stack space.  In peephole2 it will be converted
   to a pop or add instruction if registers are available.  */
27878 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
27879 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
/* Implement TARGET_IRA_COVER_CLASSES.  If -mfpmath=sse, we prefer
   SSE_REGS to FLOAT_REGS if their costs for a pseudo are the same.  */
27887 static const reg_class_t *
27888 i386_ira_cover_classes (void)
27890 static const reg_class_t sse_fpmath_classes[] = {
27891 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
27893 static const reg_class_t no_sse_fpmath_classes[] = {
27894 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
27897 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
27900 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
27902 Put float CONST_DOUBLE in the constant pool instead of fp regs.
27903 QImode must go into class Q_REGS.
27904 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
27905 movdf to do mem-to-mem moves through integer regs. */
27908 ix86_preferred_reload_class (rtx x, reg_class_t regclass)
27910 enum machine_mode mode = GET_MODE (x);
27912 /* We're only allowed to return a subclass of CLASS. Many of the
27913 following checks fail for NO_REGS, so eliminate that early. */
27914 if (regclass == NO_REGS)
27917 /* All classes can load zeros. */
27918 if (x == CONST0_RTX (mode))
27921 /* Force constants into memory if we are loading a (nonzero) constant into
27922 an MMX or SSE register. This is because there are no MMX/SSE instructions
27923 to load from a constant. */
27925 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
27928 /* Prefer SSE regs only, if we can use them for math. */
27929 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
27930 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
27932 /* Floating-point constants need more complex checks. */
27933 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
27935 /* General regs can load everything. */
27936 if (reg_class_subset_p (regclass, GENERAL_REGS))
27939 /* Floats can load 0 and 1 plus some others. Note that we eliminated
27940 zero above. We only want to wind up preferring 80387 registers if
27941 we plan on doing computation with them. */
27943 && standard_80387_constant_p (x))
27945 /* Limit class to non-sse. */
27946 if (regclass == FLOAT_SSE_REGS)
27948 if (regclass == FP_TOP_SSE_REGS)
27950 if (regclass == FP_SECOND_SSE_REGS)
27951 return FP_SECOND_REG;
27952 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
/* Generally when we see PLUS here, it's the function invariant
   (plus soft-fp const_int), which can only be computed into general
   regs.  */
27962 if (GET_CODE (x) == PLUS)
27963 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
27965 /* QImode constants are easy to load, but non-constant QImode data
27966 must go into Q_REGS. */
27967 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
27969 if (reg_class_subset_p (regclass, Q_REGS))
27971 if (reg_class_subset_p (Q_REGS, regclass))
27979 /* Discourage putting floating-point values in SSE registers unless
27980 SSE math is being used, and likewise for the 387 registers. */
27982 ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
27984 enum machine_mode mode = GET_MODE (x);
27986 /* Restrict the output reload class to the register bank that we are doing
27987 math on. If we would like not to return a subset of CLASS, reject this
27988 alternative: if reload cannot do this, it will still use its choice. */
27990 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
27991 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
27993 if (X87_FLOAT_MODE_P (mode))
27995 if (regclass == FP_TOP_SSE_REGS)
27997 else if (regclass == FP_SECOND_SSE_REGS)
27998 return FP_SECOND_REG;
28000 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
28007 ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
28008 enum machine_mode mode,
28009 secondary_reload_info *sri ATTRIBUTE_UNUSED)
/* QImode spills from non-QI registers require an
   intermediate register on 32-bit targets.  */
28013 if (!in_p && mode == QImode && !TARGET_64BIT
28014 && (rclass == GENERAL_REGS
28015 || rclass == LEGACY_REGS
28016 || rclass == INDEX_REGS))
28025 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
28026 regno = true_regnum (x);
28028 /* Return Q_REGS if the operand is in memory. */
28036 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
28039 ix86_class_likely_spilled_p (reg_class_t rclass)
28050 case SSE_FIRST_REG:
28052 case FP_SECOND_REG:
28062 /* If we are copying between general and FP registers, we need a memory
28063 location. The same is true for SSE and MMX registers.
28065 To optimize register_move_cost performance, allow inline variant.
The macro can't work reliably when one of the CLASSES is a class
containing registers from multiple units (SSE, MMX, integer).  We avoid
this by never combining those units in a single alternative in the
machine description.
28070 Ensure that this constraint holds to avoid unexpected surprises.
28072 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
28073 enforce these sanity checks. */
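/* E.g. a DFmode value moving between FLOAT_REGS and SSE_REGS reports
   true here, so the value travels through a stack slot instead of a
   (nonexistent) direct x87<->SSE move.  */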
28076 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28077 enum machine_mode mode, int strict)
28079 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
28080 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
28081 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
28082 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
28083 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
28084 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
28086 gcc_assert (!strict);
28090 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
28093 /* ??? This is a lie. We do have moves between mmx/general, and for
28094 mmx/sse2. But by saying we need secondary memory we discourage the
28095 register allocator from using the mmx registers unless needed. */
28096 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
28099 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28101 /* SSE1 doesn't have any direct moves from other classes. */
28105 /* If the target says that inter-unit moves are more expensive
28106 than moving through memory, then don't generate them. */
28107 if (!TARGET_INTER_UNIT_MOVES)
28110 /* Between SSE and general, we have moves no larger than word size. */
28111 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
28119 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28120 enum machine_mode mode, int strict)
28122 return inline_secondary_memory_needed (class1, class2, mode, strict);
28125 /* Return true if the registers in CLASS cannot represent the change from
28126 modes FROM to TO. */
28129 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
28130 enum reg_class regclass)
28135 /* x87 registers can't do subreg at all, as all values are reformatted
28136 to extended precision. */
28137 if (MAYBE_FLOAT_CLASS_P (regclass))
28140 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
28142 /* Vector registers do not support QI or HImode loads. If we don't
28143 disallow a change to these modes, reload will assume it's ok to
28144 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
28145 the vec_dupv4hi pattern. */
28146 if (GET_MODE_SIZE (from) < 4)
28149 /* Vector registers do not support subreg with nonzero offsets, which
28150 are otherwise valid for integer registers. Since we can't see
28151 whether we have a nonzero offset from here, prohibit all
28152 nonparadoxical subregs changing size. */
28153 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
28160 /* Return the cost of moving data of mode M between a
28161 register and memory. A value of 2 is the default; this cost is
28162 relative to those in `REGISTER_MOVE_COST'.
This function is used extensively by register_move_cost, which is used
to build tables at startup.  Make it inline in this case.
When IN is 2, return the maximum of the in and out move cost.

If moving between registers and memory is more expensive than
between two registers, you should define this macro to express the
relative cost.

Model also the increased moving cost of QImode registers in non
Q_REGS classes.  */
28176 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
28180 if (FLOAT_CLASS_P (regclass))
28198 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
28199 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
28201 if (SSE_CLASS_P (regclass))
28204 switch (GET_MODE_SIZE (mode))
28219 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
28220 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
28222 if (MMX_CLASS_P (regclass))
28225 switch (GET_MODE_SIZE (mode))
28237 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
28238 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
28240 switch (GET_MODE_SIZE (mode))
28243 if (Q_CLASS_P (regclass) || TARGET_64BIT)
28246 return ix86_cost->int_store[0];
28247 if (TARGET_PARTIAL_REG_DEPENDENCY
28248 && optimize_function_for_speed_p (cfun))
28249 cost = ix86_cost->movzbl_load;
28251 cost = ix86_cost->int_load[0];
28253 return MAX (cost, ix86_cost->int_store[0]);
28259 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
28261 return ix86_cost->movzbl_load;
28263 return ix86_cost->int_store[0] + 4;
28268 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
28269 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
/* Compute the number of 32-bit moves needed.  TFmode is moved as XFmode.  */
28272 if (mode == TFmode)
28275 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
28277 cost = ix86_cost->int_load[2];
28279 cost = ix86_cost->int_store[2];
28280 return (cost * (((int) GET_MODE_SIZE (mode)
28281 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
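/* E.g. with 4-byte words, a DImode access costs two SImode moves, and
   XFmode (typically 12 bytes on 32-bit targets) costs three, each at
   the int_load[2] / int_store[2] rate.  */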
28286 ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
28289 return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
28293 /* Return the cost of moving data from a register in class CLASS1 to
28294 one in class CLASS2.
28296 It is not required that the cost always equal 2 when FROM is the same as TO;
28297 on some machines it is expensive to move between registers if they are not
28298 general registers. */
28301 ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
28302 reg_class_t class2_i)
28304 enum reg_class class1 = (enum reg_class) class1_i;
28305 enum reg_class class2 = (enum reg_class) class2_i;
28307 /* In case we require secondary memory, compute cost of the store followed
28308 by load. In order to avoid bad register allocation choices, we need
28309 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
28311 if (inline_secondary_memory_needed (class1, class2, mode, 0))
28315 cost += inline_memory_move_cost (mode, class1, 2);
28316 cost += inline_memory_move_cost (mode, class2, 2);
28318 /* In case of copying from general_purpose_register we may emit multiple
28319 stores followed by single load causing memory size mismatch stall.
28320 Count this as arbitrarily high cost of 20. */
28321 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
28324 /* In the case of FP/MMX moves, the registers actually overlap, and we
28325 have to switch modes in order to treat them differently. */
28326 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
28327 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
28333 /* Moves between SSE/MMX and integer unit are expensive. */
28334 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
28335 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
/* ??? By keeping the returned value relatively high, we limit the number
   of moves between integer and MMX/SSE registers for all targets.
   Additionally, a high value prevents a problem with x86_modes_tieable_p(),
   where integer modes in MMX/SSE registers are not tieable
   because of missing QImode and HImode moves to, from or between
   MMX/SSE registers.  */
28343 return MAX (8, ix86_cost->mmxsse_to_integer);
28345 if (MAYBE_FLOAT_CLASS_P (class1))
28346 return ix86_cost->fp_move;
28347 if (MAYBE_SSE_CLASS_P (class1))
28348 return ix86_cost->sse_move;
28349 if (MAYBE_MMX_CLASS_P (class1))
28350 return ix86_cost->mmx_move;
28354 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
28357 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
/* Flags, and only flags, can hold CCmode values.  */
28360 if (CC_REGNO_P (regno))
28361 return GET_MODE_CLASS (mode) == MODE_CC;
28362 if (GET_MODE_CLASS (mode) == MODE_CC
28363 || GET_MODE_CLASS (mode) == MODE_RANDOM
28364 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
28366 if (FP_REGNO_P (regno))
28367 return VALID_FP_MODE_P (mode);
28368 if (SSE_REGNO_P (regno))
/* We implement the move patterns for all vector modes into and
   out of SSE registers, even when no operation instructions
   are available.  OImode moves are available only when AVX is
   enabled.  */
28374 return ((TARGET_AVX && mode == OImode)
28375 || VALID_AVX256_REG_MODE (mode)
28376 || VALID_SSE_REG_MODE (mode)
28377 || VALID_SSE2_REG_MODE (mode)
28378 || VALID_MMX_REG_MODE (mode)
28379 || VALID_MMX_REG_MODE_3DNOW (mode));
28381 if (MMX_REGNO_P (regno))
28383 /* We implement the move patterns for 3DNOW modes even in MMX mode,
28384 so if the register is available at all, then we can move data of
28385 the given mode into or out of it. */
28386 return (VALID_MMX_REG_MODE (mode)
28387 || VALID_MMX_REG_MODE_3DNOW (mode));
28390 if (mode == QImode)
/* Take care with QImode values: they can live in non-QI regs,
   but then they cause partial register stalls.  */
28394 if (regno <= BX_REG || TARGET_64BIT)
28396 if (!TARGET_PARTIAL_REG_STALL)
28398 return reload_in_progress || reload_completed;
/* We handle both integers and floats in the general purpose registers.  */
28401 else if (VALID_INT_MODE_P (mode))
28403 else if (VALID_FP_MODE_P (mode))
28405 else if (VALID_DFP_MODE_P (mode))
28407 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
28408 on to use that value in smaller contexts, this can easily force a
28409 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
28410 supporting DImode, allow it. */
28411 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
28417 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
28418 tieable integer mode. */
28421 ix86_tieable_integer_mode_p (enum machine_mode mode)
28430 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
28433 return TARGET_64BIT;
28440 /* Return true if MODE1 is accessible in a register that can hold MODE2
28441 without copying. That is, all register classes that can hold MODE2
28442 can also hold MODE1. */
28445 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
28447 if (mode1 == mode2)
28450 if (ix86_tieable_integer_mode_p (mode1)
28451 && ix86_tieable_integer_mode_p (mode2))
28454 /* MODE2 being XFmode implies fp stack or general regs, which means we
28455 can tie any smaller floating point modes to it. Note that we do not
28456 tie this with TFmode. */
28457 if (mode2 == XFmode)
28458 return mode1 == SFmode || mode1 == DFmode;
28460 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
28461 that we can tie it with SFmode. */
28462 if (mode2 == DFmode)
28463 return mode1 == SFmode;
28465 /* If MODE2 is only appropriate for an SSE register, then tie with
28466 any other mode acceptable to SSE registers. */
28467 if (GET_MODE_SIZE (mode2) == 16
28468 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
28469 return (GET_MODE_SIZE (mode1) == 16
28470 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
28472 /* If MODE2 is appropriate for an MMX register, then tie
28473 with any other mode acceptable to MMX registers. */
28474 if (GET_MODE_SIZE (mode2) == 8
28475 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
28476 return (GET_MODE_SIZE (mode1) == 8
28477 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
28482 /* Compute a (partial) cost for rtx X. Return true if the complete
28483 cost has been computed, and false if subexpressions should be
28484 scanned. In either case, *TOTAL contains the cost result. */
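/* E.g. an addressing-style expression such as
   (plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 8))
   is costed by the PLUS case below as a single lea rather than as a
   multiply and two additions.  */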
28487 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
28489 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
28490 enum machine_mode mode = GET_MODE (x);
28491 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
28499 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
28501 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
28503 else if (flag_pic && SYMBOLIC_CONST (x)
|| (GET_CODE (x) != LABEL_REF
28506 && (GET_CODE (x) != SYMBOL_REF
28507 || !SYMBOL_REF_LOCAL_P (x)))))
28514 if (mode == VOIDmode)
28517 switch (standard_80387_constant_p (x))
28522 default: /* Other constants */
28527 /* Start with (MEM (SYMBOL_REF)), since that's where
28528 it'll probably end up. Add a penalty for size. */
28529 *total = (COSTS_N_INSNS (1)
28530 + (flag_pic != 0 && !TARGET_64BIT)
28531 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
/* Zero extension is often completely free on x86_64, so make
   it as cheap as possible.  */
28539 if (TARGET_64BIT && mode == DImode
28540 && GET_MODE (XEXP (x, 0)) == SImode)
28542 else if (TARGET_ZERO_EXTEND_WITH_AND)
28543 *total = cost->add;
28545 *total = cost->movzx;
28549 *total = cost->movsx;
28553 if (CONST_INT_P (XEXP (x, 1))
28554 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
28556 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
28559 *total = cost->add;
28562 if ((value == 2 || value == 3)
28563 && cost->lea <= cost->shift_const)
28565 *total = cost->lea;
28575 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
28577 if (CONST_INT_P (XEXP (x, 1)))
28579 if (INTVAL (XEXP (x, 1)) > 32)
28580 *total = cost->shift_const + COSTS_N_INSNS (2);
28582 *total = cost->shift_const * 2;
28586 if (GET_CODE (XEXP (x, 1)) == AND)
28587 *total = cost->shift_var * 2;
28589 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
28594 if (CONST_INT_P (XEXP (x, 1)))
28595 *total = cost->shift_const;
28597 *total = cost->shift_var;
28605 gcc_assert (FLOAT_MODE_P (mode));
28606 gcc_assert (TARGET_FMA || TARGET_FMA4);
28608 /* ??? SSE scalar/vector cost should be used here. */
28609 /* ??? Bald assumption that fma has the same cost as fmul. */
28610 *total = cost->fmul;
28611 *total += rtx_cost (XEXP (x, 1), FMA, speed);
28613 /* Negate in op0 or op2 is free: FMS, FNMA, FNMS. */
28615 if (GET_CODE (sub) == NEG)
28617 *total += rtx_cost (sub, FMA, speed);
28620 if (GET_CODE (sub) == NEG)
28622 *total += rtx_cost (sub, FMA, speed);
28627 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
28629 /* ??? SSE scalar cost should be used here. */
28630 *total = cost->fmul;
28633 else if (X87_FLOAT_MODE_P (mode))
28635 *total = cost->fmul;
28638 else if (FLOAT_MODE_P (mode))
28640 /* ??? SSE vector cost should be used here. */
28641 *total = cost->fmul;
28646 rtx op0 = XEXP (x, 0);
28647 rtx op1 = XEXP (x, 1);
28649 if (CONST_INT_P (XEXP (x, 1)))
28651 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
28652 for (nbits = 0; value != 0; value &= value - 1)
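/* Kernighan's trick: value &= value - 1 clears the lowest set bit, so
   the loop iterates once per set bit, costing the multiply roughly one
   shift-add step per set bit of the constant.  */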
28656 /* This is arbitrary. */
28659 /* Compute costs correctly for widening multiplication. */
28660 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
28661 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
28662 == GET_MODE_SIZE (mode))
28664 int is_mulwiden = 0;
28665 enum machine_mode inner_mode = GET_MODE (op0);
28667 if (GET_CODE (op0) == GET_CODE (op1))
28668 is_mulwiden = 1, op1 = XEXP (op1, 0);
28669 else if (CONST_INT_P (op1))
28671 if (GET_CODE (op0) == SIGN_EXTEND)
28672 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
28675 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
28679 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
28682 *total = (cost->mult_init[MODE_INDEX (mode)]
28683 + nbits * cost->mult_bit
28684 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
28693 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
28694 /* ??? SSE cost should be used here. */
28695 *total = cost->fdiv;
28696 else if (X87_FLOAT_MODE_P (mode))
28697 *total = cost->fdiv;
28698 else if (FLOAT_MODE_P (mode))
28699 /* ??? SSE vector cost should be used here. */
28700 *total = cost->fdiv;
28702 *total = cost->divide[MODE_INDEX (mode)];
28706 if (GET_MODE_CLASS (mode) == MODE_INT
28707 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
28709 if (GET_CODE (XEXP (x, 0)) == PLUS
28710 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
28711 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
28712 && CONSTANT_P (XEXP (x, 1)))
28714 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
28715 if (val == 2 || val == 4 || val == 8)
28717 *total = cost->lea;
28718 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
28719 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
28720 outer_code, speed);
28721 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
28725 else if (GET_CODE (XEXP (x, 0)) == MULT
28726 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
28728 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
28729 if (val == 2 || val == 4 || val == 8)
28731 *total = cost->lea;
28732 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
28733 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
28737 else if (GET_CODE (XEXP (x, 0)) == PLUS)
28739 *total = cost->lea;
28740 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
28741 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
28742 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
28749 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
28751 /* ??? SSE cost should be used here. */
28752 *total = cost->fadd;
28755 else if (X87_FLOAT_MODE_P (mode))
28757 *total = cost->fadd;
28760 else if (FLOAT_MODE_P (mode))
28762 /* ??? SSE vector cost should be used here. */
28763 *total = cost->fadd;
28771 if (!TARGET_64BIT && mode == DImode)
28773 *total = (cost->add * 2
28774 + (rtx_cost (XEXP (x, 0), outer_code, speed)
28775 << (GET_MODE (XEXP (x, 0)) != DImode))
28776 + (rtx_cost (XEXP (x, 1), outer_code, speed)
28777 << (GET_MODE (XEXP (x, 1)) != DImode)));
28783 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
28785 /* ??? SSE cost should be used here. */
28786 *total = cost->fchs;
28789 else if (X87_FLOAT_MODE_P (mode))
28791 *total = cost->fchs;
28794 else if (FLOAT_MODE_P (mode))
28796 /* ??? SSE vector cost should be used here. */
28797 *total = cost->fchs;
28803 if (!TARGET_64BIT && mode == DImode)
28804 *total = cost->add * 2;
28806 *total = cost->add;
28810 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
28811 && XEXP (XEXP (x, 0), 1) == const1_rtx
28812 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
28813 && XEXP (x, 1) == const0_rtx)
28815 /* This kind of construct is implemented using test[bwl].
28816 Treat it as if we had an AND. */
28817 *total = (cost->add
28818 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
28819 + rtx_cost (const1_rtx, outer_code, speed));
28825 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
28830 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
28831 /* ??? SSE cost should be used here. */
28832 *total = cost->fabs;
28833 else if (X87_FLOAT_MODE_P (mode))
28834 *total = cost->fabs;
28835 else if (FLOAT_MODE_P (mode))
28836 /* ??? SSE vector cost should be used here. */
28837 *total = cost->fabs;
28841 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
28842 /* ??? SSE cost should be used here. */
28843 *total = cost->fsqrt;
28844 else if (X87_FLOAT_MODE_P (mode))
28845 *total = cost->fsqrt;
28846 else if (FLOAT_MODE_P (mode))
28847 /* ??? SSE vector cost should be used here. */
28848 *total = cost->fsqrt;
28852 if (XINT (x, 1) == UNSPEC_TP)
28859 case VEC_DUPLICATE:
/* ??? Assume all of these vector manipulation patterns are
   recognizable, in which case they all pretty much have the
   same cost.  */
28863 *total = COSTS_N_INSNS (1);
28873 static int current_machopic_label_num;
28875 /* Given a symbol name and its associated stub, write out the
28876 definition of the stub. */
28879 machopic_output_stub (FILE *file, const char *symb, const char *stub)
28881 unsigned int length;
28882 char *binder_name, *symbol_name, lazy_ptr_name[32];
28883 int label = ++current_machopic_label_num;
28885 /* For 64-bit we shouldn't get here. */
28886 gcc_assert (!TARGET_64BIT);
28888 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
28889 symb = targetm.strip_name_encoding (symb);
28891 length = strlen (stub);
28892 binder_name = XALLOCAVEC (char, length + 32);
28893 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
28895 length = strlen (symb);
28896 symbol_name = XALLOCAVEC (char, length + 32);
28897 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
28899 sprintf (lazy_ptr_name, "L%d$lz", label);
28901 if (MACHOPIC_ATT_STUB)
28902 switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
28903 else if (MACHOPIC_PURE)
28905 if (TARGET_DEEP_BRANCH_PREDICTION)
28906 switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
28908 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
28911 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
28913 fprintf (file, "%s:\n", stub);
28914 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28916 if (MACHOPIC_ATT_STUB)
28918 fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
28920 else if (MACHOPIC_PURE)
28923 if (TARGET_DEEP_BRANCH_PREDICTION)
28925 /* 25-byte PIC stub using "CALL get_pc_thunk". */
28926 rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
28927 output_set_got (tmp, NULL_RTX); /* "CALL ___<cpu>.get_pc_thunk.cx". */
28928 fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n", label, lazy_ptr_name, label);
28932 /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %eax". */
28933 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%ecx\n", label, label);
28934 fprintf (file, "\tmovl %s-LPC$%d(%%ecx),%%ecx\n", lazy_ptr_name, label);
28936 fprintf (file, "\tjmp\t*%%ecx\n");
28939 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
28941 /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
28942 it needs no stub-binding-helper. */
28943 if (MACHOPIC_ATT_STUB)
28946 fprintf (file, "%s:\n", binder_name);
28950 fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
28951 fprintf (file, "\tpushl\t%%ecx\n");
28954 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
28956 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
28958 /* N.B. Keep the correspondence of these
28959 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
28960 old-pic/new-pic/non-pic stubs; altering this will break
28961 compatibility with existing dylibs. */
28965 if (TARGET_DEEP_BRANCH_PREDICTION)
28966 /* 25-byte PIC stub using "CALL get_pc_thunk". */
28967 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
28969 /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %ebx". */
28970 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
28973 /* 16-byte -mdynamic-no-pic stub. */
28974 switch_to_section(darwin_sections[machopic_lazy_symbol_ptr3_section]);
28976 fprintf (file, "%s:\n", lazy_ptr_name);
28977 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28978 fprintf (file, ASM_LONG "%s\n", binder_name);
28980 #endif /* TARGET_MACHO */
/* Order the registers for the register allocator.  */
28985 x86_order_regs_for_local_alloc (void)
28990 /* First allocate the local general purpose registers. */
28991 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
28992 if (GENERAL_REGNO_P (i) && call_used_regs[i])
28993 reg_alloc_order [pos++] = i;
28995 /* Global general purpose registers. */
28996 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
28997 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
28998 reg_alloc_order [pos++] = i;
/* x87 registers come first in case we are doing FP math
   using them.  */
29002 if (!TARGET_SSE_MATH)
29003 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29004 reg_alloc_order [pos++] = i;
29006 /* SSE registers. */
29007 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
29008 reg_alloc_order [pos++] = i;
29009 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
29010 reg_alloc_order [pos++] = i;
29012 /* x87 registers. */
29013 if (TARGET_SSE_MATH)
29014 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29015 reg_alloc_order [pos++] = i;
29017 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
29018 reg_alloc_order [pos++] = i;
/* Initialize the rest of the array, as we do not allocate some registers
   at all.  */
29022 while (pos < FIRST_PSEUDO_REGISTER)
29023 reg_alloc_order [pos++] = 0;
29026 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
29027 struct attribute_spec.handler. */
29029 ix86_handle_abi_attribute (tree *node, tree name,
29030 tree args ATTRIBUTE_UNUSED,
29031 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29033 if (TREE_CODE (*node) != FUNCTION_TYPE
29034 && TREE_CODE (*node) != METHOD_TYPE
29035 && TREE_CODE (*node) != FIELD_DECL
29036 && TREE_CODE (*node) != TYPE_DECL)
29038 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29040 *no_add_attrs = true;
29045 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
29047 *no_add_attrs = true;
29051 /* Can combine regparm with all attributes but fastcall. */
29052 if (is_attribute_p ("ms_abi", name))
29054 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
29056 error ("ms_abi and sysv_abi attributes are not compatible");
29061 else if (is_attribute_p ("sysv_abi", name))
29063 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
29065 error ("ms_abi and sysv_abi attributes are not compatible");
29074 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
29075 struct attribute_spec.handler. */
29077 ix86_handle_struct_attribute (tree *node, tree name,
29078 tree args ATTRIBUTE_UNUSED,
29079 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29082 if (DECL_P (*node))
29084 if (TREE_CODE (*node) == TYPE_DECL)
29085 type = &TREE_TYPE (*node);
29090 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
29091 || TREE_CODE (*type) == UNION_TYPE)))
29093 warning (OPT_Wattributes, "%qE attribute ignored",
29095 *no_add_attrs = true;
29098 else if ((is_attribute_p ("ms_struct", name)
29099 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
29100 || ((is_attribute_p ("gcc_struct", name)
29101 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
29103 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
29105 *no_add_attrs = true;
29112 ix86_handle_fndecl_attribute (tree *node, tree name,
29113 tree args ATTRIBUTE_UNUSED,
29114 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29116 if (TREE_CODE (*node) != FUNCTION_DECL)
29118 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29120 *no_add_attrs = true;
29126 ix86_ms_bitfield_layout_p (const_tree record_type)
29128 return ((TARGET_MS_BITFIELD_LAYOUT
29129 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
29130 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
29133 /* Returns an expression indicating where the this parameter is
29134 located on entry to the FUNCTION. */
29137 x86_this_parameter (tree function)
29139 tree type = TREE_TYPE (function);
29140 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
29145 const int *parm_regs;
29147 if (ix86_function_type_abi (type) == MS_ABI)
29148 parm_regs = x86_64_ms_abi_int_parameter_registers;
29150 parm_regs = x86_64_int_parameter_registers;
29151 return gen_rtx_REG (DImode, parm_regs[aggr]);
29154 nregs = ix86_function_regparm (type, function);
29156 if (nregs > 0 && !stdarg_p (type))
29160 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
29161 regno = aggr ? DX_REG : CX_REG;
29162 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
29166 return gen_rtx_MEM (SImode,
29167 plus_constant (stack_pointer_rtx, 4));
29176 return gen_rtx_MEM (SImode,
29177 plus_constant (stack_pointer_rtx, 4));
29180 return gen_rtx_REG (SImode, regno);
29183 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
29186 /* Determine whether x86_output_mi_thunk can succeed. */
29189 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
29190 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
29191 HOST_WIDE_INT vcall_offset, const_tree function)
29193 /* 64-bit can handle anything. */
29197 /* For 32-bit, everything's fine if we have one free register. */
29198 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
29201 /* Need a free register for vcall_offset. */
29205 /* Need a free register for GOT references. */
29206 if (flag_pic && !targetm.binds_local_p (function))
29209 /* Otherwise ok. */
29213 /* Output the assembler code for a thunk function. THUNK_DECL is the
29214 declaration for the thunk function itself, FUNCTION is the decl for
29215 the target function. DELTA is an immediate constant offset to be
29216 added to THIS. If VCALL_OFFSET is nonzero, the word at
29217 *(*this + vcall_offset) should be added to THIS. */
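/* A minimal sketch of the 32-bit output, assuming DELTA == -4 and no
   VCALL_OFFSET: the incoming this pointer lives at 4(%esp), so the
   whole thunk is just

       subl  $4, 4(%esp)
       jmp   function

   With a nonzero VCALL_OFFSET, this is first pulled into a register so
   the vtable word can be loaded and added before the jump.  */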
29220 x86_output_mi_thunk (FILE *file,
29221 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
29222 HOST_WIDE_INT vcall_offset, tree function)
29225 rtx this_param = x86_this_parameter (function);
29228 /* Make sure unwind info is emitted for the thunk if needed. */
29229 final_start_function (emit_barrier (), file, 1);
29231 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
29232 pull it in now and let DELTA benefit. */
29233 if (REG_P (this_param))
29234 this_reg = this_param;
29235 else if (vcall_offset)
29237 /* Put the this parameter into %eax. */
29238 xops[0] = this_param;
29239 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
29240 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29243 this_reg = NULL_RTX;
29245 /* Adjust the this parameter by a fixed constant. */
29248 xops[0] = GEN_INT (delta);
29249 xops[1] = this_reg ? this_reg : this_param;
29252 if (!x86_64_general_operand (xops[0], DImode))
29254 tmp = gen_rtx_REG (DImode, R10_REG);
29256 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
29258 xops[1] = this_param;
29260 if (x86_maybe_negate_const_int (&xops[0], DImode))
29261 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
29263 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
29265 else if (x86_maybe_negate_const_int (&xops[0], SImode))
29266 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
29268 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
29271 /* Adjust the this parameter by a value stored in the vtable. */
29275 tmp = gen_rtx_REG (DImode, R10_REG);
29278 int tmp_regno = CX_REG;
29279 if (lookup_attribute ("fastcall",
29280 TYPE_ATTRIBUTES (TREE_TYPE (function)))
29281 || lookup_attribute ("thiscall",
29282 TYPE_ATTRIBUTES (TREE_TYPE (function))))
29283 tmp_regno = AX_REG;
29284 tmp = gen_rtx_REG (SImode, tmp_regno);
29287 xops[0] = gen_rtx_MEM (Pmode, this_reg);
29289 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29291 /* Adjust the this parameter. */
29292 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
29293 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
29295 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
29296 xops[0] = GEN_INT (vcall_offset);
29298 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
29299 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
29301 xops[1] = this_reg;
29302 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
29305 /* If necessary, drop THIS back to its stack slot. */
29306 if (this_reg && this_reg != this_param)
29308 xops[0] = this_reg;
29309 xops[1] = this_param;
29310 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29313 xops[0] = XEXP (DECL_RTL (function), 0);
29316 if (!flag_pic || targetm.binds_local_p (function)
29317 || DEFAULT_ABI == MS_ABI)
29318 output_asm_insn ("jmp\t%P0", xops);
29319 /* All thunks should be in the same object as their target,
29320 and thus binds_local_p should be true. */
29321 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
29322 gcc_unreachable ();
29325 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
29326 tmp = gen_rtx_CONST (Pmode, tmp);
29327 tmp = gen_rtx_MEM (QImode, tmp);
29329 output_asm_insn ("jmp\t%A0", xops);
29334 if (!flag_pic || targetm.binds_local_p (function))
29335 output_asm_insn ("jmp\t%P0", xops);
29340 rtx sym_ref = XEXP (DECL_RTL (function), 0);
29341 if (TARGET_MACHO_BRANCH_ISLANDS)
29342 sym_ref = (gen_rtx_SYMBOL_REF
29344 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
29345 tmp = gen_rtx_MEM (QImode, sym_ref);
29347 output_asm_insn ("jmp\t%0", xops);
29350 #endif /* TARGET_MACHO */
29352 tmp = gen_rtx_REG (SImode, CX_REG);
29353 output_set_got (tmp, NULL_RTX);
29356 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
29357 output_asm_insn ("jmp\t{*}%1", xops);
29360 final_end_function ();
29364 x86_file_start (void)
29366 default_file_start ();
29368 darwin_file_start ();
29370 if (X86_FILE_START_VERSION_DIRECTIVE)
29371 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
29372 if (X86_FILE_START_FLTUSED)
29373 fputs ("\t.global\t__fltused\n", asm_out_file);
29374 if (ix86_asm_dialect == ASM_INTEL)
29375 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
29379 x86_field_alignment (tree field, int computed)
29381 enum machine_mode mode;
29382 tree type = TREE_TYPE (field);
29384 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
29386 mode = TYPE_MODE (strip_array_types (type));
29387 if (mode == DFmode || mode == DCmode
29388 || GET_MODE_CLASS (mode) == MODE_INT
29389 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
29390 return MIN (32, computed);
29394 /* Output assembler code to FILE to increment profiler label # LABELNO
29395 for profiling a function entry. */
29397 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
29399 const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
29404 #ifndef NO_PROFILE_COUNTERS
29405 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
29408 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
29409 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
29411 fprintf (file, "\tcall\t%s\n", mcount_name);
29415 #ifndef NO_PROFILE_COUNTERS
29416 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
29419 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
29423 #ifndef NO_PROFILE_COUNTERS
29424 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
29427 fprintf (file, "\tcall\t%s\n", mcount_name);
/* We don't have exact information about the insn sizes, but we may assume
   quite safely that we are informed about all 1-byte insns and memory
   address sizes.  This is enough to eliminate unnecessary padding in
   99% of cases.  */
29437 min_insn_size (rtx insn)
29441 if (!INSN_P (insn) || !active_insn_p (insn))
/* Discard the alignment insns we've emitted, and jump-table data.  */
29445 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
29446 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
29448 if (JUMP_TABLE_DATA_P (insn))
/* Important case - calls are always 5 bytes.
   It is common to have many calls in a row.  */
29454 && symbolic_reference_mentioned_p (PATTERN (insn))
29455 && !SIBLING_CALL_P (insn))
29457 len = get_attr_length (insn);
29461 /* For normal instructions we rely on get_attr_length being exact,
29462 with a few exceptions. */
29463 if (!JUMP_P (insn))
29465 enum attr_type type = get_attr_type (insn);
29470 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
29471 || asm_noperands (PATTERN (insn)) >= 0)
29478 /* Otherwise trust get_attr_length. */
29482 l = get_attr_length_address (insn);
29483 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
29492 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
/* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
   16-byte window.  */
29498 ix86_avoid_jump_mispredicts (void)
29500 rtx insn, start = get_insns ();
29501 int nbytes = 0, njumps = 0;
29504 /* Look for all minimal intervals of instructions containing 4 jumps.
29505 The intervals are bounded by START and INSN. NBYTES is the total
29506 size of instructions in the interval including INSN and not including
START.  When NBYTES is smaller than 16 bytes, it is possible
that the end of START and INSN ends up in the same 16-byte page.

The smallest offset in the page at which INSN can start is the case
where START ends at offset 0.  The offset of INSN is then
NBYTES - sizeof (INSN).  We add a p2align to the 16-byte window with
max skip 15 - NBYTES + sizeof (INSN).  */
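/* E.g. if INSN is the fourth jump and the interval holds nbytes == 10
   bytes with sizeof (INSN) == 2, we pad by 15 - 10 + 2 = 7 bytes, so
   the four jumps can no longer share one 16-byte window.  */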
29514 for (insn = start; insn; insn = NEXT_INSN (insn))
29518 if (LABEL_P (insn))
29520 int align = label_to_alignment (insn);
29521 int max_skip = label_to_max_skip (insn);
/* If align > 3, only up to 16 - max_skip - 1 bytes can be
   already in the current 16-byte page, because otherwise
   ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
   bytes to reach a 16-byte boundary.  */
29530 || (align <= 3 && max_skip != (1 << align) - 1))
29533 fprintf (dump_file, "Label %i with max_skip %i\n",
29534 INSN_UID (insn), max_skip);
29537 while (nbytes + max_skip >= 16)
29539 start = NEXT_INSN (start);
29540 if ((JUMP_P (start)
29541 && GET_CODE (PATTERN (start)) != ADDR_VEC
29542 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
29544 njumps--, isjump = 1;
29547 nbytes -= min_insn_size (start);
29553 min_size = min_insn_size (insn);
29554 nbytes += min_size;
29556 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
29557 INSN_UID (insn), min_size);
29559 && GET_CODE (PATTERN (insn)) != ADDR_VEC
29560 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
29568 start = NEXT_INSN (start);
29569 if ((JUMP_P (start)
29570 && GET_CODE (PATTERN (start)) != ADDR_VEC
29571 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
29573 njumps--, isjump = 1;
29576 nbytes -= min_insn_size (start);
29578 gcc_assert (njumps >= 0);
29580 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
29581 INSN_UID (start), INSN_UID (insn), nbytes);
29583 if (njumps == 3 && isjump && nbytes < 16)
29585 int padsize = 15 - nbytes + min_insn_size (insn);
29588 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
29589 INSN_UID (insn), padsize);
29590 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
/* AMD Athlon works faster
   when RET is not the destination of a conditional jump or directly
   preceded by another jump instruction.  We avoid the penalty by
   inserting a NOP just before the RET instructions in such cases.  */
29601 ix86_pad_returns (void)
29606 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
29608 basic_block bb = e->src;
29609 rtx ret = BB_END (bb);
29611 bool replace = false;
29613 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
29614 || optimize_bb_for_size_p (bb))
29616 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
29617 if (active_insn_p (prev) || LABEL_P (prev))
29619 if (prev && LABEL_P (prev))
29624 FOR_EACH_EDGE (e, ei, bb->preds)
29625 if (EDGE_FREQUENCY (e) && e->src->index >= 0
29626 && !(e->flags & EDGE_FALLTHRU))
29631 prev = prev_active_insn (ret);
29633 && ((JUMP_P (prev) && any_condjump_p (prev))
/* Empty functions get a branch mispredict even when the jump destination
   is not visible to us.  */
29638 if (!prev && !optimize_function_for_size_p (cfun))
29643 emit_jump_insn_before (gen_return_internal_long (), ret);
29649 /* Count the minimum number of instructions in BB. Return 4 if the
29650 number of instructions >= 4. */
29653 ix86_count_insn_bb (basic_block bb)
29656 int insn_count = 0;
29658 /* Count number of instructions in this block. Return 4 if the number
29659 of instructions >= 4. */
29660 FOR_BB_INSNS (bb, insn)
/* This only happens in exit blocks.  */
29664 && GET_CODE (PATTERN (insn)) == RETURN)
29667 if (NONDEBUG_INSN_P (insn)
29668 && GET_CODE (PATTERN (insn)) != USE
29669 && GET_CODE (PATTERN (insn)) != CLOBBER)
29672 if (insn_count >= 4)
29681 /* Count the minimum number of instructions in code path in BB.
29682 Return 4 if the number of instructions >= 4. */
29685 ix86_count_insn (basic_block bb)
29689 int min_prev_count;
29691 /* Only bother counting instructions along paths with no
29692 more than 2 basic blocks between entry and exit. Given
29693 that BB has an edge to exit, determine if a predecessor
29694 of BB has an edge from entry. If so, compute the number
29695 of instructions in the predecessor block. If there
29696 happen to be multiple such blocks, compute the minimum. */
29697 min_prev_count = 4;
29698 FOR_EACH_EDGE (e, ei, bb->preds)
29701 edge_iterator prev_ei;
29703 if (e->src == ENTRY_BLOCK_PTR)
29705 min_prev_count = 0;
29708 FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
29710 if (prev_e->src == ENTRY_BLOCK_PTR)
29712 int count = ix86_count_insn_bb (e->src);
29713 if (count < min_prev_count)
29714 min_prev_count = count;
29720 if (min_prev_count < 4)
29721 min_prev_count += ix86_count_insn_bb (bb);
29723 return min_prev_count;
29726 /* Pad short function to 4 instructions. */
29729 ix86_pad_short_function (void)
29734 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
29736 rtx ret = BB_END (e->src);
29737 if (JUMP_P (ret) && GET_CODE (PATTERN (ret)) == RETURN)
29739 int insn_count = ix86_count_insn (e->src);
29741 /* Pad short function. */
29742 if (insn_count < 4)
29746 /* Find epilogue. */
29749 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
29750 insn = PREV_INSN (insn);
29755 /* Two NOPs are counted as one instruction. */
29756 insn_count = 2 * (4 - insn_count);
29757 emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
29763 /* Implement machine specific optimizations. We implement padding of returns
29764 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
29768 if (optimize && optimize_function_for_speed_p (cfun))
29770 if (TARGET_PAD_SHORT_FUNCTION)
29771 ix86_pad_short_function ();
29772 else if (TARGET_PAD_RETURNS)
29773 ix86_pad_returns ();
29774 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
29775 if (TARGET_FOUR_JUMP_LIMIT)
29776 ix86_avoid_jump_mispredicts ();
29780 /* Run the vzeroupper optimization if needed. */
29781 if (cfun->machine->use_vzeroupper_p)
29782 move_or_delete_vzeroupper ();
29785 /* Return nonzero when a QImode register that must be represented via REX prefix
29788 x86_extended_QIreg_mentioned_p (rtx insn)
29791 extract_insn_cached (insn);
29792 for (i = 0; i < recog_data.n_operands; i++)
29793 if (REG_P (recog_data.operand[i])
29794 && REGNO (recog_data.operand[i]) > BX_REG)
29799 /* Return nonzero when P points to a register encoded via REX prefix.
29800 Called via for_each_rtx. */
29802 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
29804 unsigned int regno;
29807 regno = REGNO (*p);
29808 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
29811 /* Return true when INSN mentions a register that must be encoded using a REX
29814 x86_extended_reg_mentioned_p (rtx insn)
29816 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
29817 extended_reg_mentioned_1, NULL);
29820 /* If profitable, negate (without causing overflow) integer constant
29821 of mode MODE at location LOC. Return true in this case. */
29823 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
29827 if (!CONST_INT_P (*loc))
29833 /* DImode x86_64 constants must fit in 32 bits. */
29834 gcc_assert (x86_64_immediate_operand (*loc, mode));
29845 gcc_unreachable ();
29848 /* Avoid overflows. */
29849 if (mode_signbit_p (mode, *loc))
29852 val = INTVAL (*loc);
29854 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
29855 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
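/* Encoding sizes behind the -128 exception (sign-extended imm8
   covers -128..127):
     addl $-4, %eax     -> 83 c0 fc              (3 bytes, imm8)
     subl $4, %eax      -> 83 e8 04              (3 bytes, imm8)
     addl $-128, %eax   -> 83 c0 80              (3 bytes, imm8)
     subl $128, %eax    -> 81 e8 80 00 00 00     (6 bytes, imm32)
   so negating -128 would grow the instruction.  */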
29856 if ((val < 0 && val != -128)
29859 *loc = GEN_INT (-val);
29866 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
29867 optabs would emit if we didn't have TFmode patterns. */
29870 x86_emit_floatuns (rtx operands[2])
29872 rtx neglab, donelab, i0, i1, f0, in, out;
29873 enum machine_mode mode, inmode;
29875 inmode = GET_MODE (operands[1]);
29876 gcc_assert (inmode == SImode || inmode == DImode);
29879 in = force_reg (inmode, operands[1]);
29880 mode = GET_MODE (out);
29881 neglab = gen_label_rtx ();
29882 donelab = gen_label_rtx ();
29883 f0 = gen_reg_rtx (mode);
29885 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
29887 expand_float (out, in, 0);
29889 emit_jump_insn (gen_jump (donelab));
29892 emit_label (neglab);
29894 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
29896 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
29898 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
29900 expand_float (f0, i0, 0);
29902 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
29904 emit_label (donelab);
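/* A minimal scalar model of the sequence emitted above, assuming
   DImode input and double output; this function is illustrative
   only and not used by the compiler.  */
static double ATTRIBUTE_UNUSED
x86_floatuns_model (unsigned long long u)
{
  if ((long long) u >= 0)
    /* High bit clear: a signed conversion is already exact.  */
    return (double) (long long) u;
  /* Halve while folding the shifted-out bit back in, so that the
     final doubling rounds the same way a direct conversion would.  */
  return (double) (long long) ((u >> 1) | (u & 1)) * 2.0;
}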
29907 /* AVX does not support 32-byte integer vector operations,
29908 thus the longest vector we are faced with is V16QImode. */
29909 #define MAX_VECT_LEN 16
29911 struct expand_vec_perm_d
29913 rtx target, op0, op1;
29914 unsigned char perm[MAX_VECT_LEN];
29915 enum machine_mode vmode;
29916 unsigned char nelt;
29920 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
29921 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
29923 /* Get a vector mode of the same size as the original but with elements
29924 twice as wide. This is only guaranteed to apply to integral vectors. */
29926 static inline enum machine_mode
29927 get_mode_wider_vector (enum machine_mode o)
29929 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
29930 enum machine_mode n = GET_MODE_WIDER_MODE (o);
29931 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
29932 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
29936 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
29937 with all elements equal to VAR. Return true if successful. */
29940 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
29941 rtx target, rtx val)
29964 /* First attempt to recognize VAL as-is. */
29965 dup = gen_rtx_VEC_DUPLICATE (mode, val);
29966 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
29967 if (recog_memoized (insn) < 0)
29970 /* If that fails, force VAL into a register. */
29973 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
29974 seq = get_insns ();
29977 emit_insn_before (seq, insn);
29979 ok = recog_memoized (insn) >= 0;
29988 if (TARGET_SSE || TARGET_3DNOW_A)
29992 val = gen_lowpart (SImode, val);
29993 x = gen_rtx_TRUNCATE (HImode, val);
29994 x = gen_rtx_VEC_DUPLICATE (mode, x);
29995 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30008 struct expand_vec_perm_d dperm;
30012 memset (&dperm, 0, sizeof (dperm));
30013 dperm.target = target;
30014 dperm.vmode = mode;
30015 dperm.nelt = GET_MODE_NUNITS (mode);
30016 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
30018 /* Extend to SImode using a paradoxical SUBREG. */
30019 tmp1 = gen_reg_rtx (SImode);
30020 emit_move_insn (tmp1, gen_lowpart (SImode, val));
30022 /* Insert the SImode value as low element of a V4SImode vector. */
30023 tmp2 = gen_lowpart (V4SImode, dperm.op0);
30024 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
30026 ok = (expand_vec_perm_1 (&dperm)
30027 || expand_vec_perm_broadcast_1 (&dperm));
30039 /* Replicate the value once into the next wider mode and recurse. */
30041 enum machine_mode smode, wsmode, wvmode;
30044 smode = GET_MODE_INNER (mode);
30045 wvmode = get_mode_wider_vector (mode);
30046 wsmode = GET_MODE_INNER (wvmode);
30048 val = convert_modes (wsmode, smode, val, true);
30049 x = expand_simple_binop (wsmode, ASHIFT, val,
30050 GEN_INT (GET_MODE_BITSIZE (smode)),
30051 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30052 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
30054 x = gen_lowpart (wvmode, target);
30055 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
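/* Illustrative widening step: to splat the QImode value 0xAB, the
   code above first forms the HImode value
     (0xAB << 8) | 0xAB == 0xABAB
   and then recurses to splat that into a vector with half as many,
   twice as wide elements.  */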
30063 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
30064 rtx x = gen_reg_rtx (hvmode);
30066 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
30069 x = gen_rtx_VEC_CONCAT (mode, x, x);
30070 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30079 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30080 whose ONE_VAR element is VAR, and other elements are zero. Return true
30084 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
30085 rtx target, rtx var, int one_var)
30087 enum machine_mode vsimode;
30090 bool use_vector_set = false;
30095 /* For SSE4.1, we normally use vector set. But if the second
30096 element is zero and inter-unit moves are OK, we use movq
30098 use_vector_set = (TARGET_64BIT
30100 && !(TARGET_INTER_UNIT_MOVES
30106 use_vector_set = TARGET_SSE4_1;
30109 use_vector_set = TARGET_SSE2;
30112 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
30119 use_vector_set = TARGET_AVX;
30122 /* Use ix86_expand_vector_set in 64bit mode only. */
30123 use_vector_set = TARGET_AVX && TARGET_64BIT;
30129 if (use_vector_set)
30131 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
30132 var = force_reg (GET_MODE_INNER (mode), var);
30133 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30149 var = force_reg (GET_MODE_INNER (mode), var);
30150 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
30151 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30156 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
30157 new_target = gen_reg_rtx (mode);
30159 new_target = target;
30160 var = force_reg (GET_MODE_INNER (mode), var);
30161 x = gen_rtx_VEC_DUPLICATE (mode, var);
30162 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
30163 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
30166 /* We need to shuffle the value to the correct position, so
30167 create a new pseudo to store the intermediate result. */
30169 /* With SSE2, we can use the integer shuffle insns. */
30170 if (mode != V4SFmode && TARGET_SSE2)
30172 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
30174 GEN_INT (one_var == 1 ? 0 : 1),
30175 GEN_INT (one_var == 2 ? 0 : 1),
30176 GEN_INT (one_var == 3 ? 0 : 1)));
30177 if (target != new_target)
30178 emit_move_insn (target, new_target);
30182 /* Otherwise convert the intermediate result to V4SFmode and
30183 use the SSE1 shuffle instructions. */
30184 if (mode != V4SFmode)
30186 tmp = gen_reg_rtx (V4SFmode);
30187 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
30192 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
30194 GEN_INT (one_var == 1 ? 0 : 1),
30195 GEN_INT (one_var == 2 ? 0+4 : 1+4),
30196 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
30198 if (mode != V4SFmode)
30199 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
30200 else if (tmp != target)
30201 emit_move_insn (target, tmp);
30203 else if (target != new_target)
30204 emit_move_insn (target, new_target);
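/* Selector convention in the gen_sse_shufps_v4sf calls above: the
   four immediates index the concatenation of both source vectors,
   so values 0-3 pick an element of the first operand and 4-7
   (written "0+4" etc. for clarity) pick from the second, mirroring
   how SHUFPS draws its low result half from the first source and
   its high half from the second.  */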
30209 vsimode = V4SImode;
30215 vsimode = V2SImode;
30221 /* Zero extend the variable element to SImode and recurse. */
30222 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
30224 x = gen_reg_rtx (vsimode);
30225 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
30227 gcc_unreachable ();
30229 emit_move_insn (target, gen_lowpart (mode, x));
30237 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30238 consisting of the values in VALS. It is known that all elements
30239 except ONE_VAR are constants. Return true if successful. */
30242 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
30243 rtx target, rtx vals, int one_var)
30245 rtx var = XVECEXP (vals, 0, one_var);
30246 enum machine_mode wmode;
30249 const_vec = copy_rtx (vals);
30250 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
30251 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
30259 /* For the two element vectors, it's just as easy to use
30260 the general case. */
30264 /* Use ix86_expand_vector_set in 64bit mode only. */
30287 /* There's no way to set one QImode entry easily. Combine
30288 the variable value with its adjacent constant value, and
30289 promote to an HImode set. */
30290 x = XVECEXP (vals, 0, one_var ^ 1);
30293 var = convert_modes (HImode, QImode, var, true);
30294 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
30295 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30296 x = GEN_INT (INTVAL (x) & 0xff);
30300 var = convert_modes (HImode, QImode, var, true);
30301 x = gen_int_mode (INTVAL (x) << 8, HImode);
30303 if (x != const0_rtx)
30304 var = expand_simple_binop (HImode, IOR, var, x, var,
30305 1, OPTAB_LIB_WIDEN);
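/* Worked example of the pairing above (illustrative): for a V16QI
   vector with one_var == 5, the variable byte is shifted into the
   high half and combined with constant byte 4, giving the HImode
   element ((var << 8) | c4), which is then stored at HImode
   position 5 >> 1 == 2.  */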
30307 x = gen_reg_rtx (wmode);
30308 emit_move_insn (x, gen_lowpart (wmode, const_vec));
30309 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
30311 emit_move_insn (target, gen_lowpart (mode, x));
30318 emit_move_insn (target, const_vec);
30319 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30323 /* A subroutine of ix86_expand_vector_init_general. Use vector
30324 concatenate to handle the most general case: all values variable,
30325 and none identical. */
30328 ix86_expand_vector_init_concat (enum machine_mode mode,
30329 rtx target, rtx *ops, int n)
30331 enum machine_mode cmode, hmode = VOIDmode;
30332 rtx first[8], second[4];
30372 gcc_unreachable ();
30375 if (!register_operand (ops[1], cmode))
30376 ops[1] = force_reg (cmode, ops[1]);
30377 if (!register_operand (ops[0], cmode))
30378 ops[0] = force_reg (cmode, ops[0]);
30379 emit_insn (gen_rtx_SET (VOIDmode, target,
30380 gen_rtx_VEC_CONCAT (mode, ops[0],
30400 gcc_unreachable ();
30416 gcc_unreachable ();
30421 /* FIXME: We process inputs backward to help RA. PR 36222. */
30424 for (; i > 0; i -= 2, j--)
30426 first[j] = gen_reg_rtx (cmode);
30427 v = gen_rtvec (2, ops[i - 1], ops[i]);
30428 ix86_expand_vector_init (false, first[j],
30429 gen_rtx_PARALLEL (cmode, v));
30435 gcc_assert (hmode != VOIDmode);
30436 for (i = j = 0; i < n; i += 2, j++)
30438 second[j] = gen_reg_rtx (hmode);
30439 ix86_expand_vector_init_concat (hmode, second [j],
30443 ix86_expand_vector_init_concat (mode, target, second, n);
30446 ix86_expand_vector_init_concat (mode, target, first, n);
30450 gcc_unreachable ();
30454 /* A subroutine of ix86_expand_vector_init_general. Use vector
30455 interleave to handle the most general case: all values variable,
30456 and none identical. */
30459 ix86_expand_vector_init_interleave (enum machine_mode mode,
30460 rtx target, rtx *ops, int n)
30462 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
30465 rtx (*gen_load_even) (rtx, rtx, rtx);
30466 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
30467 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
30472 gen_load_even = gen_vec_setv8hi;
30473 gen_interleave_first_low = gen_vec_interleave_lowv4si;
30474 gen_interleave_second_low = gen_vec_interleave_lowv2di;
30475 inner_mode = HImode;
30476 first_imode = V4SImode;
30477 second_imode = V2DImode;
30478 third_imode = VOIDmode;
30481 gen_load_even = gen_vec_setv16qi;
30482 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
30483 gen_interleave_second_low = gen_vec_interleave_lowv4si;
30484 inner_mode = QImode;
30485 first_imode = V8HImode;
30486 second_imode = V4SImode;
30487 third_imode = V2DImode;
30490 gcc_unreachable ();
30493 for (i = 0; i < n; i++)
30495 /* Extend the odd element to SImode using a paradoxical SUBREG. */
30496 op0 = gen_reg_rtx (SImode);
30497 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
30499 /* Insert the SImode value as low element of V4SImode vector. */
30500 op1 = gen_reg_rtx (V4SImode);
30501 op0 = gen_rtx_VEC_MERGE (V4SImode,
30502 gen_rtx_VEC_DUPLICATE (V4SImode,
30504 CONST0_RTX (V4SImode),
30506 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
30508 /* Cast the V4SImode vector back to a vector in original mode. */
30509 op0 = gen_reg_rtx (mode);
30510 emit_move_insn (op0, gen_lowpart (mode, op1));
30512 /* Load even elements into the second position. */
30513 emit_insn (gen_load_even (op0,
30514 force_reg (inner_mode,
30518 /* Cast vector to FIRST_IMODE vector. */
30519 ops[i] = gen_reg_rtx (first_imode);
30520 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
30523 /* Interleave low FIRST_IMODE vectors. */
30524 for (i = j = 0; i < n; i += 2, j++)
30526 op0 = gen_reg_rtx (first_imode);
30527 emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));
30529 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
30530 ops[j] = gen_reg_rtx (second_imode);
30531 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
30534 /* Interleave low SECOND_IMODE vectors. */
30535 switch (second_imode)
30538 for (i = j = 0; i < n / 2; i += 2, j++)
30540 op0 = gen_reg_rtx (second_imode);
30541 emit_insn (gen_interleave_second_low (op0, ops[i],
30544 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
30546 ops[j] = gen_reg_rtx (third_imode);
30547 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
30549 second_imode = V2DImode;
30550 gen_interleave_second_low = gen_vec_interleave_lowv2di;
30554 op0 = gen_reg_rtx (second_imode);
30555 emit_insn (gen_interleave_second_low (op0, ops[0],
30558 /* Cast the SECOND_IMODE vector back to a vector in the original
30560 emit_insn (gen_rtx_SET (VOIDmode, target,
30561 gen_lowpart (mode, op0)));
30565 gcc_unreachable ();
30569 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
30570 all values variable, and none identical. */
30573 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
30574 rtx target, rtx vals)
30576 rtx ops[32], op0, op1;
30577 enum machine_mode half_mode = VOIDmode;
30584 if (!mmx_ok && !TARGET_SSE)
30596 n = GET_MODE_NUNITS (mode);
30597 for (i = 0; i < n; i++)
30598 ops[i] = XVECEXP (vals, 0, i);
30599 ix86_expand_vector_init_concat (mode, target, ops, n);
30603 half_mode = V16QImode;
30607 half_mode = V8HImode;
30611 n = GET_MODE_NUNITS (mode);
30612 for (i = 0; i < n; i++)
30613 ops[i] = XVECEXP (vals, 0, i);
30614 op0 = gen_reg_rtx (half_mode);
30615 op1 = gen_reg_rtx (half_mode);
30616 ix86_expand_vector_init_interleave (half_mode, op0, ops,
30618 ix86_expand_vector_init_interleave (half_mode, op1,
30619 &ops [n >> 1], n >> 2);
30620 emit_insn (gen_rtx_SET (VOIDmode, target,
30621 gen_rtx_VEC_CONCAT (mode, op0, op1)));
30625 if (!TARGET_SSE4_1)
30633 /* Don't use ix86_expand_vector_init_interleave if we can't
30634 move from GPR to SSE register directly. */
30635 if (!TARGET_INTER_UNIT_MOVES)
30638 n = GET_MODE_NUNITS (mode);
30639 for (i = 0; i < n; i++)
30640 ops[i] = XVECEXP (vals, 0, i);
30641 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
30649 gcc_unreachable ();
30653 int i, j, n_elts, n_words, n_elt_per_word;
30654 enum machine_mode inner_mode;
30655 rtx words[4], shift;
30657 inner_mode = GET_MODE_INNER (mode);
30658 n_elts = GET_MODE_NUNITS (mode);
30659 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
30660 n_elt_per_word = n_elts / n_words;
30661 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
30663 for (i = 0; i < n_words; ++i)
30665 rtx word = NULL_RTX;
30667 for (j = 0; j < n_elt_per_word; ++j)
30669 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
30670 elt = convert_modes (word_mode, inner_mode, elt, true);
30676 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
30677 word, 1, OPTAB_LIB_WIDEN);
30678 word = expand_simple_binop (word_mode, IOR, word, elt,
30679 word, 1, OPTAB_LIB_WIDEN);
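/* Illustrative packing for HImode elements into a 32-bit word (an
   assumption for concreteness): elements are visited highest-index
   first, so the loop computes
     word = e1;  word = (word << 16) | e0;
   leaving element 0 in the low-order bits, which matches the
   vector's little-endian memory layout.  */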
30687 emit_move_insn (target, gen_lowpart (mode, words[0]));
30688 else if (n_words == 2)
30690 rtx tmp = gen_reg_rtx (mode);
30691 emit_clobber (tmp);
30692 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
30693 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
30694 emit_move_insn (target, tmp);
30696 else if (n_words == 4)
30698 rtx tmp = gen_reg_rtx (V4SImode);
30699 gcc_assert (word_mode == SImode);
30700 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
30701 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
30702 emit_move_insn (target, gen_lowpart (mode, tmp));
30705 gcc_unreachable ();
30709 /* Initialize vector TARGET via VALS. Suppress the use of MMX
30710 instructions unless MMX_OK is true. */
30713 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
30715 enum machine_mode mode = GET_MODE (target);
30716 enum machine_mode inner_mode = GET_MODE_INNER (mode);
30717 int n_elts = GET_MODE_NUNITS (mode);
30718 int n_var = 0, one_var = -1;
30719 bool all_same = true, all_const_zero = true;
30723 for (i = 0; i < n_elts; ++i)
30725 x = XVECEXP (vals, 0, i);
30726 if (!(CONST_INT_P (x)
30727 || GET_CODE (x) == CONST_DOUBLE
30728 || GET_CODE (x) == CONST_FIXED))
30729 n_var++, one_var = i;
30730 else if (x != CONST0_RTX (inner_mode))
30731 all_const_zero = false;
30732 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
30736 /* Constants are best loaded from the constant pool. */
30739 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
30743 /* If all values are identical, broadcast the value. */
30745 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
30746 XVECEXP (vals, 0, 0)))
30749 /* Values where only one field is non-constant are best loaded from
30750 the pool and overwritten via move later. */
30754 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
30755 XVECEXP (vals, 0, one_var),
30759 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
30763 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
30767 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
30769 enum machine_mode mode = GET_MODE (target);
30770 enum machine_mode inner_mode = GET_MODE_INNER (mode);
30771 enum machine_mode half_mode;
30772 bool use_vec_merge = false;
30774 static rtx (*gen_extract[6][2]) (rtx, rtx)
30776 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
30777 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
30778 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
30779 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
30780 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
30781 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
30783 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
30785 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
30786 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
30787 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
30788 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
30789 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
30790 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
30800 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
30801 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
30803 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
30805 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
30806 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
30812 use_vec_merge = TARGET_SSE4_1;
30820 /* For the two element vectors, we implement a VEC_CONCAT with
30821 the extraction of the other element. */
30823 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
30824 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
30827 op0 = val, op1 = tmp;
30829 op0 = tmp, op1 = val;
30831 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
30832 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
30837 use_vec_merge = TARGET_SSE4_1;
30844 use_vec_merge = true;
30848 /* tmp = target = A B C D */
30849 tmp = copy_to_reg (target);
30850 /* target = A A B B */
30851 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
30852 /* target = X A B B */
30853 ix86_expand_vector_set (false, target, val, 0);
30854 /* target = A X C D */
30855 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
30856 const1_rtx, const0_rtx,
30857 GEN_INT (2+4), GEN_INT (3+4)));
30861 /* tmp = target = A B C D */
30862 tmp = copy_to_reg (target);
30863 /* tmp = X B C D */
30864 ix86_expand_vector_set (false, tmp, val, 0);
30865 /* target = A B X D */
30866 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
30867 const0_rtx, const1_rtx,
30868 GEN_INT (0+4), GEN_INT (3+4)));
30872 /* tmp = target = A B C D */
30873 tmp = copy_to_reg (target);
30874 /* tmp = X B C D */
30875 ix86_expand_vector_set (false, tmp, val, 0);
30876 /* target = A B X D */
30877 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
30878 const0_rtx, const1_rtx,
30879 GEN_INT (2+4), GEN_INT (0+4)));
30883 gcc_unreachable ();
30888 use_vec_merge = TARGET_SSE4_1;
30892 /* Element 0 handled by vec_merge below. */
30895 use_vec_merge = true;
30901 /* With SSE2, use integer shuffles to swap element 0 and ELT,
30902 store into element 0, then shuffle them back. */
30906 order[0] = GEN_INT (elt);
30907 order[1] = const1_rtx;
30908 order[2] = const2_rtx;
30909 order[3] = GEN_INT (3);
30910 order[elt] = const0_rtx;
30912 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
30913 order[1], order[2], order[3]));
30915 ix86_expand_vector_set (false, target, val, 0);
30917 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
30918 order[1], order[2], order[3]));
30922 /* For SSE1, we have to reuse the V4SF code. */
30923 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
30924 gen_lowpart (SFmode, val), elt);
30929 use_vec_merge = TARGET_SSE2;
30932 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
30936 use_vec_merge = TARGET_SSE4_1;
30943 half_mode = V16QImode;
30949 half_mode = V8HImode;
30955 half_mode = V4SImode;
30961 half_mode = V2DImode;
30967 half_mode = V4SFmode;
30973 half_mode = V2DFmode;
30979 /* Compute offset. */
30983 gcc_assert (i <= 1);
30985 /* Extract the half. */
30986 tmp = gen_reg_rtx (half_mode);
30987 emit_insn (gen_extract[j][i] (tmp, target));
30989 /* Put val in tmp at elt. */
30990 ix86_expand_vector_set (false, tmp, val, elt);
30993 emit_insn (gen_insert[j][i] (target, target, tmp));
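/* E.g. for V8SFmode with elt == 5 (an illustrative assumption):
   i == 1 selects the high 128-bit half, which vextractf128 moves
   into a V4SFmode temporary; the value is then stored at position
   5 % 4 == 1 by the 128-bit code above, and vinsertf128 writes the
   half back.  */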
31002 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
31003 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
31004 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31008 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31010 emit_move_insn (mem, target);
31012 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31013 emit_move_insn (tmp, val);
31015 emit_move_insn (target, mem);
31020 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
31022 enum machine_mode mode = GET_MODE (vec);
31023 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31024 bool use_vec_extr = false;
31037 use_vec_extr = true;
31041 use_vec_extr = TARGET_SSE4_1;
31053 tmp = gen_reg_rtx (mode);
31054 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
31055 GEN_INT (elt), GEN_INT (elt),
31056 GEN_INT (elt+4), GEN_INT (elt+4)));
31060 tmp = gen_reg_rtx (mode);
31061 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
31065 gcc_unreachable ();
31068 use_vec_extr = true;
31073 use_vec_extr = TARGET_SSE4_1;
31087 tmp = gen_reg_rtx (mode);
31088 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
31089 GEN_INT (elt), GEN_INT (elt),
31090 GEN_INT (elt), GEN_INT (elt)));
31094 tmp = gen_reg_rtx (mode);
31095 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
31099 gcc_unreachable ();
31102 use_vec_extr = true;
31107 /* For SSE1, we have to reuse the V4SF code. */
31108 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
31109 gen_lowpart (V4SFmode, vec), elt);
31115 use_vec_extr = TARGET_SSE2;
31118 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31122 use_vec_extr = TARGET_SSE4_1;
31126 /* ??? Could extract the appropriate HImode element and shift. */
31133 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
31134 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
31136 /* Let the rtl optimizers know about the zero extension performed. */
31137 if (inner_mode == QImode || inner_mode == HImode)
31139 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
31140 target = gen_lowpart (SImode, target);
31143 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31147 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31149 emit_move_insn (mem, vec);
31151 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31152 emit_move_insn (target, tmp);
31156 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
31157 pattern to reduce; DEST is the destination; IN is the input vector. */
31160 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
31162 rtx tmp1, tmp2, tmp3;
31164 tmp1 = gen_reg_rtx (V4SFmode);
31165 tmp2 = gen_reg_rtx (V4SFmode);
31166 tmp3 = gen_reg_rtx (V4SFmode);
31168 emit_insn (gen_sse_movhlps (tmp1, in, in));
31169 emit_insn (fn (tmp2, tmp1, in));
31171 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
31172 const1_rtx, const1_rtx,
31173 GEN_INT (1+4), GEN_INT (1+4)));
31174 emit_insn (fn (dest, tmp2, tmp3));
31177 /* Target hook for scalar_mode_supported_p. */
31179 ix86_scalar_mode_supported_p (enum machine_mode mode)
31181 if (DECIMAL_FLOAT_MODE_P (mode))
31182 return default_decimal_float_supported_p ();
31183 else if (mode == TFmode)
31186 return default_scalar_mode_supported_p (mode);
31189 /* Implements target hook vector_mode_supported_p. */
31191 ix86_vector_mode_supported_p (enum machine_mode mode)
31193 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
31195 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
31197 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
31199 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
31201 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
31206 /* Target hook for c_mode_for_suffix. */
31207 static enum machine_mode
31208 ix86_c_mode_for_suffix (char suffix)
31218 /* Worker function for TARGET_MD_ASM_CLOBBERS.
31220 We do this in the new i386 backend to maintain source compatibility
31221 with the old cc0-based compiler. */
31224 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
31225 tree inputs ATTRIBUTE_UNUSED,
31228 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
31230 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
31235 /* Implements the target hook targetm.asm.encode_section_info. This
31236 is not used by NetWare. */
31238 static void ATTRIBUTE_UNUSED
31239 ix86_encode_section_info (tree decl, rtx rtl, int first)
31241 default_encode_section_info (decl, rtl, first);
31243 if (TREE_CODE (decl) == VAR_DECL
31244 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
31245 && ix86_in_large_data_p (decl))
31246 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
31249 /* Worker function for REVERSE_CONDITION. */
31252 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
31254 return (mode != CCFPmode && mode != CCFPUmode
31255 ? reverse_condition (code)
31256 : reverse_condition_maybe_unordered (code));
31259 /* Output code to perform an x87 FP register move, from OPERANDS[1]
31263 output_387_reg_move (rtx insn, rtx *operands)
31265 if (REG_P (operands[0]))
31267 if (REG_P (operands[1])
31268 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31270 if (REGNO (operands[0]) == FIRST_STACK_REG)
31271 return output_387_ffreep (operands, 0);
31272 return "fstp\t%y0";
31274 if (STACK_TOP_P (operands[0]))
31275 return "fld%Z1\t%y1";
31278 else if (MEM_P (operands[0]))
31280 gcc_assert (REG_P (operands[1]));
31281 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31282 return "fstp%Z0\t%y0";
31285 /* There is no non-popping store to memory for XFmode.
31286 So if we need one, follow the store with a load. */
31287 if (GET_MODE (operands[0]) == XFmode)
31288 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
31290 return "fst%Z0\t%y0";
31297 /* Output code to perform a conditional jump to LABEL, if C2 flag in
31298 FP status register is set. */
31301 ix86_emit_fp_unordered_jump (rtx label)
31303 rtx reg = gen_reg_rtx (HImode);
31306 emit_insn (gen_x86_fnstsw_1 (reg));
31308 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
31310 emit_insn (gen_x86_sahf_1 (reg));
31312 temp = gen_rtx_REG (CCmode, FLAGS_REG);
31313 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
31317 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
31319 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
31320 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
31323 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
31324 gen_rtx_LABEL_REF (VOIDmode, label),
31326 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
31328 emit_jump_insn (temp);
31329 predict_jump (REG_BR_PROB_BASE * 10 / 100);
31332 /* Output code to perform a log1p XFmode calculation. */
31334 void ix86_emit_i387_log1p (rtx op0, rtx op1)
31336 rtx label1 = gen_label_rtx ();
31337 rtx label2 = gen_label_rtx ();
31339 rtx tmp = gen_reg_rtx (XFmode);
31340 rtx tmp2 = gen_reg_rtx (XFmode);
31343 emit_insn (gen_absxf2 (tmp, op1));
31344 test = gen_rtx_GE (VOIDmode, tmp,
31345 CONST_DOUBLE_FROM_REAL_VALUE (
31346 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
31348 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
31350 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
31351 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
31352 emit_jump (label2);
31354 emit_label (label1);
31355 emit_move_insn (tmp, CONST1_RTX (XFmode));
31356 emit_insn (gen_addxf3 (tmp, op1, tmp));
31357 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
31358 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
31360 emit_label (label2);
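/* The threshold above is 1 - sqrt(2)/2 ~= 0.2928932..., the
   documented input limit of fyl2xp1.  Both branches evaluate
     log1p (x) = ln (2) * log2 (1 + x)
   with fldln2 supplying ln (2): fyl2xp1 computes y * log2 (x + 1)
   directly for small |x|, avoiding the cancellation in forming
   1 + x, while the fallback adds 1.0 explicitly and uses fyl2x,
   which computes y * log2 (x).  */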
31363 /* Output code to perform a Newton-Raphson approximation of a single precision
31364 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
31366 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
31368 rtx x0, x1, e0, e1, two;
31370 x0 = gen_reg_rtx (mode);
31371 e0 = gen_reg_rtx (mode);
31372 e1 = gen_reg_rtx (mode);
31373 x1 = gen_reg_rtx (mode);
31375 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
31377 if (VECTOR_MODE_P (mode))
31378 two = ix86_build_const_vector (mode, true, two);
31380 two = force_reg (mode, two);
31382 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
31384 /* x0 = rcp(b) estimate */
31385 emit_insn (gen_rtx_SET (VOIDmode, x0,
31386 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
31389 emit_insn (gen_rtx_SET (VOIDmode, e0,
31390 gen_rtx_MULT (mode, x0, a)));
31392 emit_insn (gen_rtx_SET (VOIDmode, e1,
31393 gen_rtx_MULT (mode, x0, b)));
31395 emit_insn (gen_rtx_SET (VOIDmode, x1,
31396 gen_rtx_MINUS (mode, two, e1)));
31397 /* res = e0 * x1 */
31398 emit_insn (gen_rtx_SET (VOIDmode, res,
31399 gen_rtx_MULT (mode, e0, x1)));
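/* Illustrative scalar model of the sequence above; rcp_estimate
   stands in for the ~12-bit RCPSS hardware estimate and is an
   assumption of this sketch, not a real helper.  */
static float ATTRIBUTE_UNUSED
x86_swdiv_model (float a, float b)
{
  extern float rcp_estimate (float);
  float x0 = rcp_estimate (b);	/* x0 = rcp (b) estimate */
  float e0 = x0 * a;		/* e0 = a * x0 */
  float e1 = x0 * b;		/* e1 = b * x0 */
  float x1 = 2.0f - e1;		/* x1 = 2 - b * x0 */
  /* One Newton-Raphson step roughly doubles the estimate's
     precision: a/b ~= a * x0 * (2 - b * x0).  */
  return e0 * x1;
}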
31402 /* Output code to perform a Newton-Raphson approximation of a
31403 single precision floating point [reciprocal] square root. */
31405 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
31408 rtx x0, e0, e1, e2, e3, mthree, mhalf;
31411 x0 = gen_reg_rtx (mode);
31412 e0 = gen_reg_rtx (mode);
31413 e1 = gen_reg_rtx (mode);
31414 e2 = gen_reg_rtx (mode);
31415 e3 = gen_reg_rtx (mode);
31417 real_from_integer (&r, VOIDmode, -3, -1, 0);
31418 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
31420 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
31421 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
31423 if (VECTOR_MODE_P (mode))
31425 mthree = ix86_build_const_vector (mode, true, mthree);
31426 mhalf = ix86_build_const_vector (mode, true, mhalf);
31429 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
31430 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
31432 /* x0 = rsqrt(a) estimate */
31433 emit_insn (gen_rtx_SET (VOIDmode, x0,
31434 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
31437 /* If (a == 0.0), filter out the infinite estimate to prevent NaN for sqrt(0.0). */
31442 zero = gen_reg_rtx (mode);
31443 mask = gen_reg_rtx (mode);
31445 zero = force_reg (mode, CONST0_RTX(mode));
31446 emit_insn (gen_rtx_SET (VOIDmode, mask,
31447 gen_rtx_NE (mode, zero, a)));
31449 emit_insn (gen_rtx_SET (VOIDmode, x0,
31450 gen_rtx_AND (mode, x0, mask)));
31454 emit_insn (gen_rtx_SET (VOIDmode, e0,
31455 gen_rtx_MULT (mode, x0, a)));
31457 emit_insn (gen_rtx_SET (VOIDmode, e1,
31458 gen_rtx_MULT (mode, e0, x0)));
31461 mthree = force_reg (mode, mthree);
31462 emit_insn (gen_rtx_SET (VOIDmode, e2,
31463 gen_rtx_PLUS (mode, e1, mthree)));
31465 mhalf = force_reg (mode, mhalf);
31467 /* e3 = -.5 * x0 */
31468 emit_insn (gen_rtx_SET (VOIDmode, e3,
31469 gen_rtx_MULT (mode, x0, mhalf)));
31471 /* e3 = -.5 * e0 */
31472 emit_insn (gen_rtx_SET (VOIDmode, e3,
31473 gen_rtx_MULT (mode, e0, mhalf)));
31474 /* ret = e2 * e3 */
31475 emit_insn (gen_rtx_SET (VOIDmode, res,
31476 gen_rtx_MULT (mode, e2, e3)));
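/* Illustrative scalar model of the sequence above; rsqrt_estimate
   stands in for RSQRTSS and is an assumption of this sketch (the
   a == 0.0 masking above is omitted here).  */
static float ATTRIBUTE_UNUSED
x86_swsqrt_model (float a, int recip)
{
  extern float rsqrt_estimate (float);
  float x0 = rsqrt_estimate (a);
  float e0 = x0 * a;			/* e0 = a * x0 */
  float e1 = e0 * x0;			/* e1 = a * x0 * x0 */
  float e2 = e1 - 3.0f;			/* e2 = a * x0 * x0 - 3 */
  float e3 = (recip ? x0 : e0) * -0.5f;	/* e3 = -.5 * x0 or -.5 * e0 */
  /* This is the usual Newton-Raphson step x0 * (3 - a*x0*x0) / 2
     with the signs folded into the -3.0 and -0.5 constants.  */
  return e2 * e3;
}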
31479 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
31481 static void ATTRIBUTE_UNUSED
31482 i386_solaris_elf_named_section (const char *name, unsigned int flags,
31485 /* With Binutils 2.15, the "@unwind" marker must be specified on
31486 every occurrence of the ".eh_frame" section, not just the first
31489 && strcmp (name, ".eh_frame") == 0)
31491 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
31492 flags & SECTION_WRITE ? "aw" : "a");
31495 default_elf_asm_named_section (name, flags, decl);
31498 /* Return the mangling of TYPE if it is an extended fundamental type. */
31500 static const char *
31501 ix86_mangle_type (const_tree type)
31503 type = TYPE_MAIN_VARIANT (type);
31505 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
31506 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
31509 switch (TYPE_MODE (type))
31512 /* __float128 is "g". */
31515 /* "long double" or __float80 is "e". */
31522 /* For 32-bit code we can save PIC register setup by using the
31523 hidden function __stack_chk_fail_local instead of calling
31524 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
31525 register, so it is better to call __stack_chk_fail directly. */
31528 ix86_stack_protect_fail (void)
31530 return TARGET_64BIT
31531 ? default_external_stack_protect_fail ()
31532 : default_hidden_stack_protect_fail ();
31535 /* Select a format to encode pointers in exception handling data. CODE
31536 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
31537 true if the symbol may be affected by dynamic relocations.
31539 ??? All x86 object file formats are capable of representing this.
31540 After all, the relocation needed is the same as for the call insn.
31541 Whether or not a particular assembler allows us to enter such, I
31542 guess we'll have to see. */
31544 asm_preferred_eh_data_format (int code, int global)
31548 int type = DW_EH_PE_sdata8;
31550 || ix86_cmodel == CM_SMALL_PIC
31551 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
31552 type = DW_EH_PE_sdata4;
31553 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
31555 if (ix86_cmodel == CM_SMALL
31556 || (ix86_cmodel == CM_MEDIUM && code))
31557 return DW_EH_PE_udata4;
31558 return DW_EH_PE_absptr;
31561 /* Expand copysign from SIGN to the positive value ABS_VALUE
31562 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
31565 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
31567 enum machine_mode mode = GET_MODE (sign);
31568 rtx sgn = gen_reg_rtx (mode);
31569 if (mask == NULL_RTX)
31571 enum machine_mode vmode;
31573 if (mode == SFmode)
31575 else if (mode == DFmode)
31580 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
31581 if (!VECTOR_MODE_P (mode))
31583 /* We need to generate a scalar mode mask in this case. */
31584 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
31585 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
31586 mask = gen_reg_rtx (mode);
31587 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
31591 mask = gen_rtx_NOT (mode, mask);
31592 emit_insn (gen_rtx_SET (VOIDmode, sgn,
31593 gen_rtx_AND (mode, mask, sign)));
31594 emit_insn (gen_rtx_SET (VOIDmode, result,
31595 gen_rtx_IOR (mode, abs_value, sgn)));
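/* Bit-level model of the AND/IOR pair above for scalar DFmode
   (illustrative only): keep just the sign bit of SIGN and OR it
   into the already-positive ABS_VALUE.  */
static double ATTRIBUTE_UNUSED
x86_copysign_model (double abs_value, double sign)
{
  union { double d; unsigned long long u; } a, s;
  a.d = abs_value;
  s.d = sign;
  a.u |= s.u & 0x8000000000000000ULL;	/* sgn = sign & sign-bit mask */
  return a.d;
}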
31598 /* Expand fabs (OP0) and return a new rtx that holds the result. The
31599 mask for masking out the sign-bit is stored in *SMASK, if that is
31602 ix86_expand_sse_fabs (rtx op0, rtx *smask)
31604 enum machine_mode vmode, mode = GET_MODE (op0);
31607 xa = gen_reg_rtx (mode);
31608 if (mode == SFmode)
31610 else if (mode == DFmode)
31614 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
31615 if (!VECTOR_MODE_P (mode))
31617 /* We need to generate a scalar mode mask in this case. */
31618 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
31619 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
31620 mask = gen_reg_rtx (mode);
31621 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
31623 emit_insn (gen_rtx_SET (VOIDmode, xa,
31624 gen_rtx_AND (mode, op0, mask)));
31632 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
31633 swapping the operands if SWAP_OPERANDS is true. The expanded
31634 code is a forward jump to a newly created label in case the
31635 comparison is true. The generated label rtx is returned. */
31637 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
31638 bool swap_operands)
31649 label = gen_label_rtx ();
31650 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
31651 emit_insn (gen_rtx_SET (VOIDmode, tmp,
31652 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
31653 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
31654 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
31655 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
31656 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
31657 JUMP_LABEL (tmp) = label;
31662 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
31663 using comparison code CODE. Operands are swapped for the comparison if
31664 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
31666 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
31667 bool swap_operands)
31669 enum machine_mode mode = GET_MODE (op0);
31670 rtx mask = gen_reg_rtx (mode);
31679 if (mode == DFmode)
31680 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
31681 gen_rtx_fmt_ee (code, mode, op0, op1)));
31683 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
31684 gen_rtx_fmt_ee (code, mode, op0, op1)));
31689 /* Generate and return a rtx of mode MODE for 2**n where n is the number
31690 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
31692 ix86_gen_TWO52 (enum machine_mode mode)
31694 REAL_VALUE_TYPE TWO52r;
31697 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
31698 TWO52 = const_double_from_real_value (TWO52r, mode);
31699 TWO52 = force_reg (mode, TWO52);
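/* Worked example of the 2**52 trick this constant enables: once a
   positive double reaches 2**52 the representable values are
   exactly the integers, so xa + TWO52 rounds xa to an integer in
   the current rounding mode and the later subtraction of TWO52
   recovers it, e.g.
     3.7 + 2**52 -> 4503599627370500.0 (rounded to nearest)
     4503599627370500.0 - 2**52 -> 4.0  */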
31704 /* Expand SSE sequence for computing lround from OP1 storing
31707 ix86_expand_lround (rtx op0, rtx op1)
31709 /* C code for the stuff we're doing below:
31710 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
31713 enum machine_mode mode = GET_MODE (op1);
31714 const struct real_format *fmt;
31715 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
31718 /* load nextafter (0.5, 0.0) */
31719 fmt = REAL_MODE_FORMAT (mode);
31720 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
31721 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
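/* half_minus_pred_half is 2**(-p-1), half an ulp of 0.5, so
   pred_half == nextafter (0.5, 0.0), e.g. 0.49999999999999994 in
   DFmode.  Adding it instead of 0.5 keeps inputs just below 0.5
   from rounding up to 1.0 in the addition, which would make lround
   return 1 instead of 0.  */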
31723 /* adj = copysign (0.5, op1) */
31724 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
31725 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
31727 /* adj = op1 + adj */
31728 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
31730 /* op0 = (imode)adj */
31731 expand_fix (op0, adj, 0);
31734 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
31737 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
31739 /* C code for the stuff we're doing below (for do_floor):
31741 xi -= (double)xi > op1 ? 1 : 0;
31744 enum machine_mode fmode = GET_MODE (op1);
31745 enum machine_mode imode = GET_MODE (op0);
31746 rtx ireg, freg, label, tmp;
31748 /* reg = (long)op1 */
31749 ireg = gen_reg_rtx (imode);
31750 expand_fix (ireg, op1, 0);
31752 /* freg = (double)reg */
31753 freg = gen_reg_rtx (fmode);
31754 expand_float (freg, ireg, 0);
31756 /* ireg = (freg > op1) ? ireg - 1 : ireg */
31757 label = ix86_expand_sse_compare_and_jump (UNLE,
31758 freg, op1, !do_floor);
31759 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
31760 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
31761 emit_move_insn (ireg, tmp);
31763 emit_label (label);
31764 LABEL_NUSES (label) = 1;
31766 emit_move_insn (op0, ireg);
31769 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
31770 result in OPERAND0. */
31772 ix86_expand_rint (rtx operand0, rtx operand1)
31774 /* C code for the stuff we're doing below:
31775 xa = fabs (operand1);
31776 if (!isless (xa, 2**52))
31778 xa = xa + 2**52 - 2**52;
31779 return copysign (xa, operand1);
31781 enum machine_mode mode = GET_MODE (operand0);
31782 rtx res, xa, label, TWO52, mask;
31784 res = gen_reg_rtx (mode);
31785 emit_move_insn (res, operand1);
31787 /* xa = abs (operand1) */
31788 xa = ix86_expand_sse_fabs (res, &mask);
31790 /* if (!isless (xa, TWO52)) goto label; */
31791 TWO52 = ix86_gen_TWO52 (mode);
31792 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
31794 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
31795 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
31797 ix86_sse_copysign_to_positive (res, xa, res, mask);
31799 emit_label (label);
31800 LABEL_NUSES (label) = 1;
31802 emit_move_insn (operand0, res);
31805 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
31808 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
31810 /* C code for the stuff we expand below.
31811 double xa = fabs (x), x2;
31812 if (!isless (xa, TWO52))
31814 xa = xa + TWO52 - TWO52;
31815 x2 = copysign (xa, x);
31824 enum machine_mode mode = GET_MODE (operand0);
31825 rtx xa, TWO52, tmp, label, one, res, mask;
31827 TWO52 = ix86_gen_TWO52 (mode);
31829 /* Temporary for holding the result, initialized to the input
31830 operand to ease control flow. */
31831 res = gen_reg_rtx (mode);
31832 emit_move_insn (res, operand1);
31834 /* xa = abs (operand1) */
31835 xa = ix86_expand_sse_fabs (res, &mask);
31837 /* if (!isless (xa, TWO52)) goto label; */
31838 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
31840 /* xa = xa + TWO52 - TWO52; */
31841 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
31842 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
31844 /* xa = copysign (xa, operand1) */
31845 ix86_sse_copysign_to_positive (xa, xa, res, mask);
31847 /* generate 1.0 or -1.0 */
31848 one = force_reg (mode,
31849 const_double_from_real_value (do_floor
31850 ? dconst1 : dconstm1, mode));
31852 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
31853 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
31854 emit_insn (gen_rtx_SET (VOIDmode, tmp,
31855 gen_rtx_AND (mode, one, tmp)));
31856 /* We always need to subtract here to preserve signed zero. */
31857 tmp = expand_simple_binop (mode, MINUS,
31858 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
31859 emit_move_insn (res, tmp);
31861 emit_label (label);
31862 LABEL_NUSES (label) = 1;
31864 emit_move_insn (operand0, res);
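/* Scalar model of the branchless compensation used above for the
   floor case (illustrative; the real compare uses an
   unordered-or-greater predicate): the SSE compare produces an
   all-ones mask where x2 > x, ANDing with 1.0 turns that into 1.0
   or +0.0, and the unconditional subtract preserves -0.0 inputs.  */
static double ATTRIBUTE_UNUSED
x86_floor_step_model (double x2, double x)
{
  union { double d; unsigned long long u; } m, one;
  one.d = 1.0;
  m.u = x2 > x ? ~0ULL : 0ULL;	/* all-ones compare mask */
  m.u &= one.u;			/* 1.0 where the mask is set */
  return x2 - m.d;
}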
31867 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
31870 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
31872 /* C code for the stuff we expand below.
31873 double xa = fabs (x), x2;
31874 if (!isless (xa, TWO52))
31876 x2 = (double)(long)x;
31883 if (HONOR_SIGNED_ZEROS (mode))
31884 return copysign (x2, x);
31887 enum machine_mode mode = GET_MODE (operand0);
31888 rtx xa, xi, TWO52, tmp, label, one, res, mask;
31890 TWO52 = ix86_gen_TWO52 (mode);
31892 /* Temporary for holding the result, initialized to the input
31893 operand to ease control flow. */
31894 res = gen_reg_rtx (mode);
31895 emit_move_insn (res, operand1);
31897 /* xa = abs (operand1) */
31898 xa = ix86_expand_sse_fabs (res, &mask);
31900 /* if (!isless (xa, TWO52)) goto label; */
31901 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
31903 /* xa = (double)(long)x */
31904 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
31905 expand_fix (xi, res, 0);
31906 expand_float (xa, xi, 0);
31909 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
31911 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
31912 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
31913 emit_insn (gen_rtx_SET (VOIDmode, tmp,
31914 gen_rtx_AND (mode, one, tmp)));
31915 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
31916 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
31917 emit_move_insn (res, tmp);
31919 if (HONOR_SIGNED_ZEROS (mode))
31920 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
31922 emit_label (label);
31923 LABEL_NUSES (label) = 1;
31925 emit_move_insn (operand0, res);
31928 /* Expand SSE sequence for computing round from OPERAND1 storing
31929 into OPERAND0. The sequence works without relying on DImode truncation
31930 via cvttsd2siq, which is only available on 64-bit targets. */
31932 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
31934 /* C code for the stuff we expand below.
31935 double xa = fabs (x), xa2, x2;
31936 if (!isless (xa, TWO52))
31938 Using the absolute value and copying back sign makes
31939 -0.0 -> -0.0 correct.
31940 xa2 = xa + TWO52 - TWO52;
31945 else if (dxa > 0.5)
31947 x2 = copysign (xa2, x);
31950 enum machine_mode mode = GET_MODE (operand0);
31951 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
31953 TWO52 = ix86_gen_TWO52 (mode);
31955 /* Temporary for holding the result, initialized to the input
31956 operand to ease control flow. */
31957 res = gen_reg_rtx (mode);
31958 emit_move_insn (res, operand1);
31960 /* xa = abs (operand1) */
31961 xa = ix86_expand_sse_fabs (res, &mask);
31963 /* if (!isless (xa, TWO52)) goto label; */
31964 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
31966 /* xa2 = xa + TWO52 - TWO52; */
31967 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
31968 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
31970 /* dxa = xa2 - xa; */
31971 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
31973 /* generate 0.5, 1.0 and -0.5 */
31974 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
31975 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
31976 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
31980 tmp = gen_reg_rtx (mode);
31981 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
31982 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
31983 emit_insn (gen_rtx_SET (VOIDmode, tmp,
31984 gen_rtx_AND (mode, one, tmp)));
31985 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
31986 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
31987 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
31988 emit_insn (gen_rtx_SET (VOIDmode, tmp,
31989 gen_rtx_AND (mode, one, tmp)));
31990 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
31992 /* res = copysign (xa2, operand1) */
31993 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
31995 emit_label (label);
31996 LABEL_NUSES (label) = 1;
31998 emit_move_insn (operand0, res);
32001 /* Expand SSE sequence for computing trunc from OPERAND1 storing
32004 ix86_expand_trunc (rtx operand0, rtx operand1)
32006 /* C code for SSE variant we expand below.
32007 double xa = fabs (x), x2;
32008 if (!isless (xa, TWO52))
32010 x2 = (double)(long)x;
32011 if (HONOR_SIGNED_ZEROS (mode))
32012 return copysign (x2, x);
32015 enum machine_mode mode = GET_MODE (operand0);
32016 rtx xa, xi, TWO52, label, res, mask;
32018 TWO52 = ix86_gen_TWO52 (mode);
32020 /* Temporary for holding the result, initialized to the input
32021 operand to ease control flow. */
32022 res = gen_reg_rtx (mode);
32023 emit_move_insn (res, operand1);
32025 /* xa = abs (operand1) */
32026 xa = ix86_expand_sse_fabs (res, &mask);
32028 /* if (!isless (xa, TWO52)) goto label; */
32029 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32031 /* x = (double)(long)x */
32032 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32033 expand_fix (xi, res, 0);
32034 expand_float (res, xi, 0);
32036 if (HONOR_SIGNED_ZEROS (mode))
32037 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32039 emit_label (label);
32040 LABEL_NUSES (label) = 1;
32042 emit_move_insn (operand0, res);
32045 /* Expand SSE sequence for computing trunc from OPERAND1 storing
32048 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
32050 enum machine_mode mode = GET_MODE (operand0);
32051 rtx xa, mask, TWO52, label, one, res, smask, tmp;
32053 /* C code for SSE variant we expand below.
32054 double xa = fabs (x), x2;
32055 if (!isless (xa, TWO52))
32057 xa2 = xa + TWO52 - TWO52;
32061 x2 = copysign (xa2, x);
32065 TWO52 = ix86_gen_TWO52 (mode);
32067 /* Temporary for holding the result, initialized to the input
32068 operand to ease control flow. */
32069 res = gen_reg_rtx (mode);
32070 emit_move_insn (res, operand1);
32072 /* xa = abs (operand1) */
32073 xa = ix86_expand_sse_fabs (res, &smask);
32075 /* if (!isless (xa, TWO52)) goto label; */
32076 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32078 /* res = xa + TWO52 - TWO52; */
32079 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32080 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
32081 emit_move_insn (res, tmp);
32084 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32086 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
32087 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
32088 emit_insn (gen_rtx_SET (VOIDmode, mask,
32089 gen_rtx_AND (mode, mask, one)));
32090 tmp = expand_simple_binop (mode, MINUS,
32091 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
32092 emit_move_insn (res, tmp);
32094 /* res = copysign (res, operand1) */
32095 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
32097 emit_label (label);
32098 LABEL_NUSES (label) = 1;
32100 emit_move_insn (operand0, res);
32103 /* Expand SSE sequence for computing round from OPERAND1 storing
32106 ix86_expand_round (rtx operand0, rtx operand1)
32108 /* C code for the stuff we're doing below:
32109 double xa = fabs (x);
32110 if (!isless (xa, TWO52))
32112 xa = (double)(long)(xa + nextafter (0.5, 0.0));
32113 return copysign (xa, x);
32115 enum machine_mode mode = GET_MODE (operand0);
32116 rtx res, TWO52, xa, label, xi, half, mask;
32117 const struct real_format *fmt;
32118 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32120 /* Temporary for holding the result, initialized to the input
32121 operand to ease control flow. */
32122 res = gen_reg_rtx (mode);
32123 emit_move_insn (res, operand1);
32125 TWO52 = ix86_gen_TWO52 (mode);
32126 xa = ix86_expand_sse_fabs (res, &mask);
32127 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32129 /* load nextafter (0.5, 0.0) */
32130 fmt = REAL_MODE_FORMAT (mode);
32131 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32132 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
32134 /* xa = xa + 0.5 */
32135 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
32136 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
32138 /* xa = (double)(int64_t)xa */
32139 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32140 expand_fix (xi, xa, 0);
32141 expand_float (xa, xi, 0);
32143 /* res = copysign (xa, operand1) */
32144 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
32146 emit_label (label);
32147 LABEL_NUSES (label) = 1;
32149 emit_move_insn (operand0, res);
32153 /* Table of valid machine attributes. */
32154 static const struct attribute_spec ix86_attribute_table[] =
32156 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
32157 /* Stdcall attribute says callee is responsible for popping arguments
32158 if they are not variable. */
32159 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32160 /* Fastcall attribute says callee is responsible for popping arguments
32161 if they are not variable. */
32162 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32163 /* Thiscall attribute says callee is responsible for popping arguments
32164 if they are not variable. */
32165 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32167 /* Cdecl attribute says the callee is a normal C declaration. */
32167 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32168 /* Regparm attribute specifies how many integer arguments are to be
32169 passed in registers. */
32170 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
32171 /* Sseregparm attribute says we are using x86_64 calling conventions
32172 for FP arguments. */
32173 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32174 /* force_align_arg_pointer says this function realigns the stack at entry. */
32175 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
32176 false, true, true, ix86_handle_cconv_attribute },
32177 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
32178 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
32179 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
32180 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
32182 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
32183 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
32184 #ifdef SUBTARGET_ATTRIBUTE_TABLE
32185 SUBTARGET_ATTRIBUTE_TABLE,
32187 /* ms_abi and sysv_abi calling convention function attributes. */
32188 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
32189 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
32190 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
32192 { NULL, 0, 0, false, false, false, NULL }
32195 /* Implement targetm.vectorize.builtin_vectorization_cost. */
32197 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
32198 tree vectype ATTRIBUTE_UNUSED,
32199 int misalign ATTRIBUTE_UNUSED)
  switch (type_of_cost)
    {
      case scalar_stmt:
        return ix86_cost->scalar_stmt_cost;

      case scalar_load:
        return ix86_cost->scalar_load_cost;

      case scalar_store:
        return ix86_cost->scalar_store_cost;

      case vector_stmt:
        return ix86_cost->vec_stmt_cost;

      case vector_load:
        return ix86_cost->vec_align_load_cost;

      case vector_store:
        return ix86_cost->vec_store_cost;

      case vec_to_scalar:
        return ix86_cost->vec_to_scalar_cost;

      case scalar_to_vec:
        return ix86_cost->scalar_to_vec_cost;

      case unaligned_load:
      case unaligned_store:
        return ix86_cost->vec_unalign_load_cost;

      case cond_branch_taken:
        return ix86_cost->cond_taken_branch_cost;

      case cond_branch_not_taken:
        return ix86_cost->cond_not_taken_branch_cost;

      default:
        gcc_unreachable ();
    }
32246 /* Implement targetm.vectorize.builtin_vec_perm. */
32249 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
32251 tree itype = TREE_TYPE (vec_type);
32252 bool u = TYPE_UNSIGNED (itype);
32253 enum machine_mode vmode = TYPE_MODE (vec_type);
32254 enum ix86_builtins fcode;
32255 bool ok = TARGET_SSE2;
32261 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
32264 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
32266 itype = ix86_get_builtin_type (IX86_BT_DI);
32271 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
32275 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
32277 itype = ix86_get_builtin_type (IX86_BT_SI);
32281 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
32284 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
32287 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
32290 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
32300 *mask_type = itype;
32301 return ix86_builtins[(int) fcode];
32304 /* Return a vector mode with twice as many elements as VMODE. */
32305 /* ??? Consider moving this to a table generated by genmodes.c. */
32307 static enum machine_mode
32308 doublesize_vector_mode (enum machine_mode vmode)
32312 case V2SFmode: return V4SFmode;
32313 case V1DImode: return V2DImode;
32314 case V2SImode: return V4SImode;
32315 case V4HImode: return V8HImode;
32316 case V8QImode: return V16QImode;
32318 case V2DFmode: return V4DFmode;
32319 case V4SFmode: return V8SFmode;
32320 case V2DImode: return V4DImode;
32321 case V4SImode: return V8SImode;
32322 case V8HImode: return V16HImode;
32323 case V16QImode: return V32QImode;
32325 case V4DFmode: return V8DFmode;
32326 case V8SFmode: return V16SFmode;
32327 case V4DImode: return V8DImode;
32328 case V8SImode: return V16SImode;
32329 case V16HImode: return V32HImode;
32330 case V32QImode: return V64QImode;
32333 gcc_unreachable ();
32337 /* Construct (set target (vec_select op0 (parallel perm))) and
32338 return true if that's a valid instruction in the active ISA. */
32341 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
32343 rtx rperm[MAX_VECT_LEN], x;
32346 for (i = 0; i < nelt; ++i)
32347 rperm[i] = GEN_INT (perm[i]);
32349 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
32350 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
32351 x = gen_rtx_SET (VOIDmode, target, x);
32354 if (recog_memoized (x) < 0)
32362 /* Similar, but generate a vec_concat from op0 and op1 as well. */
32365 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
32366 const unsigned char *perm, unsigned nelt)
32368 enum machine_mode v2mode;
32371 v2mode = doublesize_vector_mode (GET_MODE (op0));
32372 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
32373 return expand_vselect (target, x, perm, nelt);
32376 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
32377 in terms of blendp[sd] / pblendw / pblendvb. */
32380 expand_vec_perm_blend (struct expand_vec_perm_d *d)
32382 enum machine_mode vmode = d->vmode;
32383 unsigned i, mask, nelt = d->nelt;
32384 rtx target, op0, op1, x;
32386 if (!TARGET_SSE4_1 || d->op0 == d->op1)
32388 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
32391 /* This is a blend, not a permute. Elements must stay in their
32392 respective lanes. */
32393 for (i = 0; i < nelt; ++i)
32395 unsigned e = d->perm[i];
32396 if (!(e == i || e == i + nelt))
32403 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
32404 decision should be extracted elsewhere, so that we only try that
32405 sequence once all budget==3 options have been tried. */
32407 /* For bytes, see if bytes move in pairs so we can use pblendw with
32408 an immediate argument, rather than pblendvb with a vector argument. */
32409 if (vmode == V16QImode)
32411 bool pblendw_ok = true;
32412 for (i = 0; i < 16 && pblendw_ok; i += 2)
32413 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
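      /* Example (illustrative, not from the original source): a byte
         permutation such as { 0, 1, 18, 19, 4, 5, 22, 23, ... } moves
         bytes in adjacent even/odd pairs, so the word-granular pblendw
         with an immediate suffices; { 1, 0, ... } breaks the pairing
         and must take the pblendvb path with a vector mask below.  */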
32417 rtx rperm[16], vperm;
32419 for (i = 0; i < nelt; ++i)
32420 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
32422 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
32423 vperm = force_reg (V16QImode, vperm);
32425 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
32430 target = d->target;
32442 for (i = 0; i < nelt; ++i)
32443 mask |= (d->perm[i] >= nelt) << i;
32447 for (i = 0; i < 2; ++i)
32448 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
32452 for (i = 0; i < 4; ++i)
32453 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
32457 for (i = 0; i < 8; ++i)
32458 mask |= (d->perm[i * 2] >= 16) << i;
32462 target = gen_lowpart (vmode, target);
32463 op0 = gen_lowpart (vmode, op0);
32464 op1 = gen_lowpart (vmode, op1);
32468 gcc_unreachable ();
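  /* Example of the mask encoding built above (illustrative, added):
     for V8SFmode with perm = { 0, 9, 2, 11, 4, 13, 6, 15 }, the odd
     elements come from op1, so mask == 0xaa and the vec_merge below
     becomes vblendps with immediate 0xaa.  */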
32471 /* This matches five different patterns with the different modes. */
32472 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
32473 x = gen_rtx_SET (VOIDmode, target, x);
32479 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
32480 in terms of the variable form of vpermilps.
32482 Note that we will have already failed the immediate input vpermilps,
32483 which requires that the high and low part shuffle be identical; the
32484 variable form doesn't require that. */
32487 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
32489 rtx rperm[8], vperm;
32492 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
32495 /* We can only permute within the 128-bit lane. */
32496 for (i = 0; i < 8; ++i)
32498 unsigned e = d->perm[i];
32499 if (i < 4 ? e >= 4 : e < 4)
32506 for (i = 0; i < 8; ++i)
32508 unsigned e = d->perm[i];
32510 /* Within each 128-bit lane, the elements of op0 are numbered
32511 from 0 and the elements of op1 are numbered from 4. */
32517 rperm[i] = GEN_INT (e);
32520 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
32521 vperm = force_reg (V8SImode, vperm);
32522 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
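  /* Example (illustrative, added): for perm = { 1, 0, 3, 2, 5, 4, 7, 6 }
     every element stays within its 128-bit lane, so the control vector
     built above is { 1, 0, 3, 2, 1, 0, 3, 2 } and a single variable
     vpermilps swaps adjacent floats in both lanes.  */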
32527 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
32528 in terms of pshufb or vpperm. */
32531 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
32533 unsigned i, nelt, eltsz;
32534 rtx rperm[16], vperm, target, op0, op1;
32536 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
32538 if (GET_MODE_SIZE (d->vmode) != 16)
32545 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
32547 for (i = 0; i < nelt; ++i)
32549 unsigned j, e = d->perm[i];
32550 for (j = 0; j < eltsz; ++j)
32551 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
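  /* Worked example (illustrative, added): for V4SImode, eltsz == 4, so
     perm = { 2, 3, 0, 1 } expands element-by-element to the byte
     selector { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 } that pshufb
     consumes.  */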
32554 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
32555 vperm = force_reg (V16QImode, vperm);
32557 target = gen_lowpart (V16QImode, d->target);
32558 op0 = gen_lowpart (V16QImode, d->op0);
32559 if (d->op0 == d->op1)
32560 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
32563 op1 = gen_lowpart (V16QImode, d->op1);
32564 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
32570 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
32571 in a single instruction. */
32574 expand_vec_perm_1 (struct expand_vec_perm_d *d)
32576 unsigned i, nelt = d->nelt;
32577 unsigned char perm2[MAX_VECT_LEN];
32579 /* Check plain VEC_SELECT first, because AVX has instructions that could
32580 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
32581 input where SEL+CONCAT may not. */
32582 if (d->op0 == d->op1)
32584 int mask = nelt - 1;
32586 for (i = 0; i < nelt; i++)
32587 perm2[i] = d->perm[i] & mask;
32589 if (expand_vselect (d->target, d->op0, perm2, nelt))
32592 /* There are plenty of patterns in sse.md that are written for
32593 SEL+CONCAT and are not replicated for a single op. Perhaps
32594 that should be changed, to avoid the nastiness here. */
32596 /* Recognize interleave style patterns, which means incrementing
32597 every other permutation operand. */
32598 for (i = 0; i < nelt; i += 2)
32600 perm2[i] = d->perm[i] & mask;
32601 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
32603 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
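      /* Example (illustrative, added): for V4SImode with
         perm = { 0, 0, 1, 1 }, the loop above builds
         perm2 = { 0, 4, 1, 5 }, a vec_select on (vec_concat op0 op0)
         that recog matches as punpckldq, duplicating the two low
         elements.  */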
32606 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
32609 for (i = 0; i < nelt; i += 4)
32611 perm2[i + 0] = d->perm[i + 0] & mask;
32612 perm2[i + 1] = d->perm[i + 1] & mask;
32613 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
32614 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
32617 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
32622 /* Finally, try the fully general two operand permute. */
32623 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
32626 /* Recognize interleave style patterns with reversed operands. */
32627 if (d->op0 != d->op1)
32629 for (i = 0; i < nelt; ++i)
32631 unsigned e = d->perm[i];
32639 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
32643 /* Try the SSE4.1 blend variable merge instructions. */
32644 if (expand_vec_perm_blend (d))
32647 /* Try one of the AVX vpermil variable permutations. */
32648 if (expand_vec_perm_vpermil (d))
32651 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
32652 if (expand_vec_perm_pshufb (d))
32658 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
32659 in terms of a pair of pshuflw + pshufhw instructions. */
32662 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
32664 unsigned char perm2[MAX_VECT_LEN];
32668 if (d->vmode != V8HImode || d->op0 != d->op1)
32671 /* The two permutations only operate in 64-bit lanes. */
32672 for (i = 0; i < 4; ++i)
32673 if (d->perm[i] >= 4)
32675 for (i = 4; i < 8; ++i)
32676 if (d->perm[i] < 4)
32682 /* Emit the pshuflw. */
32683 memcpy (perm2, d->perm, 4);
  for (i = 4; i < 8; ++i)
    perm2[i] = i;
32686 ok = expand_vselect (d->target, d->op0, perm2, 8);
32689 /* Emit the pshufhw. */
32690 memcpy (perm2 + 4, d->perm + 4, 4);
  for (i = 0; i < 4; ++i)
    perm2[i] = i;
32693 ok = expand_vselect (d->target, d->target, perm2, 8);
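  /* Example (illustrative, added): perm = { 3, 2, 1, 0, 7, 6, 5, 4 }
     passes the lane checks above; the first expand_vselect becomes
     pshuflw $0x1b (reverse words 0-3), the second pshufhw $0x1b
     (reverse words 4-7).  */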
32699 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
32700 the permutation using the SSSE3 palignr instruction. This succeeds
32701 when all of the elements in PERM fit within one vector and we merely
32702 need to shift them down so that a single vector permutation has a
32703 chance to succeed. */
32706 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
32708 unsigned i, nelt = d->nelt;
32713 /* Even with AVX, palignr only operates on 128-bit vectors. */
32714 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
32717 min = nelt, max = 0;
32718 for (i = 0; i < nelt; ++i)
32720 unsigned e = d->perm[i];
32726 if (min == 0 || max - min >= nelt)
32729 /* Given that we have SSSE3, we know we'll be able to implement the
32730 single operand permutation after the palignr with pshufb. */
32734 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
32735 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
32736 gen_lowpart (TImode, d->op1),
32737 gen_lowpart (TImode, d->op0), shift));
32739 d->op0 = d->op1 = d->target;
32742 for (i = 0; i < nelt; ++i)
32744 unsigned e = d->perm[i] - min;
32750 /* Test for the degenerate case where the alignment by itself
32751 produces the desired permutation. */
32755 ok = expand_vec_perm_1 (d);
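  /* Worked example (illustrative, added): for V8HImode with
     perm = { 3, 4, 5, 6, 7, 8, 9, 10 }, min == 3, so the palignr above
     shifts the concatenated operands down by 3*16 bits; the residual
     permutation is the identity { 0 ... 7 }, the degenerate case, and
     no further shuffle is required.  */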
32761 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
32762 a two vector permutation into a single vector permutation by using
32763 an interleave operation to merge the vectors. */
32766 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
32768 struct expand_vec_perm_d dremap, dfinal;
32769 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
32770 unsigned contents, h1, h2, h3, h4;
32771 unsigned char remap[2 * MAX_VECT_LEN];
32775 if (d->op0 == d->op1)
32778 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
32779 lanes. We can use similar techniques with the vperm2f128 instruction,
32780 but it requires slightly different logic. */
32781 if (GET_MODE_SIZE (d->vmode) != 16)
32784 /* Examine from whence the elements come. */
32786 for (i = 0; i < nelt; ++i)
32787 contents |= 1u << d->perm[i];
32789 /* Split the two input vectors into 4 halves. */
32790 h1 = (1u << nelt2) - 1;
32795 memset (remap, 0xff, sizeof (remap));
  /* If all of the elements come from the low halves, use interleave low;
     similarly for interleave high.  If the elements come from mis-matched
     halves, we can use shufps for V4SF/V4SI or do a DImode shuffle.  */
32801 if ((contents & (h1 | h3)) == contents)
32803 for (i = 0; i < nelt2; ++i)
32806 remap[i + nelt] = i * 2 + 1;
32807 dremap.perm[i * 2] = i;
32808 dremap.perm[i * 2 + 1] = i + nelt;
32811 else if ((contents & (h2 | h4)) == contents)
32813 for (i = 0; i < nelt2; ++i)
32815 remap[i + nelt2] = i * 2;
32816 remap[i + nelt + nelt2] = i * 2 + 1;
32817 dremap.perm[i * 2] = i + nelt2;
32818 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
32821 else if ((contents & (h1 | h4)) == contents)
32823 for (i = 0; i < nelt2; ++i)
32826 remap[i + nelt + nelt2] = i + nelt2;
32827 dremap.perm[i] = i;
32828 dremap.perm[i + nelt2] = i + nelt + nelt2;
32832 dremap.vmode = V2DImode;
32834 dremap.perm[0] = 0;
32835 dremap.perm[1] = 3;
32838 else if ((contents & (h2 | h3)) == contents)
32840 for (i = 0; i < nelt2; ++i)
32842 remap[i + nelt2] = i;
32843 remap[i + nelt] = i + nelt2;
32844 dremap.perm[i] = i + nelt2;
32845 dremap.perm[i + nelt2] = i + nelt;
32849 dremap.vmode = V2DImode;
32851 dremap.perm[0] = 1;
32852 dremap.perm[1] = 2;
32858 /* Use the remapping array set up above to move the elements from their
32859 swizzled locations into their final destinations. */
32861 for (i = 0; i < nelt; ++i)
32863 unsigned e = remap[d->perm[i]];
32864 gcc_assert (e < nelt);
32865 dfinal.perm[i] = e;
32867 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
32868 dfinal.op1 = dfinal.op0;
32869 dremap.target = dfinal.op0;
32871 /* Test if the final remap can be done with a single insn. For V4SFmode or
32872 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
32874 ok = expand_vec_perm_1 (&dfinal);
32875 seq = get_insns ();
32881 if (dremap.vmode != dfinal.vmode)
32883 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
32884 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
32885 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
32888 ok = expand_vec_perm_1 (&dremap);
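  /* Worked example (illustrative, added): for V4SImode,
     perm = { 0, 1, 4, 5 } uses only the low halves of both inputs
     (the h1|h3 case above), so dremap becomes the interleave-low
     pattern { 0, 4, 1, 5 } (punpckldq) and the final remap on its
     result is { 0, 2, 1, 3 }, a single pshufd.  */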
32895 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
32896 permutation with two pshufb insns and an ior. We should have already
32897 failed all two instruction sequences. */
32900 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
32902 rtx rperm[2][16], vperm, l, h, op, m128;
32903 unsigned int i, nelt, eltsz;
32905 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
32907 gcc_assert (d->op0 != d->op1);
32910 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
32912 /* Generate two permutation masks. If the required element is within
32913 the given vector it is shuffled into the proper lane. If the required
32914 element is in the other vector, force a zero into the lane by setting
32915 bit 7 in the permutation mask. */
32916 m128 = GEN_INT (-128);
32917 for (i = 0; i < nelt; ++i)
32919 unsigned j, e = d->perm[i];
      unsigned which = (e >= nelt);
      if (which)
        e -= nelt;
32924 for (j = 0; j < eltsz; ++j)
32926 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
32927 rperm[1-which][i*eltsz + j] = m128;
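  /* Example of the paired masks (illustrative, added): for V16QImode
     with d->perm[0] == 18, lane 0 gets rperm[0][0] = -128 (bit 7 set,
     zeroing the lane in the op0 shuffle) and rperm[1][0] = 2
     (selecting byte 2 of op1); the ior below merges the two halves.  */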
32931 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
32932 vperm = force_reg (V16QImode, vperm);
32934 l = gen_reg_rtx (V16QImode);
32935 op = gen_lowpart (V16QImode, d->op0);
32936 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
32938 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
32939 vperm = force_reg (V16QImode, vperm);
32941 h = gen_reg_rtx (V16QImode);
32942 op = gen_lowpart (V16QImode, d->op1);
32943 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
32945 op = gen_lowpart (V16QImode, d->target);
32946 emit_insn (gen_iorv16qi3 (op, l, h));
32951 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
32952 and extract-odd permutations. */
32955 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
32962 t1 = gen_reg_rtx (V4DFmode);
32963 t2 = gen_reg_rtx (V4DFmode);
32965 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
32966 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
32967 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
32969 /* Now an unpck[lh]pd will produce the result required. */
32971 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
32973 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
32979 int mask = odd ? 0xdd : 0x88;
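      /* The immediates decode as four 2-bit element selectors, low
         bits first (illustrative note, added): 0x88 gives { 0, 2, 0, 2 }
         (even elements) and 0xdd gives { 1, 3, 1, 3 } (odd elements).  */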
32981 t1 = gen_reg_rtx (V8SFmode);
32982 t2 = gen_reg_rtx (V8SFmode);
32983 t3 = gen_reg_rtx (V8SFmode);
32985 /* Shuffle within the 128-bit lanes to produce:
32986 { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */
32987 emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
32990 /* Shuffle the lanes around to produce:
32991 { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }. */
32992 emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
32995 /* Shuffle within the 128-bit lanes to produce:
32996 { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }. */
32997 emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));
32999 /* Shuffle within the 128-bit lanes to produce:
33000 { 8 a c e c e 8 a } | { 9 b d f d f 9 b }. */
33001 emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));
33003 /* Shuffle the lanes around to produce:
33004 { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */
33005 emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
33014 /* These are always directly implementable by expand_vec_perm_1. */
33015 gcc_unreachable ();
33019 return expand_vec_perm_pshufb2 (d);
33022 /* We need 2*log2(N)-1 operations to achieve odd/even
33023 with interleave. */
33024 t1 = gen_reg_rtx (V8HImode);
33025 t2 = gen_reg_rtx (V8HImode);
33026 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
33027 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
33028 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
33029 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
33031 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
33033 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
33040 return expand_vec_perm_pshufb2 (d);
33043 t1 = gen_reg_rtx (V16QImode);
33044 t2 = gen_reg_rtx (V16QImode);
33045 t3 = gen_reg_rtx (V16QImode);
33046 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
33047 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
33048 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
33049 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
33050 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
33051 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
33053 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
33055 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
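  /* Sanity note (added): for V16QImode, 2*log2(16)-1 == 7, matching
     the six unconditional interleaves above plus the final
     odd/even-selected one.  */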
33061 gcc_unreachable ();
33067 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33068 extract-even and extract-odd permutations. */
33071 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
33073 unsigned i, odd, nelt = d->nelt;
  odd = d->perm[0];
  if (odd != 0 && odd != 1)
    return false;
33079 for (i = 1; i < nelt; ++i)
    if (d->perm[i] != 2 * i + odd)
      return false;
33083 return expand_vec_perm_even_odd_1 (d, odd);
33086 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
33087 permutations. We assume that expand_vec_perm_1 has already failed. */
33090 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
33092 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
33093 enum machine_mode vmode = d->vmode;
33094 unsigned char perm2[4];
33102 /* These are special-cased in sse.md so that we can optionally
33103 use the vbroadcast instruction. They expand to two insns
33104 if the input happens to be in a register. */
33105 gcc_unreachable ();
33111 /* These are always implementable using standard shuffle patterns. */
33112 gcc_unreachable ();
33116 /* These can be implemented via interleave. We save one insn by
33117 stopping once we have promoted to V4SImode and then use pshufd. */
33120 optab otab = vec_interleave_low_optab;
33124 otab = vec_interleave_high_optab;
33129 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
33130 vmode = get_mode_wider_vector (vmode);
33131 op0 = gen_lowpart (vmode, op0);
33133 while (vmode != V4SImode);
33135 memset (perm2, elt, 4);
33136 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
33141 gcc_unreachable ();
33145 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33146 broadcast permutations. */
33149 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
33151 unsigned i, elt, nelt = d->nelt;
  if (d->op0 != d->op1)
    return false;

  elt = d->perm[0];
33157 for (i = 1; i < nelt; ++i)
    if (d->perm[i] != elt)
      return false;
33161 return expand_vec_perm_broadcast_1 (d);
33164 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
33165 With all of the interface bits taken care of, perform the expansion
33166 in D and return true on success. */
33169 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
33171 /* Try a single instruction expansion. */
33172 if (expand_vec_perm_1 (d))
33175 /* Try sequences of two instructions. */
33177 if (expand_vec_perm_pshuflw_pshufhw (d))
33180 if (expand_vec_perm_palignr (d))
33183 if (expand_vec_perm_interleave2 (d))
33186 if (expand_vec_perm_broadcast (d))
33189 /* Try sequences of three instructions. */
33191 if (expand_vec_perm_pshufb2 (d))
33194 /* ??? Look for narrow permutations whose element orderings would
33195 allow the promotion to a wider mode. */
33197 /* ??? Look for sequences of interleave or a wider permute that place
33198 the data into the correct lanes for a half-vector shuffle like
33199 pshuf[lh]w or vpermilps. */
33201 /* ??? Look for sequences of interleave that produce the desired results.
33202 The combinatorics of punpck[lh] get pretty ugly... */
33204 if (expand_vec_perm_even_odd (d))
33210 /* Extract the values from the vector CST into the permutation array in D.
33211 Return 0 on error, 1 if all values from the permutation come from the
33212 first vector, 2 if all values from the second vector, and 3 otherwise. */
33215 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
33217 tree list = TREE_VECTOR_CST_ELTS (cst);
33218 unsigned i, nelt = d->nelt;
33221 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
33223 unsigned HOST_WIDE_INT e;
33225 if (!host_integerp (TREE_VALUE (list), 1))
33227 e = tree_low_cst (TREE_VALUE (list), 1);
33231 ret |= (e < nelt ? 1 : 2);
33234 gcc_assert (list == NULL);
33236 /* For all elements from second vector, fold the elements to first. */
33238 for (i = 0; i < nelt; ++i)
33239 d->perm[i] -= nelt;
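  /* Example (illustrative, added): for V4SImode with mask
     { 4, 5, 6, 7 }, every element selects the second vector, so the
     function returns 2 and the loop above has folded the permutation
     to { 0, 1, 2, 3 } over that single input.  */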
33245 ix86_expand_vec_perm_builtin (tree exp)
33247 struct expand_vec_perm_d d;
33248 tree arg0, arg1, arg2;
33250 arg0 = CALL_EXPR_ARG (exp, 0);
33251 arg1 = CALL_EXPR_ARG (exp, 1);
33252 arg2 = CALL_EXPR_ARG (exp, 2);
33254 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
33255 d.nelt = GET_MODE_NUNITS (d.vmode);
33256 d.testing_p = false;
33257 gcc_assert (VECTOR_MODE_P (d.vmode));
33259 if (TREE_CODE (arg2) != VECTOR_CST)
33261 error_at (EXPR_LOCATION (exp),
33262 "vector permutation requires vector constant");
33266 switch (extract_vec_perm_cst (&d, arg2))
33272 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
33276 if (!operand_equal_p (arg0, arg1, 0))
33278 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33279 d.op0 = force_reg (d.vmode, d.op0);
33280 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33281 d.op1 = force_reg (d.vmode, d.op1);
      /* The elements of PERM do not suggest that only the first operand
         is used, but both operands are identical.  Allow easier matching
         of the permutation by folding the permutation into the single
         input vector.  */
33290 unsigned i, nelt = d.nelt;
33291 for (i = 0; i < nelt; ++i)
33292 if (d.perm[i] >= nelt)
33298 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33299 d.op0 = force_reg (d.vmode, d.op0);
33304 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33305 d.op0 = force_reg (d.vmode, d.op0);
33310 d.target = gen_reg_rtx (d.vmode);
33311 if (ix86_expand_vec_perm_builtin_1 (&d))
  /* For compiler generated permutations, we should never get here,
     because the compiler should also be checking the ok hook.  But
     since this is a builtin the user has access to, don't abort.  */
33320 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
33323 sorry ("vector permutation (%d %d %d %d)",
33324 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
33327 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
33328 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
33329 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
33332 sorry ("vector permutation "
33333 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
33334 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
33335 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
33336 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
33337 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
33340 gcc_unreachable ();
33343 return CONST0_RTX (d.vmode);
33346 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
33349 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
33351 struct expand_vec_perm_d d;
33355 d.vmode = TYPE_MODE (vec_type);
33356 d.nelt = GET_MODE_NUNITS (d.vmode);
33357 d.testing_p = true;
33359 /* Given sufficient ISA support we can just return true here
33360 for selected vector modes. */
33361 if (GET_MODE_SIZE (d.vmode) == 16)
33363 /* All implementable with a single vpperm insn. */
33366 /* All implementable with 2 pshufb + 1 ior. */
33369 /* All implementable with shufpd or unpck[lh]pd. */
33374 vec_mask = extract_vec_perm_cst (&d, mask);
  /* This hook cannot be called in response to something that the
     user does (unlike the builtin expander), so we shouldn't ever see
     an error generated from the extract.  */
33379 gcc_assert (vec_mask > 0 && vec_mask <= 3);
33380 one_vec = (vec_mask != 3);
33382 /* Implementable with shufps or pshufd. */
33383 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
33386 /* Otherwise we have to go through the motions and see if we can
33387 figure out how to generate the requested permutation. */
33388 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
33389 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
33391 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
33394 ret = ix86_expand_vec_perm_builtin_1 (&d);
33401 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
33403 struct expand_vec_perm_d d;
33409 d.vmode = GET_MODE (targ);
33410 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
33411 d.testing_p = false;
33413 for (i = 0; i < nelt; ++i)
33414 d.perm[i] = i * 2 + odd;
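  /* Example (illustrative, added): for V4SImode and odd == 1 the loop
     builds perm = { 1, 3, 5, 7 }, i.e. the odd elements of the
     implicit (op0, op1) concatenation.  */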
33416 /* We'll either be able to implement the permutation directly... */
33417 if (expand_vec_perm_1 (&d))
33420 /* ... or we use the special-case patterns. */
33421 expand_vec_perm_even_odd_1 (&d, odd);
/* This function returns the calling-ABI-specific va_list type node.
   It returns the FNDECL-specific va_list type.  */
33428 ix86_fn_abi_va_list (tree fndecl)
33431 return va_list_type_node;
33432 gcc_assert (fndecl != NULL_TREE);
33434 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
33435 return ms_va_list_type_node;
33437 return sysv_va_list_type_node;
/* Returns the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, it returns NULL_TREE.  */
33444 ix86_canonical_va_list_type (tree type)
33448 /* Resolve references and pointers to va_list type. */
33449 if (TREE_CODE (type) == MEM_REF)
33450 type = TREE_TYPE (type);
33451 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
33452 type = TREE_TYPE (type);
33453 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
33454 type = TREE_TYPE (type);
33458 wtype = va_list_type_node;
33459 gcc_assert (wtype != NULL_TREE);
33461 if (TREE_CODE (wtype) == ARRAY_TYPE)
33463 /* If va_list is an array type, the argument may have decayed
33464 to a pointer type, e.g. by being passed to another function.
33465 In that case, unwrap both types so that we can compare the
33466 underlying records. */
33467 if (TREE_CODE (htype) == ARRAY_TYPE
33468 || POINTER_TYPE_P (htype))
33470 wtype = TREE_TYPE (wtype);
33471 htype = TREE_TYPE (htype);
33474 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
33475 return va_list_type_node;
33476 wtype = sysv_va_list_type_node;
33477 gcc_assert (wtype != NULL_TREE);
33479 if (TREE_CODE (wtype) == ARRAY_TYPE)
33481 /* If va_list is an array type, the argument may have decayed
33482 to a pointer type, e.g. by being passed to another function.
33483 In that case, unwrap both types so that we can compare the
33484 underlying records. */
33485 if (TREE_CODE (htype) == ARRAY_TYPE
33486 || POINTER_TYPE_P (htype))
33488 wtype = TREE_TYPE (wtype);
33489 htype = TREE_TYPE (htype);
33492 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
33493 return sysv_va_list_type_node;
33494 wtype = ms_va_list_type_node;
33495 gcc_assert (wtype != NULL_TREE);
33497 if (TREE_CODE (wtype) == ARRAY_TYPE)
33499 /* If va_list is an array type, the argument may have decayed
33500 to a pointer type, e.g. by being passed to another function.
33501 In that case, unwrap both types so that we can compare the
33502 underlying records. */
33503 if (TREE_CODE (htype) == ARRAY_TYPE
33504 || POINTER_TYPE_P (htype))
33506 wtype = TREE_TYPE (wtype);
33507 htype = TREE_TYPE (htype);
33510 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
33511 return ms_va_list_type_node;
33514 return std_canonical_va_list_type (type);
33517 /* Iterate through the target-specific builtin types for va_list.
33518 IDX denotes the iterator, *PTREE is set to the result type of
33519 the va_list builtin, and *PNAME to its internal type.
33520 Returns zero if there is no element for this index, otherwise
33521 IDX should be increased upon the next call.
33522 Note, do not iterate a base builtin's name like __builtin_va_list.
33523 Used from c_common_nodes_and_builtins. */
33526 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
33536 *ptree = ms_va_list_type_node;
33537 *pname = "__builtin_ms_va_list";
33541 *ptree = sysv_va_list_type_node;
33542 *pname = "__builtin_sysv_va_list";
33550 #undef TARGET_SCHED_DISPATCH
33551 #define TARGET_SCHED_DISPATCH has_dispatch
33552 #undef TARGET_SCHED_DISPATCH_DO
33553 #define TARGET_SCHED_DISPATCH_DO do_dispatch
33555 /* The size of the dispatch window is the total number of bytes of
33556 object code allowed in a window. */
33557 #define DISPATCH_WINDOW_SIZE 16
33559 /* Number of dispatch windows considered for scheduling. */
33560 #define MAX_DISPATCH_WINDOWS 3
33562 /* Maximum number of instructions in a window. */
33565 /* Maximum number of immediate operands in a window. */
33568 /* Maximum number of immediate bits allowed in a window. */
33569 #define MAX_IMM_SIZE 128
33571 /* Maximum number of 32 bit immediates allowed in a window. */
33572 #define MAX_IMM_32 4
33574 /* Maximum number of 64 bit immediates allowed in a window. */
33575 #define MAX_IMM_64 2
33577 /* Maximum total of loads or prefetches allowed in a window. */
33580 /* Maximum total of stores allowed in a window. */
33581 #define MAX_STORE 1
#define BIG 100

/* Dispatch groups.  Instructions that affect the mix in a dispatch window.  */
enum dispatch_group {
  disp_no_group = 0,
  disp_load,
  disp_store,
  disp_load_store,
  disp_prefetch,
  disp_imm,
  disp_imm_32,
  disp_imm_64,
  disp_branch,
  disp_cmp,
  disp_jcc,
  disp_last
};
/* Number of allowable groups in a dispatch window.  It is an array
   indexed by dispatch_group enum.  100 is used as a big number,
   because the number of these kinds of operations does not have any
   effect in a dispatch window, but we need them for other reasons in
   a window.  */
33608 static unsigned int num_allowable_groups[disp_last] = {
  0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
};
33612 char group_name[disp_last + 1][16] = {
33613 "disp_no_group", "disp_load", "disp_store", "disp_load_store",
33614 "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
33615 "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
33618 /* Instruction path. */
33621 path_single, /* Single micro op. */
33622 path_double, /* Double micro op. */
  path_multi, /* Instructions with more than 2 micro ops.  */
/* sched_insn_info defines a window to the instructions scheduled in
   the basic block.  It contains a pointer to the insn_info table and
   the instruction scheduled.

   Windows are allocated for each basic block and are linked with
   each other.  */
typedef struct sched_insn_info_s {
  rtx insn;
  enum dispatch_group group;
  enum insn_path path;
  int byte_len;
  int imm_bytes;
} sched_insn_info;
/* Linked list of dispatch windows.  This is a two-way list of
   dispatch windows of a basic block.  It contains information about
   the number of uops in the window and the total number of
   instructions and of bytes in the object code for this dispatch
   window.  */
33646 typedef struct dispatch_windows_s {
33647 int num_insn; /* Number of insn in the window. */
33648 int num_uops; /* Number of uops in the window. */
33649 int window_size; /* Number of bytes in the window. */
  int window_num; /* Window number, either 0 or 1.  */
33651 int num_imm; /* Number of immediates in an insn. */
33652 int num_imm_32; /* Number of 32 bit immediates in an insn. */
33653 int num_imm_64; /* Number of 64 bit immediates in an insn. */
33654 int imm_size; /* Total immediates in the window. */
33655 int num_loads; /* Total memory loads in the window. */
33656 int num_stores; /* Total memory stores in the window. */
33657 int violation; /* Violation exists in window. */
33658 sched_insn_info *window; /* Pointer to the window. */
33659 struct dispatch_windows_s *next;
33660 struct dispatch_windows_s *prev;
33661 } dispatch_windows;
/* Immediate values used in an insn.  */
typedef struct imm_info_s
{
  int imm;
  int imm32;
  int imm64;
} imm_info;
33671 static dispatch_windows *dispatch_window_list;
33672 static dispatch_windows *dispatch_window_list1;
33674 /* Get dispatch group of insn. */
33676 static enum dispatch_group
33677 get_mem_group (rtx insn)
33679 enum attr_memory memory;
33681 if (INSN_CODE (insn) < 0)
33682 return disp_no_group;
33683 memory = get_attr_memory (insn);
33684 if (memory == MEMORY_STORE)
33687 if (memory == MEMORY_LOAD)
33690 if (memory == MEMORY_BOTH)
33691 return disp_load_store;
33693 return disp_no_group;
/* Return true if insn is a compare instruction.  */

static bool
is_cmp (rtx insn)
{
33701 enum attr_type type;
33703 type = get_attr_type (insn);
33704 return (type == TYPE_TEST
33705 || type == TYPE_ICMP
33706 || type == TYPE_FCMP
33707 || GET_CODE (PATTERN (insn)) == COMPARE);
/* Return true if a dispatch violation is encountered.  */

static bool
dispatch_violation (void)
33715 if (dispatch_window_list->next)
33716 return dispatch_window_list->next->violation;
33717 return dispatch_window_list->violation;
33720 /* Return true if insn is a branch instruction. */
33723 is_branch (rtx insn)
33725 return (CALL_P (insn) || JUMP_P (insn));
33728 /* Return true if insn is a prefetch instruction. */
33731 is_prefetch (rtx insn)
33733 return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
33736 /* This function initializes a dispatch window and the list container holding a
33737 pointer to the window. */
33740 init_window (int window_num)
33743 dispatch_windows *new_list;
33745 if (window_num == 0)
33746 new_list = dispatch_window_list;
33748 new_list = dispatch_window_list1;
33750 new_list->num_insn = 0;
33751 new_list->num_uops = 0;
33752 new_list->window_size = 0;
33753 new_list->next = NULL;
33754 new_list->prev = NULL;
33755 new_list->window_num = window_num;
33756 new_list->num_imm = 0;
33757 new_list->num_imm_32 = 0;
33758 new_list->num_imm_64 = 0;
33759 new_list->imm_size = 0;
33760 new_list->num_loads = 0;
33761 new_list->num_stores = 0;
33762 new_list->violation = false;
33764 for (i = 0; i < MAX_INSN; i++)
33766 new_list->window[i].insn = NULL;
33767 new_list->window[i].group = disp_no_group;
33768 new_list->window[i].path = no_path;
33769 new_list->window[i].byte_len = 0;
33770 new_list->window[i].imm_bytes = 0;
33775 /* This function allocates and initializes a dispatch window and the
33776 list container holding a pointer to the window. */
33778 static dispatch_windows *
33779 allocate_window (void)
33781 dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
33782 new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);
33787 /* This routine initializes the dispatch scheduling information. It
33788 initiates building dispatch scheduler tables and constructs the
33789 first dispatch window. */
33792 init_dispatch_sched (void)
33794 /* Allocate a dispatch list and a window. */
33795 dispatch_window_list = allocate_window ();
33796 dispatch_window_list1 = allocate_window ();
/* This function returns true if a branch is detected.  End of a basic
   block does not have to be a branch, but here we assume only branches
   end a window.  */

static bool
is_end_basic_block (enum dispatch_group group)
33808 return group == disp_branch;
33811 /* This function is called when the end of a window processing is reached. */
33814 process_end_window (void)
33816 gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
33817 if (dispatch_window_list->next)
33819 gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
33820 gcc_assert (dispatch_window_list->window_size
33821 + dispatch_window_list1->window_size <= 48);
/* Allocates a new dispatch window and adds it to WINDOW_LIST.
   WINDOW_NUM is either 0 or 1.  A maximum of two windows are generated
   for 48 bytes of instructions.  Note that these windows are not
   dispatch windows of size DISPATCH_WINDOW_SIZE.  */
33832 static dispatch_windows *
33833 allocate_next_window (int window_num)
33835 if (window_num == 0)
33837 if (dispatch_window_list->next)
33840 return dispatch_window_list;
33843 dispatch_window_list->next = dispatch_window_list1;
33844 dispatch_window_list1->prev = dispatch_window_list;
33846 return dispatch_window_list1;
33849 /* Increment the number of immediate operands of an instruction. */
33852 find_constant_1 (rtx *in_rtx, imm_info *imm_values)
33857 switch ( GET_CODE (*in_rtx))
33862 (imm_values->imm)++;
33863 if (x86_64_immediate_operand (*in_rtx, SImode))
33864 (imm_values->imm32)++;
      else
        (imm_values->imm64)++;
33870 (imm_values->imm)++;
33871 (imm_values->imm64)++;
33875 if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
33877 (imm_values->imm)++;
33878 (imm_values->imm32)++;
33889 /* Compute number of immediate operands of an instruction. */
33892 find_constant (rtx in_rtx, imm_info *imm_values)
33894 for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
33895 (rtx_function) find_constant_1, (void *) imm_values);
33898 /* Return total size of immediate operands of an instruction along with number
33899 of corresponding immediate-operands. It initializes its parameters to zero
   before calling FIND_CONSTANT.
33901 INSN is the input instruction. IMM is the total of immediates.
33902 IMM32 is the number of 32 bit immediates. IMM64 is the number of 64
33906 get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
33908 imm_info imm_values = {0, 0, 0};
33910 find_constant (insn, &imm_values);
33911 *imm = imm_values.imm;
33912 *imm32 = imm_values.imm32;
33913 *imm64 = imm_values.imm64;
33914 return imm_values.imm32 * 4 + imm_values.imm64 * 8;
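/* Worked example (illustrative, added): an insn with one 32-bit and
   one 64-bit immediate yields *imm == 2, *imm32 == 1, *imm64 == 1 and
   a returned size of 1*4 + 1*8 == 12 bytes.  */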
/* This function indicates if an operand of an instruction is an
   immediate.  */

static int
has_immediate (rtx insn)
33923 int num_imm_operand;
33924 int num_imm32_operand;
33925 int num_imm64_operand;
33928 return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
33929 &num_imm64_operand);
33933 /* Return single or double path for instructions. */
33935 static enum insn_path
33936 get_insn_path (rtx insn)
33938 enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);
33940 if ((int)path == 0)
33941 return path_single;
  if ((int)path == 1)
    return path_double;

  return path_multi;
33949 /* Return insn dispatch group. */
33951 static enum dispatch_group
33952 get_insn_group (rtx insn)
33954 enum dispatch_group group = get_mem_group (insn);
33958 if (is_branch (insn))
33959 return disp_branch;
33964 if (has_immediate (insn))
33967 if (is_prefetch (insn))
33968 return disp_prefetch;
33970 return disp_no_group;
33973 /* Count number of GROUP restricted instructions in a dispatch
33974 window WINDOW_LIST. */
33977 count_num_restricted (rtx insn, dispatch_windows *window_list)
33979 enum dispatch_group group = get_insn_group (insn);
33981 int num_imm_operand;
33982 int num_imm32_operand;
33983 int num_imm64_operand;
33985 if (group == disp_no_group)
33988 if (group == disp_imm)
33990 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
33991 &num_imm64_operand);
33992 if (window_list->imm_size + imm_size > MAX_IMM_SIZE
33993 || num_imm_operand + window_list->num_imm > MAX_IMM
33994 || (num_imm32_operand > 0
33995 && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
33996 || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
33997 || (num_imm64_operand > 0
33998 && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
33999 || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
34000 || (window_list->imm_size + imm_size == MAX_IMM_SIZE
34001 && num_imm64_operand > 0
34002 && ((window_list->num_imm_64 > 0
34003 && window_list->num_insn >= 2)
34004 || window_list->num_insn >= 3)))
34010 if ((group == disp_load_store
34011 && (window_list->num_loads >= MAX_LOAD
34012 || window_list->num_stores >= MAX_STORE))
34013 || ((group == disp_load
34014 || group == disp_prefetch)
34015 && window_list->num_loads >= MAX_LOAD)
34016 || (group == disp_store
34017 && window_list->num_stores >= MAX_STORE))
34023 /* This function returns true if insn satisfies dispatch rules on the
34024 last window scheduled. */
34027 fits_dispatch_window (rtx insn)
34029 dispatch_windows *window_list = dispatch_window_list;
34030 dispatch_windows *window_list_next = dispatch_window_list->next;
34031 unsigned int num_restrict;
34032 enum dispatch_group group = get_insn_group (insn);
34033 enum insn_path path = get_insn_path (insn);
34036 /* Make disp_cmp and disp_jcc get scheduled at the latest. These
34037 instructions should be given the lowest priority in the
34038 scheduling process in Haifa scheduler to make sure they will be
     scheduled in the same dispatch window as the reference to them.  */
34040 if (group == disp_jcc || group == disp_cmp)
34043 /* Check nonrestricted. */
34044 if (group == disp_no_group || group == disp_branch)
34047 /* Get last dispatch window. */
34048 if (window_list_next)
34049 window_list = window_list_next;
34051 if (window_list->window_num == 1)
34053 sum = window_list->prev->window_size + window_list->window_size;
      if (sum == 32
          || (min_insn_size (insn) + sum) >= 48)
34057 /* Window 1 is full. Go for next window. */
34061 num_restrict = count_num_restricted (insn, window_list);
34063 if (num_restrict > num_allowable_groups[group])
34066 /* See if it fits in the first window. */
34067 if (window_list->window_num == 0)
      /* The first window should have only single- and double-path
         uops.  */
34071 if (path == path_double
34072 && (window_list->num_uops + 2) > MAX_INSN)
34074 else if (path != path_single)
34080 /* Add an instruction INSN with NUM_UOPS micro-operations to the
34081 dispatch window WINDOW_LIST. */
34084 add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
34086 int byte_len = min_insn_size (insn);
34087 int num_insn = window_list->num_insn;
34089 sched_insn_info *window = window_list->window;
34090 enum dispatch_group group = get_insn_group (insn);
34091 enum insn_path path = get_insn_path (insn);
34092 int num_imm_operand;
34093 int num_imm32_operand;
34094 int num_imm64_operand;
34096 if (!window_list->violation && group != disp_cmp
34097 && !fits_dispatch_window (insn))
34098 window_list->violation = true;
34100 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34101 &num_imm64_operand);
34103 /* Initialize window with new instruction. */
34104 window[num_insn].insn = insn;
34105 window[num_insn].byte_len = byte_len;
34106 window[num_insn].group = group;
34107 window[num_insn].path = path;
34108 window[num_insn].imm_bytes = imm_size;
34110 window_list->window_size += byte_len;
34111 window_list->num_insn = num_insn + 1;
34112 window_list->num_uops = window_list->num_uops + num_uops;
34113 window_list->imm_size += imm_size;
34114 window_list->num_imm += num_imm_operand;
34115 window_list->num_imm_32 += num_imm32_operand;
34116 window_list->num_imm_64 += num_imm64_operand;
34118 if (group == disp_store)
34119 window_list->num_stores += 1;
34120 else if (group == disp_load
34121 || group == disp_prefetch)
34122 window_list->num_loads += 1;
34123 else if (group == disp_load_store)
34125 window_list->num_stores += 1;
34126 window_list->num_loads += 1;
34130 /* Adds a scheduled instruction, INSN, to the current dispatch window.
34131 If the total bytes of instructions or the number of instructions in
   the window exceed the allowable limits, it allocates a new window.  */
34135 add_to_dispatch_window (rtx insn)
34138 dispatch_windows *window_list;
34139 dispatch_windows *next_list;
34140 dispatch_windows *window0_list;
34141 enum insn_path path;
34142 enum dispatch_group insn_group;
34150 if (INSN_CODE (insn) < 0)
34153 byte_len = min_insn_size (insn);
34154 window_list = dispatch_window_list;
34155 next_list = window_list->next;
34156 path = get_insn_path (insn);
34157 insn_group = get_insn_group (insn);
34159 /* Get the last dispatch window. */
34161 window_list = dispatch_window_list->next;
34163 if (path == path_single)
34165 else if (path == path_double)
34168 insn_num_uops = (int) path;
34170 /* If current window is full, get a new window.
34171 Window number zero is full, if MAX_INSN uops are scheduled in it.
34172 Window number one is full, if window zero's bytes plus window
34173 one's bytes is 32, or if the bytes of the new instruction added
     to the total make it greater than 48, or it already has MAX_INSN
     instructions in it.  */
34176 num_insn = window_list->num_insn;
34177 num_uops = window_list->num_uops;
34178 window_num = window_list->window_num;
34179 insn_fits = fits_dispatch_window (insn);
34181 if (num_insn >= MAX_INSN
      || num_uops + insn_num_uops > MAX_INSN
      || !insn_fits)
    {
34185 window_num = ~window_num & 1;
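      /* Note (added): "~window_num & 1" toggles between the two
         windows, 0 -> 1 and 1 -> 0.  */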
34186 window_list = allocate_next_window (window_num);
34189 if (window_num == 0)
34191 add_insn_window (insn, window_list, insn_num_uops);
34192 if (window_list->num_insn >= MAX_INSN
34193 && insn_group == disp_branch)
34195 process_end_window ();
34199 else if (window_num == 1)
34201 window0_list = window_list->prev;
34202 sum = window0_list->window_size + window_list->window_size;
      if (sum == 32
          || (byte_len + sum) >= 48)
34206 process_end_window ();
34207 window_list = dispatch_window_list;
34210 add_insn_window (insn, window_list, insn_num_uops);
34213 gcc_unreachable ();
34215 if (is_end_basic_block (insn_group))
      /* End of basic block is reached; do end-basic-block processing.  */
34218 process_end_window ();
34223 /* Print the dispatch window, WINDOW_NUM, to FILE. */
34225 DEBUG_FUNCTION static void
34226 debug_dispatch_window_file (FILE *file, int window_num)
34228 dispatch_windows *list;
34231 if (window_num == 0)
34232 list = dispatch_window_list;
34234 list = dispatch_window_list1;
34236 fprintf (file, "Window #%d:\n", list->window_num);
34237 fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
34238 list->num_insn, list->num_uops, list->window_size);
34239 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
34240 list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);
34242 fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
34244 fprintf (file, " insn info:\n");
34246 for (i = 0; i < MAX_INSN; i++)
34248 if (!list->window[i].insn)
34250 fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
34251 i, group_name[list->window[i].group],
34252 i, (void *)list->window[i].insn,
34253 i, list->window[i].path,
34254 i, list->window[i].byte_len,
34255 i, list->window[i].imm_bytes);
34259 /* Print to stdout a dispatch window. */
34261 DEBUG_FUNCTION void
34262 debug_dispatch_window (int window_num)
34264 debug_dispatch_window_file (stdout, window_num);
34267 /* Print INSN dispatch information to FILE. */
34269 DEBUG_FUNCTION static void
34270 debug_insn_dispatch_info_file (FILE *file, rtx insn)
34273 enum insn_path path;
34274 enum dispatch_group group;
34276 int num_imm_operand;
34277 int num_imm32_operand;
34278 int num_imm64_operand;
34280 if (INSN_CODE (insn) < 0)
34283 byte_len = min_insn_size (insn);
34284 path = get_insn_path (insn);
34285 group = get_insn_group (insn);
34286 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34287 &num_imm64_operand);
34289 fprintf (file, " insn info:\n");
34290 fprintf (file, " group = %s, path = %d, byte_len = %d\n",
34291 group_name[group], path, byte_len);
34292 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
34293 num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
34296 /* Print to STDERR the status of the ready list with respect to
34297 dispatch windows. */
34299 DEBUG_FUNCTION void
34300 debug_ready_dispatch (void)
34303 int no_ready = number_in_ready ();
34305 fprintf (stdout, "Number of ready: %d\n", no_ready);
34307 for (i = 0; i < no_ready; i++)
34308 debug_insn_dispatch_info_file (stdout, get_ready_element (i));
34311 /* This routine is the driver of the dispatch scheduler. */
34314 do_dispatch (rtx insn, int mode)
34316 if (mode == DISPATCH_INIT)
34317 init_dispatch_sched ();
34318 else if (mode == ADD_TO_DISPATCH_WINDOW)
34319 add_to_dispatch_window (insn);
34322 /* Return TRUE if Dispatch Scheduling is supported. */
34325 has_dispatch (rtx insn, int action)
34327 if (ix86_tune == PROCESSOR_BDVER1 && flag_dispatch_scheduler)
34333 case IS_DISPATCH_ON:
34338 return is_cmp (insn);
34340 case DISPATCH_VIOLATION:
34341 return dispatch_violation ();
34343 case FITS_DISPATCH_WINDOW:
34344 return fits_dispatch_window (insn);
34350 /* ??? No autovectorization into MMX or 3DNOW until we can reliably
34351 place emms and femms instructions. */
34353 static enum machine_mode
34354 ix86_preferred_simd_mode (enum machine_mode mode)
34356 /* Disable double precision vectorizer if needed. */
  if (mode == DFmode && !TARGET_VECTORIZE_DOUBLE)
    return word_mode;

  if (!TARGET_AVX && !TARGET_SSE)
    return word_mode;
34366 return TARGET_AVX ? V8SFmode : V4SFmode;
34368 return TARGET_AVX ? V4DFmode : V2DFmode;
/* If AVX is enabled then try vectorizing with both 256-bit and 128-bit
   vectors.  */
34387 static unsigned int
34388 ix86_autovectorize_vector_sizes (void)
34390 return TARGET_AVX ? 32 | 16 : 0;
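/* Note (added): the return value is a bitmask of candidate vector
   sizes in bytes; with AVX both 32-byte and 16-byte vectors are tried,
   while 0 leaves only the preferred SIMD mode.  */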
34393 /* Initialize the GCC target structure. */
34394 #undef TARGET_RETURN_IN_MEMORY
34395 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
34397 #undef TARGET_LEGITIMIZE_ADDRESS
34398 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
34400 #undef TARGET_ATTRIBUTE_TABLE
34401 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
34402 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
34403 # undef TARGET_MERGE_DECL_ATTRIBUTES
34404 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
34407 #undef TARGET_COMP_TYPE_ATTRIBUTES
34408 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
34410 #undef TARGET_INIT_BUILTINS
34411 #define TARGET_INIT_BUILTINS ix86_init_builtins
34412 #undef TARGET_BUILTIN_DECL
34413 #define TARGET_BUILTIN_DECL ix86_builtin_decl
34414 #undef TARGET_EXPAND_BUILTIN
34415 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
34417 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
34418 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
34419 ix86_builtin_vectorized_function
34421 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
34422 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
34424 #undef TARGET_BUILTIN_RECIPROCAL
34425 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
34427 #undef TARGET_ASM_FUNCTION_EPILOGUE
34428 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
34430 #undef TARGET_ENCODE_SECTION_INFO
34431 #ifndef SUBTARGET_ENCODE_SECTION_INFO
34432 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
34434 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
34437 #undef TARGET_ASM_OPEN_PAREN
34438 #define TARGET_ASM_OPEN_PAREN ""
34439 #undef TARGET_ASM_CLOSE_PAREN
34440 #define TARGET_ASM_CLOSE_PAREN ""
34442 #undef TARGET_ASM_BYTE_OP
34443 #define TARGET_ASM_BYTE_OP ASM_BYTE
34445 #undef TARGET_ASM_ALIGNED_HI_OP
34446 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
34447 #undef TARGET_ASM_ALIGNED_SI_OP
34448 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
34450 #undef TARGET_ASM_ALIGNED_DI_OP
34451 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
34454 #undef TARGET_PROFILE_BEFORE_PROLOGUE
34455 #define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue
34457 #undef TARGET_ASM_UNALIGNED_HI_OP
34458 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
34459 #undef TARGET_ASM_UNALIGNED_SI_OP
34460 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
34461 #undef TARGET_ASM_UNALIGNED_DI_OP
34462 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
34464 #undef TARGET_PRINT_OPERAND
34465 #define TARGET_PRINT_OPERAND ix86_print_operand
34466 #undef TARGET_PRINT_OPERAND_ADDRESS
34467 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
34468 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
34469 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
34470 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
34471 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra
34473 #undef TARGET_SCHED_INIT_GLOBAL
34474 #define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
34475 #undef TARGET_SCHED_ADJUST_COST
34476 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
34477 #undef TARGET_SCHED_ISSUE_RATE
34478 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
34479 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
34480 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
34481 ia32_multipass_dfa_lookahead
34483 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
34484 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
34487 #undef TARGET_HAVE_TLS
34488 #define TARGET_HAVE_TLS true
34490 #undef TARGET_CANNOT_FORCE_CONST_MEM
34491 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
34492 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
34493 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
34495 #undef TARGET_DELEGITIMIZE_ADDRESS
34496 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
34498 #undef TARGET_MS_BITFIELD_LAYOUT_P
34499 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
34502 #undef TARGET_BINDS_LOCAL_P
34503 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
34505 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
34506 #undef TARGET_BINDS_LOCAL_P
34507 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
34510 #undef TARGET_ASM_OUTPUT_MI_THUNK
34511 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
34512 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
34513 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
34515 #undef TARGET_ASM_FILE_START
34516 #define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE ix86_option_override
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE ix86_option_optimization_table
#undef TARGET_OPTION_INIT_STRUCT
#define TARGET_OPTION_INIT_STRUCT ix86_option_init_struct
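
/* Cost model hooks consulted by the register allocator and the RTL
   optimizers.  */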
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_ENUM_VA_LIST_P
#define TARGET_ENUM_VA_LIST_P ix86_enum_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
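
/* Argument passing and calling convention hooks.  */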
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG ix86_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS ix86_return_pops_args

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_SUPPORTS_SPLIT_STACK
#define TARGET_SUPPORTS_SPLIT_STACK ix86_supports_split_stack

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p
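
/* Auto-vectorizer hooks: cost model, builtin permutations and SIMD
   mode selection.  */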
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
  ix86_vectorize_builtin_vec_perm
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
  ix86_vectorize_builtin_vec_perm_ok
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  ix86_preferred_simd_mode
#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
  ix86_autovectorize_vector_sizes
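
/* Per-function target option and target attribute support.  */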
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end
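
/* TARGET_INITIALIZER (from target-def.h) expands to an aggregate
   initializer that collects every TARGET_* macro defined above into
   the targetm hook vector; any hook not overridden here keeps the
   default that target-def.h supplies.  Target-independent code
   reaches the port only through this vector, along the lines of this
   hypothetical caller (for illustration only):

     if (targetm.function_ok_for_sibcall (decl, exp))
       ... expand the call as a sibcall ...  */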
struct gcc_target targetm = TARGET_INITIALIZER;
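
/* gt-i386.h is generated by gengtype; it supplies the garbage
   collector root tables for the GTY-marked data in this file.  */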
#include "gt-i386.h"