/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"
#include "debug.h"
#include "dwarf2out.h"
#include "sched-int.h"
#include "sbitmap.h"
#include "fibheap.h"
#include "opts.h"
enum upper_128bits_state
{
  unknown = 0,
  unused,
  used
};
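/* The three states above form the lattice used by the vzeroupper
   dataflow walk below: UNKNOWN (the zero-initialized default from
   alloc_aux_for_blocks) means the block has not been scanned yet,
   UNUSED means the upper 128 bits of all AVX registers are known to
   be clear, and USED means a 256-bit value may still be live.  */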
typedef struct block_info_def
{
  /* State of the upper 128bits of AVX registers at exit.  */
  enum upper_128bits_state state;
  /* TRUE if state of the upper 128bits of AVX registers is unchanged
     in this block.  */
  bool unchanged;
  /* TRUE if block has been processed.  */
  bool processed;
  /* TRUE if block has been scanned.  */
  bool scanned;
  /* Previous state of the upper 128bits of AVX registers at entry.  */
  enum upper_128bits_state prev;
} *block_info;

#define BLOCK_INFO(B)  ((block_info) (B)->aux)
enum call_avx256_state
{
  /* Callee returns 256bit AVX register.  */
  callee_return_avx256 = -1,
  /* Callee returns and passes 256bit AVX register.  */
  callee_return_pass_avx256,
  /* Callee passes 256bit AVX register.  */
  callee_pass_avx256,
  /* Callee doesn't return nor pass 256bit AVX register, or no
     256bit AVX register in function return.  */
  call_no_avx256,
  /* vzeroupper intrinsic.  */
  vzeroupper_intrinsic
};
/* Check if a 256bit AVX register is referenced in stores.  */

static void
check_avx256_stores (rtx dest, const_rtx set, void *data)
{
  if ((REG_P (dest)
       && VALID_AVX256_REG_MODE (GET_MODE (dest)))
      || (GET_CODE (set) == SET
	  && REG_P (SET_SRC (set))
	  && VALID_AVX256_REG_MODE (GET_MODE (SET_SRC (set)))))
    {
      enum upper_128bits_state *state
	= (enum upper_128bits_state *) data;
      *state = used;
    }
}
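/* This is the callback handed to note_stores in
   move_or_delete_vzeroupper_2 below: any SET whose destination, or
   whose register source, has a 256-bit AVX mode switches the tracked
   state to USED.  */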
/* Helper function for move_or_delete_vzeroupper_1.  Look for vzeroupper
   in basic block BB.  Delete it if upper 128bit AVX registers are
   unused.  If it isn't deleted, move it to just before a jump insn.

   STATE is state of the upper 128bits of AVX registers at entry.  */

static void
move_or_delete_vzeroupper_2 (basic_block bb,
			     enum upper_128bits_state state)
{
  rtx insn, bb_end;
  rtx vzeroupper_insn = NULL_RTX;
  rtx pat;
  int avx256;
  bool unchanged;

  if (BLOCK_INFO (bb)->unchanged)
    {
      if (dump_file)
	fprintf (dump_file, " [bb %i] unchanged: upper 128bits: %d\n",
		 bb->index, state);

      BLOCK_INFO (bb)->state = state;
      return;
    }

  if (BLOCK_INFO (bb)->scanned && BLOCK_INFO (bb)->prev == state)
    {
      if (dump_file)
	fprintf (dump_file, " [bb %i] scanned: upper 128bits: %d\n",
		 bb->index, BLOCK_INFO (bb)->state);
      return;
    }

  BLOCK_INFO (bb)->prev = state;

  if (dump_file)
    fprintf (dump_file, " [bb %i] entry: upper 128bits: %d\n",
	     bb->index, state);

  unchanged = true;

  /* BB_END changes when it is deleted.  */
  bb_end = BB_END (bb);
  insn = BB_HEAD (bb);
  while (insn != bb_end)
    {
      insn = NEXT_INSN (insn);

      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Move vzeroupper before jump/call.  */
      if (JUMP_P (insn) || CALL_P (insn))
	{
	  if (!vzeroupper_insn)
	    continue;

	  if (PREV_INSN (insn) != vzeroupper_insn)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Move vzeroupper after:\n");
		  print_rtl_single (dump_file, PREV_INSN (insn));
		  fprintf (dump_file, "before:\n");
		  print_rtl_single (dump_file, insn);
		}
	      reorder_insns_nobb (vzeroupper_insn, vzeroupper_insn,
				  PREV_INSN (insn));
	    }
	  vzeroupper_insn = NULL_RTX;
	  continue;
	}

      pat = PATTERN (insn);

      /* Check insn for vzeroupper intrinsic.  */
      if (GET_CODE (pat) == UNSPEC_VOLATILE
	  && XINT (pat, 1) == UNSPECV_VZEROUPPER)
	{
	  if (dump_file)
	    {
	      /* Found vzeroupper intrinsic.  */
	      fprintf (dump_file, "Found vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	}
      else
	{
	  /* Check insn for vzeroall intrinsic.  */
	  if (GET_CODE (pat) == PARALLEL
	      && GET_CODE (XVECEXP (pat, 0, 0)) == UNSPEC_VOLATILE
	      && XINT (XVECEXP (pat, 0, 0), 1) == UNSPECV_VZEROALL)
	    {
	      state = unused;
	      unchanged = false;

	      /* Delete pending vzeroupper insertion.  */
	      if (vzeroupper_insn)
		{
		  delete_insn (vzeroupper_insn);
		  vzeroupper_insn = NULL_RTX;
		}
	    }
	  else if (state != used)
	    {
	      note_stores (pat, check_avx256_stores, &state);
	      if (state == used)
		unchanged = false;
	    }
	  continue;
	}

      /* Process vzeroupper intrinsic.  */
      avx256 = INTVAL (XVECEXP (pat, 0, 0));

      if (state == unused)
	{
	  /* Since the upper 128bits are cleared, callee must not pass
	     256bit AVX register.  We only need to check if callee
	     returns 256bit AVX register.  */
	  if (avx256 == callee_return_avx256)
	    {
	      state = used;
	      unchanged = false;
	    }

	  /* Remove unnecessary vzeroupper since upper 128bits are
	     cleared.  */
	  if (dump_file)
	    {
	      fprintf (dump_file, "Delete redundant vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	  delete_insn (insn);
	}
      else
	{
	  /* Set state to UNUSED if callee doesn't return 256bit AVX
	     register.  */
	  if (avx256 != callee_return_pass_avx256)
	    state = unused;

	  if (avx256 == callee_return_pass_avx256
	      || avx256 == callee_pass_avx256)
	    {
	      /* Must remove vzeroupper since callee passes in 256bit
		 AVX register.  */
	      if (dump_file)
		{
		  fprintf (dump_file, "Delete callee pass vzeroupper:\n");
		  print_rtl_single (dump_file, insn);
		}
	      delete_insn (insn);
	    }
	  else
	    {
	      vzeroupper_insn = insn;
	      unchanged = false;
	    }
	}
    }

  BLOCK_INFO (bb)->state = state;
  BLOCK_INFO (bb)->unchanged = unchanged;
  BLOCK_INFO (bb)->scanned = true;

  if (dump_file)
    fprintf (dump_file, " [bb %i] exit: %s: upper 128bits: %d\n",
	     bb->index, unchanged ? "unchanged" : "changed",
	     state);
}
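/* Note that a vzeroupper surviving the scan above is not left where
   it was emitted: it is recorded and then sunk to just before the
   next jump or call, the points where clearing the upper halves
   matters for the AVX/SSE transition penalty.  */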
/* Helper function for move_or_delete_vzeroupper.  Process vzeroupper
   in BLOCK and check its predecessor blocks.  Treat UNKNOWN state
   as USED if UNKNOWN_IS_UNUSED is true.  Return TRUE if the exit
   state is changed.  */

static bool
move_or_delete_vzeroupper_1 (basic_block block, bool unknown_is_unused)
{
  edge e;
  edge_iterator ei;
  enum upper_128bits_state state, old_state, new_state;
  bool seen_unknown;

  if (dump_file)
    fprintf (dump_file, " Process [bb %i]: status: %d\n",
	     block->index, BLOCK_INFO (block)->processed);

  if (BLOCK_INFO (block)->processed)
    return false;

  state = unused;

  /* Check all predecessor edges of this block.  */
  seen_unknown = false;
  FOR_EACH_EDGE (e, ei, block->preds)
    {
      switch (BLOCK_INFO (e->src)->state)
	{
	case unknown:
	  if (!unknown_is_unused)
	    seen_unknown = true;
	case unused:
	  break;
	case used:
	  state = used;
	  goto done;
	}
    }

  if (seen_unknown)
    state = unknown;

 done:
  old_state = BLOCK_INFO (block)->state;
  move_or_delete_vzeroupper_2 (block, state);
  new_state = BLOCK_INFO (block)->state;

  if (state != unknown || new_state == used)
    BLOCK_INFO (block)->processed = true;

  /* Need to rescan if the upper 128bits of AVX registers are changed
     to USED at exit.  */
  if (new_state != old_state)
    {
      if (new_state == used)
	cfun->machine->rescan_vzeroupper_p = 1;
      return true;
    }
  else
    return false;
}
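/* The predecessor walk above is a three-value meet: any USED
   predecessor forces USED immediately (the goto), an UNKNOWN
   predecessor taints the result unless UNKNOWN_IS_UNUSED, and only
   an all-UNUSED set of predecessors yields UNUSED.  */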
/* Go through the instruction stream looking for vzeroupper.  Delete
   it if upper 128bit AVX registers are unused.  If it isn't deleted,
   move it to just before a jump insn.  */

static void
move_or_delete_vzeroupper (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;
  fibheap_t worklist, pending, fibheap_swap;
  sbitmap visited, in_worklist, in_pending, sbitmap_swap;
  int *bb_order;
  int *rc_order;
  int i;

  /* Set up block info for each basic block.  */
  alloc_aux_for_blocks (sizeof (struct block_info_def));

  /* Process outgoing edges of entry point.  */
  if (dump_file)
    fprintf (dump_file, "Process outgoing edges of entry point\n");

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      move_or_delete_vzeroupper_2 (e->dest,
				   cfun->machine->caller_pass_avx256_p
				   ? used : unused);
      BLOCK_INFO (e->dest)->processed = true;
    }

  /* Compute reverse completion order of depth first search of the CFG
     so that the data-flow runs faster.  */
  rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
  bb_order = XNEWVEC (int, last_basic_block);
  pre_and_rev_post_order_compute (NULL, rc_order, false);
  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
    bb_order[rc_order[i]] = i;
  free (rc_order);

  worklist = fibheap_new ();
  pending = fibheap_new ();
  visited = sbitmap_alloc (last_basic_block);
  in_worklist = sbitmap_alloc (last_basic_block);
  in_pending = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_worklist);

  /* Don't check outgoing edges of entry point.  */
  sbitmap_ones (in_pending);
  FOR_EACH_BB (bb)
    if (BLOCK_INFO (bb)->processed)
      RESET_BIT (in_pending, bb->index);
    else
      {
	move_or_delete_vzeroupper_1 (bb, false);
	fibheap_insert (pending, bb_order[bb->index], bb);
      }

  if (dump_file)
    fprintf (dump_file, "Check remaining basic blocks\n");

  while (!fibheap_empty (pending))
    {
      fibheap_swap = pending;
      pending = worklist;
      worklist = fibheap_swap;
      sbitmap_swap = in_pending;
      in_pending = in_worklist;
      in_worklist = sbitmap_swap;

      sbitmap_zero (visited);

      cfun->machine->rescan_vzeroupper_p = 0;

      while (!fibheap_empty (worklist))
	{
	  bb = (basic_block) fibheap_extract_min (worklist);
	  RESET_BIT (in_worklist, bb->index);
	  gcc_assert (!TEST_BIT (visited, bb->index));
	  if (!TEST_BIT (visited, bb->index))
	    {
	      edge_iterator ei;

	      SET_BIT (visited, bb->index);

	      if (move_or_delete_vzeroupper_1 (bb, false))
		FOR_EACH_EDGE (e, ei, bb->succs)
		  {
		    if (e->dest == EXIT_BLOCK_PTR
			|| BLOCK_INFO (e->dest)->processed)
		      continue;

		    if (TEST_BIT (visited, e->dest->index))
		      {
			if (!TEST_BIT (in_pending, e->dest->index))
			  {
			    /* Send E->DEST to next round.  */
			    SET_BIT (in_pending, e->dest->index);
			    fibheap_insert (pending,
					    bb_order[e->dest->index],
					    e->dest);
			  }
		      }
		    else if (!TEST_BIT (in_worklist, e->dest->index))
		      {
			/* Add E->DEST to current round.  */
			SET_BIT (in_worklist, e->dest->index);
			fibheap_insert (worklist, bb_order[e->dest->index],
					e->dest);
		      }
		  }
	    }
	}

      if (!cfun->machine->rescan_vzeroupper_p)
	break;
    }

  free (bb_order);
  fibheap_delete (worklist);
  fibheap_delete (pending);
  sbitmap_free (visited);
  sbitmap_free (in_worklist);
  sbitmap_free (in_pending);

  if (dump_file)
    fprintf (dump_file, "Process remaining basic blocks\n");

  FOR_EACH_BB (bb)
    move_or_delete_vzeroupper_1 (bb, true);

  free_aux_for_blocks ();
}
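/* The pending/worklist pair above follows the usual two-worklist
   iterative dataflow scheme (much like the solver in df-core.c):
   blocks are keyed by reverse completion order so each round visits
   them in roughly topological order, and iteration stops once a
   round completes without rescan_vzeroupper_p being set, i.e.
   without any block's exit state flipping to USED.  */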
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
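/* Illustration: later in this file the multiply cost tables are
   indexed as cost->mult_init[MODE_INDEX (mode)], so the trailing
   ": 4" selects the "other" column used for any wider mode.  */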
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
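/* Each stringop_algs entry below pairs an algorithm for unknown
   sizes with a list of {max_size, algorithm} steps, where -1 means
   "no upper bound".  DUMMY_STRINGOP_ALGS (always use a libcall)
   simply fills the slot for the other word size (32-bit vs 64-bit)
   when no tuning is provided for it.  */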
const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2), /* cost of an add instruction */
  COSTS_N_BYTES (3), /* cost of a lea instruction */
  COSTS_N_BYTES (2), /* variable shift costs */
  COSTS_N_BYTES (3), /* constant shift costs */
  {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
   COSTS_N_BYTES (3), /* HI */
   COSTS_N_BYTES (3), /* SI */
   COSTS_N_BYTES (3), /* DI */
   COSTS_N_BYTES (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3), /* HI */
   COSTS_N_BYTES (3), /* SI */
   COSTS_N_BYTES (3), /* DI */
   COSTS_N_BYTES (5)}, /* other */
  COSTS_N_BYTES (3), /* cost of movsx */
  COSTS_N_BYTES (3), /* cost of movzx */
  0, /* "large" insn */
  2, /* MOVE_RATIO */
  2, /* cost for loading QImode using movzbl */
  {2, 2, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 2, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 2}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {2, 2, 2}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  3, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {3, 3}, /* cost of storing MMX registers
             in SImode and DImode */
  3, /* cost of moving SSE register */
  {3, 3, 3}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {3, 3, 3}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of l1 cache */
  0, /* size of l2 cache */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  1, /* Branch cost */
  COSTS_N_BYTES (2), /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2), /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2), /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2), /* cost of FABS instruction.  */
  COSTS_N_BYTES (2), /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2), /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  1, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  1, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
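/* When optimizing for size, the table above makes the RTX cost model
   effectively compare instruction encodings in bytes (via
   COSTS_N_BYTES) rather than latencies; all vectorizer costs are
   left at their neutral value of 1.  */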
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = { /* 386 specific costs */
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (3), /* variable shift costs */
  COSTS_N_INSNS (2), /* constant shift costs */
  {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
   COSTS_N_INSNS (6), /* HI */
   COSTS_N_INSNS (6), /* SI */
   COSTS_N_INSNS (6), /* DI */
   COSTS_N_INSNS (6)}, /* other */
  COSTS_N_INSNS (1), /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23), /* HI */
   COSTS_N_INSNS (23), /* SI */
   COSTS_N_INSNS (23), /* DI */
   COSTS_N_INSNS (23)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  15, /* "large" insn */
  3, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {8, 8, 8}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {8, 8, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of l1 cache */
  0, /* size of l2 cache */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  1, /* Branch cost */
  COSTS_N_INSNS (23), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22), /* cost of FABS instruction.  */
  COSTS_N_INSNS (24), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122), /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs i486_cost = { /* 486 specific costs */
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (3), /* variable shift costs */
  COSTS_N_INSNS (2), /* constant shift costs */
  {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
   COSTS_N_INSNS (12), /* HI */
   COSTS_N_INSNS (12), /* SI */
   COSTS_N_INSNS (12), /* DI */
   COSTS_N_INSNS (12)}, /* other */
  1, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40), /* HI */
   COSTS_N_INSNS (40), /* SI */
   COSTS_N_INSNS (40), /* DI */
   COSTS_N_INSNS (40)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  15, /* "large" insn */
  3, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {8, 8, 8}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {8, 8, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  4, /* size of l1 cache.  486 has 8kB cache
        shared for code and data, so 4kB is
        not really precise.  */
  4, /* size of l2 cache */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  1, /* Branch cost */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3), /* cost of FABS instruction.  */
  COSTS_N_INSNS (3), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83), /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (4), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
   COSTS_N_INSNS (11), /* HI */
   COSTS_N_INSNS (11), /* SI */
   COSTS_N_INSNS (11), /* DI */
   COSTS_N_INSNS (11)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25), /* HI */
   COSTS_N_INSNS (25), /* SI */
   COSTS_N_INSNS (25), /* DI */
   COSTS_N_INSNS (25)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  8, /* "large" insn */
  6, /* MOVE_RATIO */
  6, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  8, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  8, /* size of l1 cache.  */
  8, /* size of l2 cache */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  2, /* Branch cost */
  COSTS_N_INSNS (3), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1), /* cost of FABS instruction.  */
  COSTS_N_INSNS (1), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (4), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (4)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17), /* HI */
   COSTS_N_INSNS (17), /* SI */
   COSTS_N_INSNS (17), /* DI */
   COSTS_N_INSNS (17)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  6, /* MOVE_RATIO */
  2, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 2, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {2, 2, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  8, /* size of l1 cache.  */
  256, /* size of l2 cache */
  32, /* size of prefetch block */
  6, /* number of parallel prefetches */
  2, /* Branch cost */
  COSTS_N_INSNS (3), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56), /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks inline loop is still a
     noticeable win, for bigger blocks either rep movsl or rep movsb is
     way to go.  Rep movsb has apparently more expensive startup time in CPU,
     but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (2), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (7), /* SI */
   COSTS_N_INSNS (7), /* DI */
   COSTS_N_INSNS (7)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23), /* HI */
   COSTS_N_INSNS (39), /* SI */
   COSTS_N_INSNS (39), /* DI */
   COSTS_N_INSNS (39)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  4, /* MOVE_RATIO */
  1, /* cost for loading QImode using movzbl */
  {1, 1, 1}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {1, 1, 1}, /* cost of storing integer registers */
  1, /* cost of reg,reg fld/fst */
  {1, 1, 1}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 6, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */

  1, /* cost of moving MMX register */
  {1, 1}, /* cost of loading MMX registers
             in SImode and DImode */
  {1, 1}, /* cost of storing MMX registers
             in SImode and DImode */
  1, /* cost of moving SSE register */
  {1, 1, 1}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {1, 1, 1}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  1, /* MMX or SSE register to integer */
  64, /* size of l1 cache.  */
  128, /* size of l2 cache.  */
  32, /* size of prefetch block */
  1, /* number of parallel prefetches */
  1, /* Branch cost */
  COSTS_N_INSNS (6), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1), /* cost of FABS instruction.  */
  COSTS_N_INSNS (1), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (3), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (3), /* DI */
   COSTS_N_INSNS (3)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18), /* HI */
   COSTS_N_INSNS (18), /* SI */
   COSTS_N_INSNS (18), /* DI */
   COSTS_N_INSNS (18)}, /* other */
  COSTS_N_INSNS (2), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  8, /* "large" insn */
  4, /* MOVE_RATIO */
  3, /* cost for loading QImode using movzbl */
  {4, 5, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 3, 2}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {6, 6, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {2, 2, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  6, /* MMX or SSE register to integer */
  32, /* size of l1 cache.  */
  32, /* size of l2 cache.  Some models
         have integrated l2 cache, but
         optimizing for k6 is not important
         enough to worry about that.  */
  32, /* size of prefetch block */
  1, /* number of parallel prefetches */
  1, /* Branch cost */
  COSTS_N_INSNS (2), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
   COSTS_N_INSNS (5), /* HI */
   COSTS_N_INSNS (5), /* SI */
   COSTS_N_INSNS (5), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  9, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 4}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 4, 6}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of l1 cache.  */
  256, /* size of l2 cache.  */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  5, /* Branch cost */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  9, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 3, 6}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of l1 cache.  */
  512, /* size of l2 cache.  */
  64, /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100, /* number of parallel prefetches */
  3, /* Branch cost */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4, /* scalar_stmt_cost.  */
  2, /* scalar load_cost.  */
  2, /* scalar_store_cost.  */
  5, /* vec_stmt_cost.  */
  0, /* vec_to_scalar_cost.  */
  2, /* scalar_to_vec_cost.  */
  2, /* vec_align_load_cost.  */
  3, /* vec_unalign_load_cost.  */
  3, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  2, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35), /* HI */
   COSTS_N_INSNS (51), /* SI */
   COSTS_N_INSNS (83), /* DI */
   COSTS_N_INSNS (83)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  9, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 4, 3}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
     /* On K8:
         MOVD reg64, xmmreg Double FSTORE 4
         MOVD reg32, xmmreg Double FSTORE 4
        On AMDFAM10:
         MOVD reg64, xmmreg Double FADD 3
                            1/1  1/1
         MOVD reg32, xmmreg Double FADD 3
                            1/1  1/1 */
  64, /* size of l1 cache.  */
  512, /* size of l2 cache.  */
  64, /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100, /* number of parallel prefetches */
  2, /* Branch cost */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4, /* scalar_stmt_cost.  */
  2, /* scalar load_cost.  */
  2, /* scalar_store_cost.  */
  6, /* vec_stmt_cost.  */
  0, /* vec_to_scalar_cost.  */
  2, /* scalar_to_vec_cost.  */
  2, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  2, /* vec_store_cost.  */
  2, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (4), /* SI */
   COSTS_N_INSNS (6), /* DI */
   COSTS_N_INSNS (6)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35), /* HI */
   COSTS_N_INSNS (51), /* SI */
   COSTS_N_INSNS (83), /* DI */
   COSTS_N_INSNS (83)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  9, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {5, 5, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {5, 5, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {4, 4, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 4}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 4, 4}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 4}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  2, /* MMX or SSE register to integer */
     /* On K8:
         MOVD reg64, xmmreg Double FSTORE 4
         MOVD reg32, xmmreg Double FSTORE 4
        On AMDFAM10:
         MOVD reg64, xmmreg Double FADD 3
                            1/1  1/1
         MOVD reg32, xmmreg Double FADD 3
                            1/1  1/1 */
  16, /* size of l1 cache.  */
  2048, /* size of l2 cache.  */
  64, /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100, /* number of parallel prefetches */
  2, /* Branch cost */
  COSTS_N_INSNS (6), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (42), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (52), /* cost of FSQRT instruction.  */

  /* BDVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  6, /* scalar_stmt_cost.  */
  4, /* scalar load_cost.  */
  4, /* scalar_store_cost.  */
  6, /* vec_stmt_cost.  */
  0, /* vec_to_scalar_cost.  */
  2, /* scalar_to_vec_cost.  */
  4, /* vec_align_load_cost.  */
  4, /* vec_unalign_load_cost.  */
  4, /* vec_store_cost.  */
  2, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs btver1_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35), /* HI */
   COSTS_N_INSNS (51), /* SI */
   COSTS_N_INSNS (83), /* DI */
   COSTS_N_INSNS (83)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  9, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 4, 3}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
     /* On K8:
         MOVD reg64, xmmreg Double FSTORE 4
         MOVD reg32, xmmreg Double FSTORE 4
        On AMDFAM10:
         MOVD reg64, xmmreg Double FADD 3
                            1/1  1/1
         MOVD reg32, xmmreg Double FADD 3
                            1/1  1/1 */
  32, /* size of l1 cache.  */
  512, /* size of l2 cache.  */
  64, /* size of prefetch block */
  100, /* number of parallel prefetches */
  2, /* Branch cost */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */

  /* BTVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4, /* scalar_stmt_cost.  */
  2, /* scalar load_cost.  */
  2, /* scalar_store_cost.  */
  6, /* vec_stmt_cost.  */
  0, /* vec_to_scalar_cost.  */
  2, /* scalar_to_vec_cost.  */
  2, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  2, /* vec_store_cost.  */
  2, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (3), /* cost of a lea instruction */
  COSTS_N_INSNS (4), /* variable shift costs */
  COSTS_N_INSNS (4), /* constant shift costs */
  {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
   COSTS_N_INSNS (15), /* HI */
   COSTS_N_INSNS (15), /* SI */
   COSTS_N_INSNS (15), /* DI */
   COSTS_N_INSNS (15)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56), /* HI */
   COSTS_N_INSNS (56), /* SI */
   COSTS_N_INSNS (56), /* DI */
   COSTS_N_INSNS (56)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  16, /* "large" insn */
  6, /* MOVE_RATIO */
  2, /* cost for loading QImode using movzbl */
  {4, 5, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 3, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  12, /* cost of moving SSE register */
  {12, 12, 12}, /* cost of loading SSE registers
                   in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  10, /* MMX or SSE register to integer */
  8, /* size of l1 cache.  */
  256, /* size of l2 cache.  */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  2, /* Branch cost */
  COSTS_N_INSNS (5), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43), /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
   COSTS_N_INSNS (10), /* HI */
   COSTS_N_INSNS (10), /* SI */
   COSTS_N_INSNS (10), /* DI */
   COSTS_N_INSNS (10)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66), /* HI */
   COSTS_N_INSNS (66), /* SI */
   COSTS_N_INSNS (66), /* DI */
   COSTS_N_INSNS (66)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  16, /* "large" insn */
  17, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  3, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  6, /* cost of moving MMX register */
  {12, 12}, /* cost of loading MMX registers
               in SImode and DImode */
  {12, 12}, /* cost of storing MMX registers
               in SImode and DImode */
  6, /* cost of moving SSE register */
  {12, 12, 12}, /* cost of loading SSE registers
                   in SImode, DImode and TImode */
  {12, 12, 12}, /* cost of storing SSE registers
                   in SImode, DImode and TImode */
  8, /* MMX or SSE register to integer */
  8, /* size of l1 cache.  */
  1024, /* size of l2 cache.  */
  128, /* size of prefetch block */
  8, /* number of parallel prefetches */
  1, /* Branch cost */
  COSTS_N_INSNS (6), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3), /* cost of FABS instruction.  */
  COSTS_N_INSNS (3), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44), /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  17, /* MOVE_RATIO */
  2, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {8, 8, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {8, 8, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  32, /* size of l1 cache.  */
  256, /* size of l2 cache.  */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  3, /* Branch cost */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8), /* cost of FABS instruction.  */
  COSTS_N_INSNS (8), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40), /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
1639 /* Generic64 should produce code tuned for Nocona and K8. */
1641 struct processor_costs generic64_cost = {
1642 COSTS_N_INSNS (1), /* cost of an add instruction */
1643 /* On all chips taken into consideration lea is 2 cycles and more. With
1644 this cost however our current implementation of synth_mult results in
1645 use of unnecessary temporary registers causing regression on several
1646 SPECfp benchmarks. */
1647 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1648 COSTS_N_INSNS (1), /* variable shift costs */
1649 COSTS_N_INSNS (1), /* constant shift costs */
1650 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1651 COSTS_N_INSNS (4), /* HI */
1652 COSTS_N_INSNS (3), /* SI */
1653 COSTS_N_INSNS (4), /* DI */
1654 COSTS_N_INSNS (2)}, /* other */
1655 0, /* cost of multiply per each bit set */
1656 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1657 COSTS_N_INSNS (26), /* HI */
1658 COSTS_N_INSNS (42), /* SI */
1659 COSTS_N_INSNS (74), /* DI */
1660 COSTS_N_INSNS (74)}, /* other */
1661 COSTS_N_INSNS (1), /* cost of movsx */
1662 COSTS_N_INSNS (1), /* cost of movzx */
1663 8, /* "large" insn */
1664 17, /* MOVE_RATIO */
1665 4, /* cost for loading QImode using movzbl */
1666 {4, 4, 4}, /* cost of loading integer registers
1667 in QImode, HImode and SImode.
1668 Relative to reg-reg move (2). */
1669 {4, 4, 4}, /* cost of storing integer registers */
1670 4, /* cost of reg,reg fld/fst */
1671 {12, 12, 12}, /* cost of loading fp registers
1672 in SFmode, DFmode and XFmode */
1673 {6, 6, 8}, /* cost of storing fp registers
1674 in SFmode, DFmode and XFmode */
1675 2, /* cost of moving MMX register */
1676 {8, 8}, /* cost of loading MMX registers
1677 in SImode and DImode */
1678 {8, 8}, /* cost of storing MMX registers
1679 in SImode and DImode */
1680 2, /* cost of moving SSE register */
1681 {8, 8, 8}, /* cost of loading SSE registers
1682 in SImode, DImode and TImode */
1683 {8, 8, 8}, /* cost of storing SSE registers
1684 in SImode, DImode and TImode */
1685 5, /* MMX or SSE register to integer */
1686 32, /* size of l1 cache. */
1687 512, /* size of l2 cache. */
1688 64, /* size of prefetch block */
1689 6, /* number of parallel prefetches */
1690 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1691 value is increased to the perhaps more appropriate value of 5. */
1692 3, /* Branch cost */
1693 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1694 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1695 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1696 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1697 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1698 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1699 {DUMMY_STRINGOP_ALGS,
1700 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1701 {DUMMY_STRINGOP_ALGS,
1702 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1703 1, /* scalar_stmt_cost. */
1704 1, /* scalar load_cost. */
1705 1, /* scalar_store_cost. */
1706 1, /* vec_stmt_cost. */
1707 1, /* vec_to_scalar_cost. */
1708 1, /* scalar_to_vec_cost. */
1709 1, /* vec_align_load_cost. */
1710 2, /* vec_unalign_load_cost. */
1711 1, /* vec_store_cost. */
1712 3, /* cond_taken_branch_cost. */
1713 1, /* cond_not_taken_branch_cost. */
1714 };
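/* Illustrative sketch, not part of the original file: the memcpy/memset
   descriptors above are lists of {max, alg} pairs, where each algorithm
   handles copies up to `max' bytes and -1 terminates the list, meaning
   "everything larger".  A simplified version of how such a descriptor is
   consulted, assuming the stringop_algs layout from i386.h (the real
   logic lives in decide_alg):  */

static enum stringop_alg ATTRIBUTE_UNUSED
example_pick_stringop_alg (const struct stringop_algs *algs,
			   unsigned HOST_WIDE_INT count)
{
  unsigned int i;
  for (i = 0; i < MAX_STRINGOP_ALGS; i++)
    if (algs->size[i].max == -1
	|| count <= (unsigned HOST_WIDE_INT) algs->size[i].max)
      return algs->size[i].alg;
  return libcall;
}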
1716 /* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
1717 Core 2 and K8. */
1718 static const
1719 struct processor_costs generic32_cost = {
1720 COSTS_N_INSNS (1), /* cost of an add instruction */
1721 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1722 COSTS_N_INSNS (1), /* variable shift costs */
1723 COSTS_N_INSNS (1), /* constant shift costs */
1724 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1725 COSTS_N_INSNS (4), /* HI */
1726 COSTS_N_INSNS (3), /* SI */
1727 COSTS_N_INSNS (4), /* DI */
1728 COSTS_N_INSNS (2)}, /* other */
1729 0, /* cost of multiply per each bit set */
1730 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1731 COSTS_N_INSNS (26), /* HI */
1732 COSTS_N_INSNS (42), /* SI */
1733 COSTS_N_INSNS (74), /* DI */
1734 COSTS_N_INSNS (74)}, /* other */
1735 COSTS_N_INSNS (1), /* cost of movsx */
1736 COSTS_N_INSNS (1), /* cost of movzx */
1737 8, /* "large" insn */
1738 17, /* MOVE_RATIO */
1739 4, /* cost for loading QImode using movzbl */
1740 {4, 4, 4}, /* cost of loading integer registers
1741 in QImode, HImode and SImode.
1742 Relative to reg-reg move (2). */
1743 {4, 4, 4}, /* cost of storing integer registers */
1744 4, /* cost of reg,reg fld/fst */
1745 {12, 12, 12}, /* cost of loading fp registers
1746 in SFmode, DFmode and XFmode */
1747 {6, 6, 8}, /* cost of storing fp registers
1748 in SFmode, DFmode and XFmode */
1749 2, /* cost of moving MMX register */
1750 {8, 8}, /* cost of loading MMX registers
1751 in SImode and DImode */
1752 {8, 8}, /* cost of storing MMX registers
1753 in SImode and DImode */
1754 2, /* cost of moving SSE register */
1755 {8, 8, 8}, /* cost of loading SSE registers
1756 in SImode, DImode and TImode */
1757 {8, 8, 8}, /* cost of storing SSE registers
1758 in SImode, DImode and TImode */
1759 5, /* MMX or SSE register to integer */
1760 32, /* size of l1 cache. */
1761 256, /* size of l2 cache. */
1762 64, /* size of prefetch block */
1763 6, /* number of parallel prefetches */
1764 3, /* Branch cost */
1765 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1766 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1767 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1768 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1769 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1770 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1771 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1772 DUMMY_STRINGOP_ALGS},
1773 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1774 DUMMY_STRINGOP_ALGS},
1775 1, /* scalar_stmt_cost. */
1776 1, /* scalar load_cost. */
1777 1, /* scalar_store_cost. */
1778 1, /* vec_stmt_cost. */
1779 1, /* vec_to_scalar_cost. */
1780 1, /* scalar_to_vec_cost. */
1781 1, /* vec_align_load_cost. */
1782 2, /* vec_unalign_load_cost. */
1783 1, /* vec_store_cost. */
1784 3, /* cond_taken_branch_cost. */
1785 1, /* cond_not_taken_branch_cost. */
1786 };
1788 const struct processor_costs *ix86_cost = &pentium_cost;
1790 /* Processor feature/optimization bitmasks. */
1791 #define m_386 (1<<PROCESSOR_I386)
1792 #define m_486 (1<<PROCESSOR_I486)
1793 #define m_PENT (1<<PROCESSOR_PENTIUM)
1794 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1795 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1796 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1797 #define m_CORE2_32 (1<<PROCESSOR_CORE2_32)
1798 #define m_CORE2_64 (1<<PROCESSOR_CORE2_64)
1799 #define m_COREI7_32 (1<<PROCESSOR_COREI7_32)
1800 #define m_COREI7_64 (1<<PROCESSOR_COREI7_64)
1801 #define m_COREI7 (m_COREI7_32 | m_COREI7_64)
1802 #define m_CORE2I7_32 (m_CORE2_32 | m_COREI7_32)
1803 #define m_CORE2I7_64 (m_CORE2_64 | m_COREI7_64)
1804 #define m_CORE2I7 (m_CORE2I7_32 | m_CORE2I7_64)
1805 #define m_ATOM (1<<PROCESSOR_ATOM)
1807 #define m_GEODE (1<<PROCESSOR_GEODE)
1808 #define m_K6 (1<<PROCESSOR_K6)
1809 #define m_K6_GEODE (m_K6 | m_GEODE)
1810 #define m_K8 (1<<PROCESSOR_K8)
1811 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1812 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1813 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1814 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1815 #define m_BTVER1 (1<<PROCESSOR_BTVER1)
1816 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1 | m_BTVER1)
1818 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1819 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1821 /* Generic instruction choice should be a common subset of supported CPUs
1822 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1823 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1825 /* Feature tests against the various tunings. */
1826 unsigned char ix86_tune_features[X86_TUNE_LAST];
1828 /* Feature tests against the various tunings used to create ix86_tune_features
1829 based on the processor mask. */
1830 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1831 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1832 negatively, so enabling it for Generic64 seems like a good code size
1833 tradeoff. We can't enable it for 32bit generic because it does not
1834 work well with PPro-based chips. */
1835 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2I7_64 | m_GENERIC64,
1837 /* X86_TUNE_PUSH_MEMORY */
1838 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1839 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1841 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1844 /* X86_TUNE_UNROLL_STRLEN */
1845 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1846 | m_CORE2I7 | m_GENERIC,
1848 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1849 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1850 | m_CORE2I7 | m_GENERIC,
1852 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1853 on simulation results. But after P4 was made, no performance benefit
1854 was observed with branch hints. They also increase the code size.
1855 As a result, icc never generates branch hints. */
1858 /* X86_TUNE_DOUBLE_WITH_ADD */
1861 /* X86_TUNE_USE_SAHF */
1862 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_BTVER1
1863 | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1865 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1866 partial dependencies. */
1867 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1868 | m_CORE2I7 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1870 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1871 register stalls on the Generic32 compilation setting as well. However,
1872 in the current implementation partial register stalls are not eliminated
1873 very well - they can be introduced via subregs synthesized by combine
1874 and can happen in caller/callee saving sequences. Because this option
1875 pays back little on PPro-based chips and conflicts with the partial
1876 register dependencies used by Athlon/P4-based chips, it is better to
1877 leave it off for generic32 for now. */
1880 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1881 m_CORE2I7 | m_GENERIC,
1883 /* X86_TUNE_USE_HIMODE_FIOP */
1884 m_386 | m_486 | m_K6_GEODE,
1886 /* X86_TUNE_USE_SIMODE_FIOP */
1887 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2I7 | m_GENERIC),
1889 /* X86_TUNE_USE_MOV0 */
1892 /* X86_TUNE_USE_CLTD */
1893 ~(m_PENT | m_ATOM | m_K6 | m_CORE2I7 | m_GENERIC),
1895 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1898 /* X86_TUNE_SPLIT_LONG_MOVES */
1901 /* X86_TUNE_READ_MODIFY_WRITE */
1904 /* X86_TUNE_READ_MODIFY */
1907 /* X86_TUNE_PROMOTE_QIMODE */
1908 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1909 | m_CORE2I7 | m_GENERIC /* | m_PENT4 ? */,
1911 /* X86_TUNE_FAST_PREFIX */
1912 ~(m_PENT | m_486 | m_386),
1914 /* X86_TUNE_SINGLE_STRINGOP */
1915 m_386 | m_PENT4 | m_NOCONA,
1917 /* X86_TUNE_QIMODE_MATH */
1920 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1921 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL, this option
1922 might be considered for Generic32 if our scheme for avoiding partial
1923 stalls were more effective. */
1926 /* X86_TUNE_PROMOTE_QI_REGS */
1929 /* X86_TUNE_PROMOTE_HI_REGS */
1932 /* X86_TUNE_SINGLE_POP: Enable if a single pop insn is preferred
1933 over esp addition. */
1934 m_386 | m_486 | m_PENT | m_PPRO,
1936 /* X86_TUNE_DOUBLE_POP: Enable if a double pop insn is preferred
1937 over esp addition. */
1940 /* X86_TUNE_SINGLE_PUSH: Enable if a single push insn is preferred
1941 over esp subtraction. */
1942 m_386 | m_486 | m_PENT | m_K6_GEODE,
1944 /* X86_TUNE_DOUBLE_PUSH: Enable if a double push insn is preferred
1945 over esp subtraction. */
1946 m_PENT | m_K6_GEODE,
1948 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1949 for DFmode copies */
1950 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7
1951 | m_GENERIC | m_GEODE),
1953 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1954 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1956 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1957 conflict here between PPro/Pentium4-based chips that treat 128bit
1958 SSE registers as single units and K8-based chips that divide SSE
1959 registers into two 64bit halves. This knob promotes all store destinations
1960 to 128bit to allow register renaming on 128bit SSE units, but usually
1961 results in one extra microop on 64bit SSE units. Experimental results
1962 show that disabling this option on P4 brings more than a 20% SPECfp
1963 regression, while enabling it on K8 brings roughly a 2.4% regression that
1964 can be partly masked by careful scheduling of moves. */
1965 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7 | m_GENERIC
1966 | m_AMDFAM10 | m_BDVER1,
1968 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1969 m_AMDFAM10 | m_BDVER1 | m_BTVER1 | m_COREI7,
1971 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1972 m_BDVER1 | m_COREI7,
1974 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1977 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1978 are resolved on SSE register parts instead of whole registers, so we may
1979 maintain just the lower part of scalar values in the proper format, leaving
1980 the upper part undefined. */
1983 /* X86_TUNE_SSE_TYPELESS_STORES */
1986 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1987 m_PPRO | m_PENT4 | m_NOCONA,
1989 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1990 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1992 /* X86_TUNE_PROLOGUE_USING_MOVE */
1993 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2I7 | m_GENERIC,
1995 /* X86_TUNE_EPILOGUE_USING_MOVE */
1996 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2I7 | m_GENERIC,
1998 /* X86_TUNE_SHIFT1 */
2001 /* X86_TUNE_USE_FFREEP */
2004 /* X86_TUNE_INTER_UNIT_MOVES */
2005 ~(m_AMD_MULTIPLE | m_GENERIC),
2007 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
2008 ~(m_AMDFAM10 | m_BDVER1),
2010 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
2011 than 4 branch instructions in the 16 byte window. */
2012 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2I7
2015 /* X86_TUNE_SCHEDULE */
2016 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2I7
2019 /* X86_TUNE_USE_BT */
2020 m_AMD_MULTIPLE | m_ATOM | m_CORE2I7 | m_GENERIC,
2022 /* X86_TUNE_USE_INCDEC */
2023 ~(m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC | m_ATOM),
2025 /* X86_TUNE_PAD_RETURNS */
2026 m_AMD_MULTIPLE | m_CORE2I7 | m_GENERIC,
2028 /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions. */
2031 /* X86_TUNE_EXT_80387_CONSTANTS */
2032 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
2033 | m_CORE2I7 | m_GENERIC,
2035 /* X86_TUNE_SHORTEN_X87_SSE */
2038 /* X86_TUNE_AVOID_VECTOR_DECODE */
2039 m_K8 | m_CORE2I7_64 | m_GENERIC64,
2041 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
2042 and SImode multiplies, but the 386 and 486 do HImode multiplies faster. */
2045 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory takes
2046 the vector path on AMD machines. */
2047 m_K8 | m_CORE2I7_64 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1 | m_BTVER1,
2049 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant takes the vector path
2050 on AMD machines. */
2051 m_K8 | m_CORE2I7_64 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1 | m_BTVER1,
2053 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
2054 than a MOV. */
2057 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
2058 but is one byte longer. */
2061 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
2062 operand that cannot be represented using a modRM byte. The XOR
2063 replacement is long decoded, so this split helps here as well. */
2066 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
2067 from FP to FP. */
2068 m_AMDFAM10 | m_CORE2I7 | m_GENERIC,
2070 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
2071 from integer to FP. */
2074 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
2075 with a subsequent conditional jump instruction into a single
2076 compare-and-branch uop. */
2079 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
2080 will impact LEA instruction selection. */
2083 /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
2084 instructions. */
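/* Illustrative sketch, not part of the original file: the mask table above
   is folded into the ix86_tune_features byte array once the tuning target
   is known, essentially as ix86_option_override_internal does it:  */

static void ATTRIBUTE_UNUSED
example_fold_tune_features (unsigned int tune_mask)
{
  unsigned int i;
  /* tune_mask is 1 << ix86_tune for the selected processor.  */
  for (i = 0; i < X86_TUNE_LAST; ++i)
    ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & tune_mask);
}

/* i386.h then wraps individual entries in macros such as
   TARGET_USE_LEAVE (ix86_tune_features[X86_TUNE_USE_LEAVE]).  */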
2088 /* Feature tests against the various architecture variations. */
2089 unsigned char ix86_arch_features[X86_ARCH_LAST];
2091 /* Feature tests against the various architecture variations, used to create
2092 ix86_arch_features based on the processor mask. */
2093 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
2094 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
2095 ~(m_386 | m_486 | m_PENT | m_K6),
2097 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
2098 ~m_386,
2100 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
2101 ~(m_386 | m_486),
2103 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
2104 ~m_386,
2106 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
2107 ~m_386,
2108 };
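/* Illustrative sketch, not part of the original file: ix86_arch_features is
   filled from the table above in the same way as ix86_tune_features, keyed
   by 1 << ix86_arch instead, and i386.h exposes the entries through macros
   such as TARGET_CMOVE.  A hypothetical direct test:  */

static bool ATTRIBUTE_UNUSED
example_have_cmov (void)
{
  /* Per the first mask above: everything but 386/486/Pentium/K6.  */
  return ix86_arch_features[X86_ARCH_CMOVE] != 0;
}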
2110 static const unsigned int x86_accumulate_outgoing_args
2111 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7
2112 | m_GENERIC;
2114 static const unsigned int x86_arch_always_fancy_math_387
2115 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
2116 | m_NOCONA | m_CORE2I7 | m_GENERIC;
2118 static enum stringop_alg stringop_alg = no_stringop;
2120 /* In case the average insn count for a single function invocation is
2121 lower than this constant, emit fast (but longer) prologue and
2122 epilogue code. */
2123 #define FAST_PROLOGUE_INSN_COUNT 20
2125 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
2126 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
2127 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
2128 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
2130 /* Array of the smallest class containing reg number REGNO, indexed by
2131 REGNO. Used by REGNO_REG_CLASS in i386.h. */
2133 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
2134 {
2135 /* ax, dx, cx, bx */
2136 AREG, DREG, CREG, BREG,
2137 /* si, di, bp, sp */
2138 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
2139 /* FP registers */
2140 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
2141 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
2142 /* arg pointer */
2143 NON_Q_REGS,
2144 /* flags, fpsr, fpcr, frame */
2145 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
2146 /* SSE registers */
2147 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2148 SSE_REGS, SSE_REGS,
2149 /* MMX registers */
2150 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
2151 MMX_REGS, MMX_REGS,
2152 /* REX registers */
2153 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2154 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2155 /* SSE REX registers */
2156 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2157 SSE_REGS, SSE_REGS,
2158 };
2160 /* The "default" register map used in 32bit mode. */
2162 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
2163 {
2164 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
2165 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
2166 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2167 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
2168 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
2169 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2170 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2171 };
2173 /* The "default" register map used in 64bit mode. */
2175 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
2176 {
2177 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
2178 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
2179 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2180 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
2181 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
2182 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
2183 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
2184 };
2186 /* Define the register numbers to be used in Dwarf debugging information.
2187 The SVR4 reference port C compiler uses the following register numbers
2188 in its Dwarf output code:
2189 0 for %eax (gcc regno = 0)
2190 1 for %ecx (gcc regno = 2)
2191 2 for %edx (gcc regno = 1)
2192 3 for %ebx (gcc regno = 3)
2193 4 for %esp (gcc regno = 7)
2194 5 for %ebp (gcc regno = 6)
2195 6 for %esi (gcc regno = 4)
2196 7 for %edi (gcc regno = 5)
2197 The following three DWARF register numbers are never generated by
2198 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
2199 believes these numbers have these meanings.
2200 8 for %eip (no gcc equivalent)
2201 9 for %eflags (gcc regno = 17)
2202 10 for %trapno (no gcc equivalent)
2203 It is not at all clear how we should number the FP stack registers
2204 for the x86 architecture. If the version of SDB on x86/svr4 were
2205 a bit less brain dead with respect to floating-point then we would
2206 have a precedent to follow with respect to DWARF register numbers
2207 for x86 FP registers, but the SDB on x86/svr4 is so completely
2208 broken with respect to FP registers that it is hardly worth thinking
2209 of it as something to strive for compatibility with.
2210 The version of x86/svr4 SDB I have at the moment does (partially)
2211 seem to believe that DWARF register number 11 is associated with
2212 the x86 register %st(0), but that's about all. Higher DWARF
2213 register numbers don't seem to be associated with anything in
2214 particular, and even for DWARF regno 11, SDB only seems to
2215 understand that it should say that a variable lives in %st(0) (when
2216 asked via an `=' command) if we said it was in DWARF regno 11,
2217 but SDB still prints garbage when asked for the value of the
2218 variable in question (via a `/' command).
2219 (Also note that the labels SDB prints for various FP stack regs
2220 when doing an `x' command are all wrong.)
2221 Note that these problems generally don't affect the native SVR4
2222 C compiler because it doesn't allow the use of -O with -g and
2223 because when it is *not* optimizing, it allocates a memory
2224 location for each floating-point variable, and the memory
2225 location is what gets described in the DWARF AT_location
2226 attribute for the variable in question.
2227 Regardless of the severe mental illness of the x86/svr4 SDB, we
2228 do something sensible here and we use the following DWARF
2229 register numbers. Note that these are all stack-top-relative
2230 numbers:
2231 11 for %st(0) (gcc regno = 8)
2232 12 for %st(1) (gcc regno = 9)
2233 13 for %st(2) (gcc regno = 10)
2234 14 for %st(3) (gcc regno = 11)
2235 15 for %st(4) (gcc regno = 12)
2236 16 for %st(5) (gcc regno = 13)
2237 17 for %st(6) (gcc regno = 14)
2238 18 for %st(7) (gcc regno = 15)
2240 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
2241 {
2242 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
2243 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
2244 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2245 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
2246 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
2247 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2248 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2249 };
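/* Illustrative sketch, not part of the original file: subtarget headers pick
   one of the three maps above when emitting debug info; which map the
   32-bit configuration uses (dbx_register_map or svr4_dbx_register_map)
   depends on the subtarget.  A hypothetical accessor in the spirit of
   DBX_REGISTER_NUMBER:  */

static int ATTRIBUTE_UNUSED
example_debugger_regno (int regno, bool svr4_numbering)
{
  if (TARGET_64BIT)
    return dbx64_register_map[regno];
  return svr4_numbering ? svr4_dbx_register_map[regno]
			: dbx_register_map[regno];
}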
2251 /* Define parameter passing and return registers. */
2253 static int const x86_64_int_parameter_registers[6] =
2254 {
2255 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
2256 };
2258 static int const x86_64_ms_abi_int_parameter_registers[4] =
2259 {
2260 CX_REG, DX_REG, R8_REG, R9_REG
2261 };
2263 static int const x86_64_int_return_registers[4] =
2264 {
2265 AX_REG, DX_REG, DI_REG, SI_REG
2266 };
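/* Illustrative sketch, not part of the original file: the arrays above list
   hard register numbers in ABI argument order, so the N-th integer argument
   register is a direct index.  Hypothetical helper (SysV passes six integer
   arguments in registers, the MS ABI only four):  */

static int ATTRIBUTE_UNUSED
example_int_parameter_regno (enum calling_abi abi, int n)
{
  return (abi == MS_ABI
	  ? x86_64_ms_abi_int_parameter_registers[n]
	  : x86_64_int_parameter_registers[n]);
}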
2268 /* Define the structure for the machine field in struct function. */
2270 struct GTY(()) stack_local_entry {
2271 unsigned short mode;
2272 unsigned short n;
2273 rtx rtl;
2274 struct stack_local_entry *next;
2275 };
2277 /* Structure describing stack frame layout.
2278 Stack grows downward:
2280 [arguments]
2281 <- ARG_POINTER
2282 saved pc
2284 saved static chain if ix86_static_chain_on_stack
2286 saved frame pointer if frame_pointer_needed
2287 <- HARD_FRAME_POINTER
2288 [saved regs]
2289 <- reg_save_offset
2290 [padding0]
2292 [saved SSE regs]
2293 <- sse_regs_save_offset
2294 [padding1] |
2295 | <- FRAME_POINTER
2296 [va_arg registers] |
2297 |
2298 [frame] |
2299 |
2300 [padding2] | = to_allocate
2301 <- STACK_POINTER
2302 */
2303 struct ix86_frame
2304 {
2305 int nsseregs;
2306 int nregs;
2307 int va_arg_size;
2308 int red_zone_size;
2309 int outgoing_arguments_size;
2310 HOST_WIDE_INT frame;
2312 /* The offsets relative to ARG_POINTER. */
2313 HOST_WIDE_INT frame_pointer_offset;
2314 HOST_WIDE_INT hard_frame_pointer_offset;
2315 HOST_WIDE_INT stack_pointer_offset;
2316 HOST_WIDE_INT hfp_save_offset;
2317 HOST_WIDE_INT reg_save_offset;
2318 HOST_WIDE_INT sse_reg_save_offset;
2320 /* When save_regs_using_mov is set, emit prologue using
2321 move instead of push instructions. */
2322 bool save_regs_using_mov;
2325 /* Code model option. */
2326 enum cmodel ix86_cmodel;
2328 enum asm_dialect ix86_asm_dialect = ASM_ATT;
2330 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
2332 /* Which unit we are generating floating point math for. */
2333 enum fpmath_unit ix86_fpmath;
2335 /* Which cpu are we scheduling for. */
2336 enum attr_cpu ix86_schedule;
2338 /* Which cpu are we optimizing for. */
2339 enum processor_type ix86_tune;
2341 /* Which instruction set architecture to use. */
2342 enum processor_type ix86_arch;
2344 /* True if the SSE prefetch instruction is not a NOOP. */
2345 int x86_prefetch_sse;
2347 /* ix86_regparm_string as a number */
2348 static int ix86_regparm;
2350 /* -mstackrealign option */
2351 static const char ix86_force_align_arg_pointer_string[]
2352 = "force_align_arg_pointer";
2354 static rtx (*ix86_gen_leave) (void);
2355 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
2356 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
2357 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
2358 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
2359 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
2360 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
2361 static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
2362 static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
2363 static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
2365 /* Preferred alignment for stack boundary in bits. */
2366 unsigned int ix86_preferred_stack_boundary;
2368 /* Alignment for incoming stack boundary in bits specified at
2369 command line. */
2370 static unsigned int ix86_user_incoming_stack_boundary;
2372 /* Default alignment for incoming stack boundary in bits. */
2373 static unsigned int ix86_default_incoming_stack_boundary;
2375 /* Alignment for incoming stack boundary in bits. */
2376 unsigned int ix86_incoming_stack_boundary;
2378 /* The abi used by target. */
2379 enum calling_abi ix86_abi;
2381 /* Values 1-5: see jump.c */
2382 int ix86_branch_cost;
2384 /* Calling abi specific va_list type nodes. */
2385 static GTY(()) tree sysv_va_list_type_node;
2386 static GTY(()) tree ms_va_list_type_node;
2388 /* Variables which are this size or smaller are put in the data/bss
2389 or ldata/lbss sections. */
2391 int ix86_section_threshold = 65536;
2393 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
2394 char internal_label_prefix[16];
2395 int internal_label_prefix_len;
2397 /* Fence to use after loop using movnt. */
2398 tree x86_mfence;
2400 /* Register class used for passing a given 64bit part of the argument.
2401 These represent classes as documented by the PS ABI, with the exception
2402 of the SSESF and SSEDF classes, which are basically SSE class; gcc will just
2403 use SFmode or DFmode moves instead of DImode to avoid reformatting penalties.
2405 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
2406 whenever possible (the upper half does contain padding). */
2407 enum x86_64_reg_class
2408 {
2409 X86_64_NO_CLASS,
2410 X86_64_INTEGER_CLASS,
2411 X86_64_INTEGERSI_CLASS,
2412 X86_64_SSE_CLASS,
2413 X86_64_SSESF_CLASS,
2414 X86_64_SSEDF_CLASS,
2415 X86_64_SSEUP_CLASS,
2416 X86_64_X87_CLASS,
2417 X86_64_X87UP_CLASS,
2418 X86_64_COMPLEX_X87_CLASS,
2419 X86_64_MEMORY_CLASS
2420 };
2422 #define MAX_CLASSES 4
2424 /* Table of constants used by fldpi, fldln2, etc.... */
2425 static REAL_VALUE_TYPE ext_80387_constants_table [5];
2426 static bool ext_80387_constants_init = 0;
2429 static struct machine_function * ix86_init_machine_status (void);
2430 static rtx ix86_function_value (const_tree, const_tree, bool);
2431 static bool ix86_function_value_regno_p (const unsigned int);
2432 static unsigned int ix86_function_arg_boundary (enum machine_mode,
2434 static rtx ix86_static_chain (const_tree, bool);
2435 static int ix86_function_regparm (const_tree, const_tree);
2436 static void ix86_compute_frame_layout (struct ix86_frame *);
2437 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
2439 static void ix86_add_new_builtins (int);
2440 static rtx ix86_expand_vec_perm_builtin (tree);
2441 static tree ix86_canonical_va_list_type (tree);
2442 static void predict_jump (int);
2443 static unsigned int split_stack_prologue_scratch_regno (void);
2444 static bool i386_asm_output_addr_const_extra (FILE *, rtx);
2446 enum ix86_function_specific_strings
2447 {
2448 IX86_FUNCTION_SPECIFIC_ARCH,
2449 IX86_FUNCTION_SPECIFIC_TUNE,
2450 IX86_FUNCTION_SPECIFIC_FPMATH,
2451 IX86_FUNCTION_SPECIFIC_MAX
2452 };
2454 static char *ix86_target_string (int, int, const char *, const char *,
2455 const char *, bool);
2456 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
2457 static void ix86_function_specific_save (struct cl_target_option *);
2458 static void ix86_function_specific_restore (struct cl_target_option *);
2459 static void ix86_function_specific_print (FILE *, int,
2460 struct cl_target_option *);
2461 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2462 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2463 static bool ix86_can_inline_p (tree, tree);
2464 static void ix86_set_current_function (tree);
2465 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2467 static enum calling_abi ix86_function_abi (const_tree);
2470 #ifndef SUBTARGET32_DEFAULT_CPU
2471 #define SUBTARGET32_DEFAULT_CPU "i386"
2472 #endif
2474 /* The svr4 ABI for the i386 says that records and unions are returned
2475 in memory. */
2476 #ifndef DEFAULT_PCC_STRUCT_RETURN
2477 #define DEFAULT_PCC_STRUCT_RETURN 1
2478 #endif
2480 /* Whether -mtune= or -march= were specified */
2481 static int ix86_tune_defaulted;
2482 static int ix86_arch_specified;
2484 /* Define a set of ISAs which are available when a given ISA is
2485 enabled. MMX and SSE ISAs are handled separately. */
2487 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2488 #define OPTION_MASK_ISA_3DNOW_SET \
2489 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2491 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2492 #define OPTION_MASK_ISA_SSE2_SET \
2493 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2494 #define OPTION_MASK_ISA_SSE3_SET \
2495 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2496 #define OPTION_MASK_ISA_SSSE3_SET \
2497 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2498 #define OPTION_MASK_ISA_SSE4_1_SET \
2499 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2500 #define OPTION_MASK_ISA_SSE4_2_SET \
2501 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2502 #define OPTION_MASK_ISA_AVX_SET \
2503 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2504 #define OPTION_MASK_ISA_FMA_SET \
2505 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2507 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
2508 as -msse4.2. */
2509 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2511 #define OPTION_MASK_ISA_SSE4A_SET \
2512 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2513 #define OPTION_MASK_ISA_FMA4_SET \
2514 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2515 | OPTION_MASK_ISA_AVX_SET)
2516 #define OPTION_MASK_ISA_XOP_SET \
2517 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2518 #define OPTION_MASK_ISA_LWP_SET \
2521 /* AES and PCLMUL need SSE2 because they use xmm registers. */
2522 #define OPTION_MASK_ISA_AES_SET \
2523 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2524 #define OPTION_MASK_ISA_PCLMUL_SET \
2525 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2527 #define OPTION_MASK_ISA_ABM_SET \
2528 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2530 #define OPTION_MASK_ISA_BMI_SET OPTION_MASK_ISA_BMI
2531 #define OPTION_MASK_ISA_TBM_SET OPTION_MASK_ISA_TBM
2532 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2533 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2534 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2535 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2536 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
2538 #define OPTION_MASK_ISA_FSGSBASE_SET OPTION_MASK_ISA_FSGSBASE
2539 #define OPTION_MASK_ISA_RDRND_SET OPTION_MASK_ISA_RDRND
2540 #define OPTION_MASK_ISA_F16C_SET \
2541 (OPTION_MASK_ISA_F16C | OPTION_MASK_ISA_AVX_SET)
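/* Illustrative sketch, not part of the original file: because the *_SET
   masks above chain, one option drags in everything it implies; e.g.
   OPTION_MASK_ISA_SSE3_SET expands to SSE3 | SSE2 | SSE.  That is why
   ix86_handle_option below can enable an ISA with a single OR:  */

static void ATTRIBUTE_UNUSED
example_enable_sse3 (struct gcc_options *opts)
{
  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
  opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
}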
2543 /* Define a set of ISAs which aren't available when a given ISA is
2544 disabled. MMX and SSE ISAs are handled separately. */
2546 #define OPTION_MASK_ISA_MMX_UNSET \
2547 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2548 #define OPTION_MASK_ISA_3DNOW_UNSET \
2549 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2550 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2552 #define OPTION_MASK_ISA_SSE_UNSET \
2553 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2554 #define OPTION_MASK_ISA_SSE2_UNSET \
2555 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2556 #define OPTION_MASK_ISA_SSE3_UNSET \
2557 (OPTION_MASK_ISA_SSE3 \
2558 | OPTION_MASK_ISA_SSSE3_UNSET \
2559 | OPTION_MASK_ISA_SSE4A_UNSET )
2560 #define OPTION_MASK_ISA_SSSE3_UNSET \
2561 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2562 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2563 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2564 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2565 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2566 #define OPTION_MASK_ISA_AVX_UNSET \
2567 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2568 | OPTION_MASK_ISA_FMA4_UNSET | OPTION_MASK_ISA_F16C_UNSET)
2569 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2571 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
2572 as -mno-sse4.1. */
2573 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2575 #define OPTION_MASK_ISA_SSE4A_UNSET \
2576 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2578 #define OPTION_MASK_ISA_FMA4_UNSET \
2579 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2580 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2581 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2583 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2584 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2585 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2586 #define OPTION_MASK_ISA_BMI_UNSET OPTION_MASK_ISA_BMI
2587 #define OPTION_MASK_ISA_TBM_UNSET OPTION_MASK_ISA_TBM
2588 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2589 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2590 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2591 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2592 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
2594 #define OPTION_MASK_ISA_FSGSBASE_UNSET OPTION_MASK_ISA_FSGSBASE
2595 #define OPTION_MASK_ISA_RDRND_UNSET OPTION_MASK_ISA_RDRND
2596 #define OPTION_MASK_ISA_F16C_UNSET OPTION_MASK_ISA_F16C
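/* Illustrative sketch, not part of the original file: the *_UNSET masks
   chain in the opposite direction, so disabling an ISA also disables
   everything that depends on it; -mno-sse2, for instance, clears SSE3 and
   everything above it:  */

static void ATTRIBUTE_UNUSED
example_disable_sse2 (struct gcc_options *opts)
{
  opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
  opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
}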
2598 /* Vectorization library interface and handlers. */
2599 static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);
2601 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2602 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2604 /* Processor target table, indexed by processor number. */
2605 struct ptt
2606 {
2607 const struct processor_costs *cost; /* Processor costs */
2608 const int align_loop; /* Default alignments. */
2609 const int align_loop_max_skip;
2610 const int align_jump;
2611 const int align_jump_max_skip;
2612 const int align_func;
2615 static const struct ptt processor_target_table[PROCESSOR_max] =
2616 {
2617 {&i386_cost, 4, 3, 4, 3, 4},
2618 {&i486_cost, 16, 15, 16, 15, 16},
2619 {&pentium_cost, 16, 7, 16, 7, 16},
2620 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2621 {&geode_cost, 0, 0, 0, 0, 0},
2622 {&k6_cost, 32, 7, 32, 7, 32},
2623 {&athlon_cost, 16, 7, 16, 7, 16},
2624 {&pentium4_cost, 0, 0, 0, 0, 0},
2625 {&k8_cost, 16, 7, 16, 7, 16},
2626 {&nocona_cost, 0, 0, 0, 0, 0},
2627 /* Core 2 32-bit. */
2628 {&generic32_cost, 16, 10, 16, 10, 16},
2629 /* Core 2 64-bit. */
2630 {&generic64_cost, 16, 10, 16, 10, 16},
2631 /* Core i7 32-bit. */
2632 {&generic32_cost, 16, 10, 16, 10, 16},
2633 /* Core i7 64-bit. */
2634 {&generic64_cost, 16, 10, 16, 10, 16},
2635 {&generic32_cost, 16, 7, 16, 7, 16},
2636 {&generic64_cost, 16, 10, 16, 10, 16},
2637 {&amdfam10_cost, 32, 24, 32, 7, 32},
2638 {&bdver1_cost, 32, 24, 32, 7, 32},
2639 {&btver1_cost, 32, 24, 32, 7, 32},
2640 {&atom_cost, 16, 7, 16, 7, 16}
2641 };
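/* Illustrative sketch, not part of the original file: the alignment columns
   of processor_target_table are applied during option overriding roughly as
   follows (simplified from ix86_option_override_internal):  */

static void ATTRIBUTE_UNUSED
example_apply_tune_alignments (void)
{
  if (align_loops == 0)
    align_loops = processor_target_table[ix86_tune].align_loop;
  if (align_jumps == 0)
    align_jumps = processor_target_table[ix86_tune].align_jump;
  if (align_functions == 0)
    align_functions = processor_target_table[ix86_tune].align_func;
}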
2643 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2672 /* Return true if a red-zone is in use. */
2674 static inline bool
2675 ix86_using_red_zone (void)
2676 {
2677 return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
2678 }
2680 /* Implement TARGET_HANDLE_OPTION. */
2682 static bool
2683 ix86_handle_option (struct gcc_options *opts,
2684 struct gcc_options *opts_set ATTRIBUTE_UNUSED,
2685 const struct cl_decoded_option *decoded,
2686 location_t loc ATTRIBUTE_UNUSED)
2688 size_t code = decoded->opt_index;
2689 int value = decoded->value;
2696 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2697 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2701 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2702 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2709 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2710 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2714 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2715 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2725 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2726 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2730 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2731 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2738 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2739 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2743 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2744 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2751 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2752 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2756 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2757 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2764 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2765 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2769 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2770 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2777 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2778 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2782 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2783 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2790 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2791 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2795 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2796 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2803 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2804 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2808 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2809 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2816 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2817 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2821 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2822 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2827 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2828 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2832 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2833 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2839 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2840 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2844 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2845 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2852 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2853 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2857 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2858 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2865 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2866 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2870 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2871 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2878 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2879 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2883 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2884 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2891 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2892 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2896 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2897 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2904 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_BMI_SET;
2905 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_SET;
2909 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_BMI_UNSET;
2910 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_UNSET;
2917 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_TBM_SET;
2918 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_SET;
2922 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_TBM_UNSET;
2923 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_UNSET;
2930 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2931 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2935 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2936 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2943 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2944 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2948 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2949 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2956 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2957 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2961 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2962 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2969 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2970 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2974 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2975 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2982 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2983 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2987 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2988 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2995 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2996 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
3000 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
3001 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
3008 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
3009 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
3013 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
3014 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
3021 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE_SET;
3022 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_SET;
3026 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FSGSBASE_UNSET;
3027 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_UNSET;
3034 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_RDRND_SET;
3035 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_SET;
3039 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_RDRND_UNSET;
3040 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_UNSET;
3047 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_F16C_SET;
3048 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_SET;
3052 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_F16C_UNSET;
3053 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_UNSET;
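/* Illustrative sketch, not part of the original file: the pattern above
   records every explicit request in x_ix86_isa_flags_explicit, so the
   -march defaults applied later (see the PTA_* loop near the end of this
   excerpt) never override an explicit -mfoo/-mno-foo.  Hypothetical
   application of one -march implication:  */

static void ATTRIBUTE_UNUSED
example_apply_arch_default (int arch_has_mmx)
{
  if (arch_has_mmx
      && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
    ix86_isa_flags |= OPTION_MASK_ISA_MMX;
}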
3062 /* Return a string that documents the current -m options. The caller is
3063 responsible for freeing the string. */
3065 static char *
3066 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
3067 const char *fpmath, bool add_nl_p)
3069 struct ix86_target_opts
3070 {
3071 const char *option; /* option string */
3072 int mask; /* isa mask options */
3073 };
3075 /* This table is ordered so that options like -msse4.2 that imply
3076 preceding options will match those first. */
3077 static struct ix86_target_opts isa_opts[] =
3078 {
3079 { "-m64", OPTION_MASK_ISA_64BIT },
3080 { "-mfma4", OPTION_MASK_ISA_FMA4 },
3081 { "-mfma", OPTION_MASK_ISA_FMA },
3082 { "-mxop", OPTION_MASK_ISA_XOP },
3083 { "-mlwp", OPTION_MASK_ISA_LWP },
3084 { "-msse4a", OPTION_MASK_ISA_SSE4A },
3085 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
3086 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
3087 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
3088 { "-msse3", OPTION_MASK_ISA_SSE3 },
3089 { "-msse2", OPTION_MASK_ISA_SSE2 },
3090 { "-msse", OPTION_MASK_ISA_SSE },
3091 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
3092 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
3093 { "-mmmx", OPTION_MASK_ISA_MMX },
3094 { "-mabm", OPTION_MASK_ISA_ABM },
3095 { "-mbmi", OPTION_MASK_ISA_BMI },
3096 { "-mtbm", OPTION_MASK_ISA_TBM },
3097 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
3098 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
3099 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
3100 { "-maes", OPTION_MASK_ISA_AES },
3101 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
3102 { "-mfsgsbase", OPTION_MASK_ISA_FSGSBASE },
3103 { "-mrdrnd", OPTION_MASK_ISA_RDRND },
3104 { "-mf16c", OPTION_MASK_ISA_F16C },
3105 };
3107 /* Flag options. */
3108 static struct ix86_target_opts flag_opts[] =
3109 {
3110 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
3111 { "-m80387", MASK_80387 },
3112 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
3113 { "-malign-double", MASK_ALIGN_DOUBLE },
3114 { "-mcld", MASK_CLD },
3115 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
3116 { "-mieee-fp", MASK_IEEE_FP },
3117 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
3118 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
3119 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
3120 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
3121 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
3122 { "-mno-push-args", MASK_NO_PUSH_ARGS },
3123 { "-mno-red-zone", MASK_NO_RED_ZONE },
3124 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
3125 { "-mrecip", MASK_RECIP },
3126 { "-mrtd", MASK_RTD },
3127 { "-msseregparm", MASK_SSEREGPARM },
3128 { "-mstack-arg-probe", MASK_STACK_PROBE },
3129 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
3130 { "-mvect8-ret-in-mem", MASK_VECT8_RETURNS },
3131 { "-m8bit-idiv", MASK_USE_8BIT_IDIV },
3132 { "-mvzeroupper", MASK_VZEROUPPER },
3133 { "-mavx256-split-unaligned-load", MASK_AVX256_SPLIT_UNALIGNED_LOAD},
3134 { "-mavx256-split-unaligned-store", MASK_AVX256_SPLIT_UNALIGNED_STORE},
3137 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
3139 char isa_other[40];
3140 char target_other[40];
3149 memset (opts, '\0', sizeof (opts));
3151 /* Add -march= option. */
3152 if (arch)
3153 {
3154 opts[num][0] = "-march=";
3155 opts[num++][1] = arch;
3156 }
3158 /* Add -mtune= option. */
3159 if (tune)
3160 {
3161 opts[num][0] = "-mtune=";
3162 opts[num++][1] = tune;
3163 }
3165 /* Pick out the options in isa options. */
3166 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
3168 if ((isa & isa_opts[i].mask) != 0)
3170 opts[num++][0] = isa_opts[i].option;
3171 isa &= ~ isa_opts[i].mask;
3175 if (isa && add_nl_p)
3177 opts[num++][0] = isa_other;
3178 sprintf (isa_other, "(other isa: %#x)", isa);
3181 /* Add flag options. */
3182 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
3184 if ((flags & flag_opts[i].mask) != 0)
3186 opts[num++][0] = flag_opts[i].option;
3187 flags &= ~ flag_opts[i].mask;
3191 if (flags && add_nl_p)
3193 opts[num++][0] = target_other;
3194 sprintf (target_other, "(other flags: %#x)", flags);
3197 /* Add -fpmath= option. */
3198 if (fpmath)
3199 {
3200 opts[num][0] = "-mfpmath=";
3201 opts[num++][1] = fpmath;
3202 }
3208 gcc_assert (num < ARRAY_SIZE (opts));
3210 /* Size the string. */
3212 sep_len = (add_nl_p) ? 3 : 1;
3213 for (i = 0; i < num; i++)
3216 for (j = 0; j < 2; j++)
3218 len += strlen (opts[i][j]);
3221 /* Build the string. */
3222 ret = ptr = (char *) xmalloc (len);
3225 for (i = 0; i < num; i++)
3229 for (j = 0; j < 2; j++)
3230 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
3237 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
3245 for (j = 0; j < 2; j++)
3248 memcpy (ptr, opts[i][j], len2[j]);
3250 line_len += len2[j];
3255 gcc_assert (ret + len >= ptr);
3257 return ret;
3258 }
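/* Illustrative usage, not part of the original file: the string returned by
   ix86_target_string reads like "-march=corei7 -mtune=generic -msse4.2 ..."
   and must be freed by the caller, as ix86_debug_options below does.
   Hypothetical caller:  */

static void ATTRIBUTE_UNUSED
example_dump_target_string (FILE *f)
{
  char *opts = ix86_target_string (ix86_isa_flags, target_flags,
				   ix86_arch_string, ix86_tune_string,
				   ix86_fpmath_string, false);
  if (opts)
    fprintf (f, "%s\n", opts);
  free (opts);
}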
3260 /* Return TRUE if software prefetching is beneficial for the
3261 target. */
3263 static bool
3264 software_prefetching_beneficial_p (void)
3268 case PROCESSOR_GEODE:
3270 case PROCESSOR_ATHLON:
3272 case PROCESSOR_AMDFAM10:
3273 case PROCESSOR_BTVER1:
3281 /* Return true if profiling code should be emitted before the
3282 prologue, otherwise false.
3283 Note: for x86 with "hotfix", a sorry () diagnostic is issued. */
3284 static bool
3285 ix86_profile_before_prologue (void)
3287 return flag_fentry != 0;
3290 /* Function that is callable from the debugger to print the current
3291 options. */
3292 static void
3293 ix86_debug_options (void)
3295 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
3296 ix86_arch_string, ix86_tune_string,
3297 ix86_fpmath_string, true);
3299 if (opts)
3300 {
3301 fprintf (stderr, "%s\n\n", opts);
3302 free (opts);
3303 }
3304 else
3305 fputs ("<no options>\n\n", stderr);
3310 /* Override various settings based on options. If MAIN_ARGS_P, the
3311 options are from the command line, otherwise they are from
3312 attribute(target). */
3314 static void
3315 ix86_option_override_internal (bool main_args_p)
3318 unsigned int ix86_arch_mask, ix86_tune_mask;
3319 const bool ix86_tune_specified = (ix86_tune_string != NULL);
3324 /* Comes from final.c -- no real reason to change it. */
3325 #define MAX_CODE_ALIGN 16
3333 PTA_PREFETCH_SSE = 1 << 4,
3335 PTA_3DNOW_A = 1 << 6,
3339 PTA_POPCNT = 1 << 10,
3341 PTA_SSE4A = 1 << 12,
3342 PTA_NO_SAHF = 1 << 13,
3343 PTA_SSE4_1 = 1 << 14,
3344 PTA_SSE4_2 = 1 << 15,
3346 PTA_PCLMUL = 1 << 17,
3349 PTA_MOVBE = 1 << 20,
3353 PTA_FSGSBASE = 1 << 24,
3354 PTA_RDRND = 1 << 25,
3358 /* if this reaches 32, need to widen struct pta flags below */
3361 static struct pta
3362 {
3363 const char *const name; /* processor name or nickname. */
3364 const enum processor_type processor;
3365 const enum attr_cpu schedule;
3366 const unsigned /*enum pta_flags*/ flags;
3367 }
3368 const processor_alias_table[] =
3369 {
3370 {"i386", PROCESSOR_I386, CPU_NONE, 0},
3371 {"i486", PROCESSOR_I486, CPU_NONE, 0},
3372 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3373 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3374 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
3375 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
3376 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3377 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3378 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
3379 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3380 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3381 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
3382 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3384 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3386 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3387 PTA_MMX | PTA_SSE | PTA_SSE2},
3388 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
3389 PTA_MMX | PTA_SSE | PTA_SSE2},
3390 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
3391 PTA_MMX | PTA_SSE | PTA_SSE2},
3392 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
3393 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
3394 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
3395 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3396 | PTA_CX16 | PTA_NO_SAHF},
3397 {"core2", PROCESSOR_CORE2_64, CPU_CORE2,
3398 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3399 | PTA_SSSE3 | PTA_CX16},
3400 {"corei7", PROCESSOR_COREI7_64, CPU_COREI7,
3401 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3402 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16},
3403 {"corei7-avx", PROCESSOR_COREI7_64, CPU_COREI7,
3404 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3405 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
3406 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL},
3407 {"atom", PROCESSOR_ATOM, CPU_ATOM,
3408 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3409 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
3410 {"geode", PROCESSOR_GEODE, CPU_GEODE,
3411 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3412 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
3413 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3414 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3415 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
3416 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3417 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
3418 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3419 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
3420 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3421 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
3422 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3423 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
3424 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3425 {"x86-64", PROCESSOR_K8, CPU_K8,
3426 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
3427 {"k8", PROCESSOR_K8, CPU_K8,
3428 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3429 | PTA_SSE2 | PTA_NO_SAHF},
3430 {"k8-sse3", PROCESSOR_K8, CPU_K8,
3431 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3432 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3433 {"opteron", PROCESSOR_K8, CPU_K8,
3434 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3435 | PTA_SSE2 | PTA_NO_SAHF},
3436 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
3437 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3438 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3439 {"athlon64", PROCESSOR_K8, CPU_K8,
3440 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3441 | PTA_SSE2 | PTA_NO_SAHF},
3442 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
3443 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3444 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3445 {"athlon-fx", PROCESSOR_K8, CPU_K8,
3446 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3447 | PTA_SSE2 | PTA_NO_SAHF},
3448 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3449 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3450 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3451 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3452 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3453 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3454 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
3455 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3456 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
3457 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
3458 | PTA_XOP | PTA_LWP},
3459 {"btver1", PROCESSOR_BTVER1, CPU_GENERIC64,
3460 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3461 | PTA_SSSE3 | PTA_SSE4A | PTA_ABM | PTA_CX16},
3462 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
3463 0 /* flags are only used for -march switch. */ },
3464 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
3465 PTA_64BIT /* flags are only used for -march switch. */ },
3466 };
3468 int const pta_size = ARRAY_SIZE (processor_alias_table);
3470 /* Set up prefix/suffix so the error messages refer to either the command
3471 line argument, or the attribute(target). */
3480 prefix = "option(\"";
3485 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3486 SUBTARGET_OVERRIDE_OPTIONS;
3487 #endif
3489 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3490 SUBSUBTARGET_OVERRIDE_OPTIONS;
3491 #endif
3493 /* -fPIC is the default for x86_64. */
3494 if (TARGET_MACHO && TARGET_64BIT)
3495 flag_pic = 2;
3497 /* Need to check -mtune=generic first. */
3498 if (ix86_tune_string)
3500 if (!strcmp (ix86_tune_string, "generic")
3501 || !strcmp (ix86_tune_string, "i686")
3502 /* As special support for cross compilers we read -mtune=native
3503 as -mtune=generic. With native compilers we won't see the
3504 -mtune=native, as it was changed by the driver. */
3505 || !strcmp (ix86_tune_string, "native"))
3508 ix86_tune_string = "generic64";
3510 ix86_tune_string = "generic32";
3512 /* If this call is for setting the option attribute, allow the
3513 generic32/generic64 that was previously set. */
3514 else if (!main_args_p
3515 && (!strcmp (ix86_tune_string, "generic32")
3516 || !strcmp (ix86_tune_string, "generic64")))
3518 else if (!strncmp (ix86_tune_string, "generic", 7))
3519 error ("bad value (%s) for %stune=%s %s",
3520 ix86_tune_string, prefix, suffix, sw);
3521 else if (!strcmp (ix86_tune_string, "x86-64"))
3522 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
3523 "%stune=k8%s or %stune=generic%s instead as appropriate",
3524 prefix, suffix, prefix, suffix, prefix, suffix);
3528 if (ix86_arch_string)
3529 ix86_tune_string = ix86_arch_string;
3530 if (!ix86_tune_string)
3532 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
3533 ix86_tune_defaulted = 1;
3536 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
3537 need to use a sensible tune option. */
3538 if (!strcmp (ix86_tune_string, "generic")
3539 || !strcmp (ix86_tune_string, "x86-64")
3540 || !strcmp (ix86_tune_string, "i686"))
3543 ix86_tune_string = "generic64";
3545 ix86_tune_string = "generic32";
3549 if (ix86_stringop_string)
3551 if (!strcmp (ix86_stringop_string, "rep_byte"))
3552 stringop_alg = rep_prefix_1_byte;
3553 else if (!strcmp (ix86_stringop_string, "libcall"))
3554 stringop_alg = libcall;
3555 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
3556 stringop_alg = rep_prefix_4_byte;
3557 else if (!strcmp (ix86_stringop_string, "rep_8byte")
3558 && TARGET_64BIT)
3559 /* rep; movq isn't available in 32-bit code. */
3560 stringop_alg = rep_prefix_8_byte;
3561 else if (!strcmp (ix86_stringop_string, "byte_loop"))
3562 stringop_alg = loop_1_byte;
3563 else if (!strcmp (ix86_stringop_string, "loop"))
3564 stringop_alg = loop;
3565 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
3566 stringop_alg = unrolled_loop;
3568 error ("bad value (%s) for %sstringop-strategy=%s %s",
3569 ix86_stringop_string, prefix, suffix, sw);
3572 if (!ix86_arch_string)
3573 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3575 ix86_arch_specified = 1;
3577 /* Validate -mabi= value. */
3578 if (ix86_abi_string)
3580 if (strcmp (ix86_abi_string, "sysv") == 0)
3581 ix86_abi = SYSV_ABI;
3582 else if (strcmp (ix86_abi_string, "ms") == 0)
3585 error ("unknown ABI (%s) for %sabi=%s %s",
3586 ix86_abi_string, prefix, suffix, sw);
3589 ix86_abi = DEFAULT_ABI;
3591 if (ix86_cmodel_string != 0)
3593 if (!strcmp (ix86_cmodel_string, "small"))
3594 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3595 else if (!strcmp (ix86_cmodel_string, "medium"))
3596 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3597 else if (!strcmp (ix86_cmodel_string, "large"))
3598 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3600 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3601 else if (!strcmp (ix86_cmodel_string, "32"))
3602 ix86_cmodel = CM_32;
3603 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3604 ix86_cmodel = CM_KERNEL;
3606 error ("bad value (%s) for %scmodel=%s %s",
3607 ix86_cmodel_string, prefix, suffix, sw);
3611 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3612 use of rip-relative addressing. This eliminates fixups that
3613 would otherwise be needed if this object is to be placed in a
3614 DLL, and is essentially just as efficient as direct addressing. */
3615 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3616 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3617 else if (TARGET_64BIT)
3618 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3620 ix86_cmodel = CM_32;
3622 if (ix86_asm_string != 0)
3625 && !strcmp (ix86_asm_string, "intel"))
3626 ix86_asm_dialect = ASM_INTEL;
3627 else if (!strcmp (ix86_asm_string, "att"))
3628 ix86_asm_dialect = ASM_ATT;
3630 error ("bad value (%s) for %sasm=%s %s",
3631 ix86_asm_string, prefix, suffix, sw);
3633 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3634 error ("code model %qs not supported in the %s bit mode",
3635 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3636 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3637 sorry ("%i-bit mode not compiled in",
3638 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3640 for (i = 0; i < pta_size; i++)
3641 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3643 ix86_schedule = processor_alias_table[i].schedule;
3644 ix86_arch = processor_alias_table[i].processor;
3645 /* Default cpu tuning to the architecture. */
3646 ix86_tune = ix86_arch;
3648 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3649 error ("CPU you selected does not support x86-64 "
3652 if (processor_alias_table[i].flags & PTA_MMX
3653 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3654 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3655 if (processor_alias_table[i].flags & PTA_3DNOW
3656 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3657 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3658 if (processor_alias_table[i].flags & PTA_3DNOW_A
3659 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3660 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3661 if (processor_alias_table[i].flags & PTA_SSE
3662 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3663 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3664 if (processor_alias_table[i].flags & PTA_SSE2
3665 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3666 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3667 if (processor_alias_table[i].flags & PTA_SSE3
3668 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3669 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3670 if (processor_alias_table[i].flags & PTA_SSSE3
3671 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3672 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3673 if (processor_alias_table[i].flags & PTA_SSE4_1
3674 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3675 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3676 if (processor_alias_table[i].flags & PTA_SSE4_2
3677 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3678 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3679 if (processor_alias_table[i].flags & PTA_AVX
3680 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3681 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3682 if (processor_alias_table[i].flags & PTA_FMA
3683 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3684 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3685 if (processor_alias_table[i].flags & PTA_SSE4A
3686 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3687 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3688 if (processor_alias_table[i].flags & PTA_FMA4
3689 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3690 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3691 if (processor_alias_table[i].flags & PTA_XOP
3692 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3693 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3694 if (processor_alias_table[i].flags & PTA_LWP
3695 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3696 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3697 if (processor_alias_table[i].flags & PTA_ABM
3698 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3699 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3700 if (processor_alias_table[i].flags & PTA_BMI
3701 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
3702 ix86_isa_flags |= OPTION_MASK_ISA_BMI;
3703 if (processor_alias_table[i].flags & PTA_TBM
3704 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
3705 ix86_isa_flags |= OPTION_MASK_ISA_TBM;
3706 if (processor_alias_table[i].flags & PTA_CX16
3707 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3708 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3709 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3710 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3711 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3712 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3713 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3714 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3715 if (processor_alias_table[i].flags & PTA_MOVBE
3716 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3717 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3718 if (processor_alias_table[i].flags & PTA_AES
3719 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3720 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3721 if (processor_alias_table[i].flags & PTA_PCLMUL
3722 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3723 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3724 if (processor_alias_table[i].flags & PTA_FSGSBASE
3725 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
3726 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
3727 if (processor_alias_table[i].flags & PTA_RDRND
3728 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
3729 ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
3730 if (processor_alias_table[i].flags & PTA_F16C
3731 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
3732 ix86_isa_flags |= OPTION_MASK_ISA_F16C;
3733 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3734 x86_prefetch_sse = true;
3739 if (!strcmp (ix86_arch_string, "generic"))
3740 error ("generic CPU can be used only for %stune=%s %s",
3741 prefix, suffix, sw);
3742 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3743 error ("bad value (%s) for %sarch=%s %s",
3744 ix86_arch_string, prefix, suffix, sw);
3746 ix86_arch_mask = 1u << ix86_arch;
3747 for (i = 0; i < X86_ARCH_LAST; ++i)
3748 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
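/* Sketch of the feature-mask scheme (annotation, not in the original
   source): each initial_ix86_arch_features[] entry is a bitmask with one
   bit per processor, so testing bit (1u << ix86_arch) answers "does the
   selected arch have feature i?".  A hypothetical entry of
   (1u << PROCESSOR_K8) | (1u << PROCESSOR_CORE2_64) would enable that
   feature for exactly those two -march selections.  */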
3750 for (i = 0; i < pta_size; i++)
3751 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3753 ix86_schedule = processor_alias_table[i].schedule;
3754 ix86_tune = processor_alias_table[i].processor;
3757 if (!(processor_alias_table[i].flags & PTA_64BIT))
3759 if (ix86_tune_defaulted)
3761 ix86_tune_string = "x86-64";
3762 for (i = 0; i < pta_size; i++)
3763 if (! strcmp (ix86_tune_string,
3764 processor_alias_table[i].name))
3766 ix86_schedule = processor_alias_table[i].schedule;
3767 ix86_tune = processor_alias_table[i].processor;
3770 error ("CPU you selected does not support x86-64 "
3776 /* Adjust tuning when compiling for 32-bit ABI. */
3779 case PROCESSOR_GENERIC64:
3780 ix86_tune = PROCESSOR_GENERIC32;
3781 ix86_schedule = CPU_PENTIUMPRO;
3784 case PROCESSOR_CORE2_64:
3785 ix86_tune = PROCESSOR_CORE2_32;
3788 case PROCESSOR_COREI7_64:
3789 ix86_tune = PROCESSOR_COREI7_32;
3796 /* Intel CPUs have always interpreted SSE prefetch instructions as
3797 NOPs; so, we can enable SSE prefetch instructions even when
3798 -mtune (rather than -march) points us to a processor that has them.
3799 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3800 higher processors. */
3802 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3803 x86_prefetch_sse = true;
3807 if (ix86_tune_specified && i == pta_size)
3808 error ("bad value (%s) for %stune=%s %s",
3809 ix86_tune_string, prefix, suffix, sw);
3811 ix86_tune_mask = 1u << ix86_tune;
3812 for (i = 0; i < X86_TUNE_LAST; ++i)
3813 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3815 #ifndef USE_IX86_FRAME_POINTER
3816 #define USE_IX86_FRAME_POINTER 0
3819 #ifndef USE_X86_64_FRAME_POINTER
3820 #define USE_X86_64_FRAME_POINTER 0
3823 /* Set the default values for switches whose default depends on TARGET_64BIT
3824 in case they weren't overridden by command-line options. */
3827 if (optimize > 1 && !global_options_set.x_flag_zee)
3829 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3830 flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
3831 if (flag_asynchronous_unwind_tables == 2)
3832 flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
3833 if (flag_pcc_struct_return == 2)
3834 flag_pcc_struct_return = 0;
3838 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3839 flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
3840 if (flag_asynchronous_unwind_tables == 2)
3841 flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
3842 if (flag_pcc_struct_return == 2)
3843 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
3847 ix86_cost = &ix86_size_cost;
3849 ix86_cost = processor_target_table[ix86_tune].cost;
3851 /* Arrange to set up i386_stack_locals for all functions. */
3852 init_machine_status = ix86_init_machine_status;
3854 /* Validate -mregparm= value. */
3855 if (ix86_regparm_string)
3858 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3859 i = atoi (ix86_regparm_string);
3860 if (i < 0 || i > REGPARM_MAX)
3861 error ("%sregparm=%d%s is not between 0 and %d",
3862 prefix, i, suffix, REGPARM_MAX);
3867 ix86_regparm = REGPARM_MAX;
3869 /* If the user has provided any of the -malign-* options,
3870 warn and use that value only if -falign-* is not set.
3871 Remove this code in GCC 3.2 or later. */
3872 if (ix86_align_loops_string)
3874 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3875 prefix, suffix, suffix);
3876 if (align_loops == 0)
3878 i = atoi (ix86_align_loops_string);
3879 if (i < 0 || i > MAX_CODE_ALIGN)
3880 error ("%salign-loops=%d%s is not between 0 and %d",
3881 prefix, i, suffix, MAX_CODE_ALIGN);
3883 align_loops = 1 << i;
3887 if (ix86_align_jumps_string)
3889 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3890 prefix, suffix, suffix);
3891 if (align_jumps == 0)
3893 i = atoi (ix86_align_jumps_string);
3894 if (i < 0 || i > MAX_CODE_ALIGN)
3895 error ("%salign-loops=%d%s is not between 0 and %d",
3896 prefix, i, suffix, MAX_CODE_ALIGN);
3898 align_jumps = 1 << i;
3902 if (ix86_align_funcs_string)
3904 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3905 prefix, suffix, suffix);
3906 if (align_functions == 0)
3908 i = atoi (ix86_align_funcs_string);
3909 if (i < 0 || i > MAX_CODE_ALIGN)
3910 error ("%salign-loops=%d%s is not between 0 and %d",
3911 prefix, i, suffix, MAX_CODE_ALIGN);
3913 align_functions = 1 << i;
3917 /* Default align_* from the processor table. */
3918 if (align_loops == 0)
3920 align_loops = processor_target_table[ix86_tune].align_loop;
3921 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3923 if (align_jumps == 0)
3925 align_jumps = processor_target_table[ix86_tune].align_jump;
3926 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3928 if (align_functions == 0)
3930 align_functions = processor_target_table[ix86_tune].align_func;
3933 /* Validate -mbranch-cost= value, or provide default. */
3934 ix86_branch_cost = ix86_cost->branch_cost;
3935 if (ix86_branch_cost_string)
3937 i = atoi (ix86_branch_cost_string);
3939 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3941 ix86_branch_cost = i;
3943 if (ix86_section_threshold_string)
3945 i = atoi (ix86_section_threshold_string);
3947 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3949 ix86_section_threshold = i;
3952 if (ix86_tls_dialect_string)
3954 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3955 ix86_tls_dialect = TLS_DIALECT_GNU;
3956 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3957 ix86_tls_dialect = TLS_DIALECT_GNU2;
3959 error ("bad value (%s) for %stls-dialect=%s %s",
3960 ix86_tls_dialect_string, prefix, suffix, sw);
3963 if (ix87_precision_string)
3965 i = atoi (ix87_precision_string);
3966 if (i != 32 && i != 64 && i != 80)
3967 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3972 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3974 /* Enable by default the SSE and MMX builtins. Do allow the user to
3975 explicitly disable any of these. In particular, disabling SSE and
3976 MMX for kernel code is extremely useful. */
3977 if (!ix86_arch_specified)
3979 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3980 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3983 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3987 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3989 if (!ix86_arch_specified)
3991 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3993 /* The i386 ABI does not specify a red zone. It still makes sense to use
3994 one when the programmer takes care to keep the stack from being destroyed. */
3995 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3996 target_flags |= MASK_NO_RED_ZONE;
3999 /* Keep nonleaf frame pointers. */
4000 if (flag_omit_frame_pointer)
4001 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
4002 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
4003 flag_omit_frame_pointer = 1;
4005 /* If we're doing fast math, we don't care about comparison order
4006 wrt NaNs. This lets us use a shorter comparison sequence. */
4007 if (flag_finite_math_only)
4008 target_flags &= ~MASK_IEEE_FP;
4010 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
4011 since the insns won't need emulation. */
4012 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
4013 target_flags &= ~MASK_NO_FANCY_MATH_387;
4015 /* Likewise, if the target doesn't have a 387, or we've specified
4016 software floating point, don't use 387 inline intrinsics. */
4018 target_flags |= MASK_NO_FANCY_MATH_387;
4020 /* Turn on MMX builtins for -msse. */
4023 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
4024 x86_prefetch_sse = true;
4027 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
4028 if (TARGET_SSE4_2 || TARGET_ABM)
4029 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
4031 /* Validate -mpreferred-stack-boundary= value or default it to
4032 PREFERRED_STACK_BOUNDARY_DEFAULT. */
4033 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
4034 if (ix86_preferred_stack_boundary_string)
4036 int min = (TARGET_64BIT ? 4 : 2);
4037 int max = (TARGET_SEH ? 4 : 12);
4039 i = atoi (ix86_preferred_stack_boundary_string);
4040 if (i < min || i > max)
4043 error ("%spreferred-stack-boundary%s is not supported "
4044 "for this target", prefix, suffix);
4046 error ("%spreferred-stack-boundary=%d%s is not between %d and %d",
4047 prefix, i, suffix, min, max);
4050 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
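/* Worked example (annotation, not in the original source): with
   -mpreferred-stack-boundary=4 this computes (1 << 4) * 8 = 128 bits,
   i.e. a 16-byte boundary, the usual SSE-friendly stack alignment.  */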
4053 /* Set the default value for -mstackrealign. */
4054 if (ix86_force_align_arg_pointer == -1)
4055 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
4057 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
4059 /* Validate -mincoming-stack-boundary= value or default it to
4060 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
4061 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
4062 if (ix86_incoming_stack_boundary_string)
4064 i = atoi (ix86_incoming_stack_boundary_string);
4065 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
4066 error ("-mincoming-stack-boundary=%d is not between %d and 12",
4067 i, TARGET_64BIT ? 4 : 2);
4070 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
4071 ix86_incoming_stack_boundary
4072 = ix86_user_incoming_stack_boundary;
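/* Example (annotation, not in the original source): per the bounds
   checked above, -mincoming-stack-boundary=2 in 32-bit mode gives
   (1 << 2) * 8 = 32 bits, i.e. the traditional 4-byte incoming
   alignment of the ia32 ABI.  */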
4076 /* Accept -msseregparm only if at least SSE support is enabled. */
4077 if (TARGET_SSEREGPARM
4079 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
4081 ix86_fpmath = TARGET_FPMATH_DEFAULT;
4082 if (ix86_fpmath_string != 0)
4084 if (! strcmp (ix86_fpmath_string, "387"))
4085 ix86_fpmath = FPMATH_387;
4086 else if (! strcmp (ix86_fpmath_string, "sse"))
4090 warning (0, "SSE instruction set disabled, using 387 arithmetics");
4091 ix86_fpmath = FPMATH_387;
4094 ix86_fpmath = FPMATH_SSE;
4096 else if (! strcmp (ix86_fpmath_string, "387,sse")
4097 || ! strcmp (ix86_fpmath_string, "387+sse")
4098 || ! strcmp (ix86_fpmath_string, "sse,387")
4099 || ! strcmp (ix86_fpmath_string, "sse+387")
4100 || ! strcmp (ix86_fpmath_string, "both"))
4104 warning (0, "SSE instruction set disabled, using 387 arithmetics");
4105 ix86_fpmath = FPMATH_387;
4107 else if (!TARGET_80387)
4109 warning (0, "387 instruction set disabled, using SSE arithmetics");
4110 ix86_fpmath = FPMATH_SSE;
4113 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
4116 error ("bad value (%s) for %sfpmath=%s %s",
4117 ix86_fpmath_string, prefix, suffix, sw);
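/* Note (annotation, not in the original source): ix86_fpmath is a bit
   set, so FPMATH_SSE | FPMATH_387 above enables both units at once;
   e.g. -mfpmath=sse,387 makes both the x87 and SSE register sets
   available for floating-point values.  */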
4120 /* If the i387 is disabled, then do not return values in it. */
4122 target_flags &= ~MASK_FLOAT_RETURNS;
4124 /* Use external vectorized library in vectorizing intrinsics. */
4125 if (ix86_veclibabi_string)
4127 if (strcmp (ix86_veclibabi_string, "svml") == 0)
4128 ix86_veclib_handler = ix86_veclibabi_svml;
4129 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
4130 ix86_veclib_handler = ix86_veclibabi_acml;
4132 error ("unknown vectorization library ABI type (%s) for "
4133 "%sveclibabi=%s %s", ix86_veclibabi_string,
4134 prefix, suffix, sw);
4137 if ((!USE_IX86_FRAME_POINTER
4138 || (x86_accumulate_outgoing_args & ix86_tune_mask))
4139 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4141 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4143 /* ??? Unwind info is not correct around the CFG unless either a frame
4144 pointer is present or M_A_O_A is set. Fixing this requires rewriting
4145 unwind info generation to be aware of the CFG and propagating states
4147 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
4148 || flag_exceptions || flag_non_call_exceptions)
4149 && flag_omit_frame_pointer
4150 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
4152 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4153 warning (0, "unwind tables currently require either a frame pointer "
4154 "or %saccumulate-outgoing-args%s for correctness",
4156 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4159 /* If stack probes are required, the space used for large function
4160 arguments on the stack must also be probed, so enable
4161 -maccumulate-outgoing-args so this happens in the prologue. */
4162 if (TARGET_STACK_PROBE
4163 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
4165 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4166 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
4167 "for correctness", prefix, suffix);
4168 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4171 /* For sane SSE instruction set generation we need the fcomi
4172 instruction. It is safe to enable all CMOVE instructions. */
4176 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
4179 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
4180 p = strchr (internal_label_prefix, 'X');
4181 internal_label_prefix_len = p - internal_label_prefix;
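/* How this works (annotation, not in the original source):
   ASM_GENERATE_INTERNAL_LABEL writes a label such as "*.LX0" into the
   buffer (the exact form is target-dependent); locating the 'X' we
   planted in "LX" measures whatever prefix the target prepends, so
   later code can recognize internally generated labels by prefix.  */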
4185 /* When a scheduling description is not available, disable the scheduler
4186 pass so it won't slow down compilation and make x87 code slower. */
4187 if (!TARGET_SCHEDULE)
4188 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
4190 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4191 ix86_cost->simultaneous_prefetches,
4192 global_options.x_param_values,
4193 global_options_set.x_param_values);
4194 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, ix86_cost->prefetch_block,
4195 global_options.x_param_values,
4196 global_options_set.x_param_values);
4197 maybe_set_param_value (PARAM_L1_CACHE_SIZE, ix86_cost->l1_cache_size,
4198 global_options.x_param_values,
4199 global_options_set.x_param_values);
4200 maybe_set_param_value (PARAM_L2_CACHE_SIZE, ix86_cost->l2_cache_size,
4201 global_options.x_param_values,
4202 global_options_set.x_param_values);
4204 /* Enable software prefetching at -O3 for CPUs where prefetching is helpful. */
4205 if (flag_prefetch_loop_arrays < 0
4208 && software_prefetching_beneficial_p ())
4209 flag_prefetch_loop_arrays = 1;
4211 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
4212 can be optimized to ap = __builtin_next_arg (0). */
4213 if (!TARGET_64BIT && !flag_split_stack)
4214 targetm.expand_builtin_va_start = NULL;
4218 ix86_gen_leave = gen_leave_rex64;
4219 ix86_gen_add3 = gen_adddi3;
4220 ix86_gen_sub3 = gen_subdi3;
4221 ix86_gen_sub3_carry = gen_subdi3_carry;
4222 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
4223 ix86_gen_monitor = gen_sse3_monitor64;
4224 ix86_gen_andsp = gen_anddi3;
4225 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
4226 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
4227 ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
4231 ix86_gen_leave = gen_leave;
4232 ix86_gen_add3 = gen_addsi3;
4233 ix86_gen_sub3 = gen_subsi3;
4234 ix86_gen_sub3_carry = gen_subsi3_carry;
4235 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
4236 ix86_gen_monitor = gen_sse3_monitor;
4237 ix86_gen_andsp = gen_andsi3;
4238 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
4239 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
4240 ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
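/* Design note (annotation, not in the original source): routing these
   generators through function pointers lets the rest of the backend
   emit word-size (SImode vs. DImode) operations without re-testing
   TARGET_64BIT at every call site.  */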
4244 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
4246 target_flags |= MASK_CLD & ~target_flags_explicit;
4249 if (!TARGET_64BIT && flag_pic)
4251 if (flag_fentry > 0)
4252 sorry ("-mfentry isn%'t supported for 32-bit in combination "
4256 else if (TARGET_SEH)
4258 if (flag_fentry == 0)
4259 sorry ("-mno-fentry isn%'t compatible with SEH");
4262 else if (flag_fentry < 0)
4264 #if defined(PROFILE_BEFORE_PROLOGUE)
4271 /* Save the initial options in case the user uses function-specific options. */
4273 target_option_default_node = target_option_current_node
4274 = build_target_option_node ();
4278 /* When not optimizing for size, enable the vzeroupper optimization
4279 for TARGET_AVX with -fexpensive-optimizations, and split 32-byte
4280 AVX unaligned loads/stores. */
4283 if (flag_expensive_optimizations
4284 && !(target_flags_explicit & MASK_VZEROUPPER))
4285 target_flags |= MASK_VZEROUPPER;
4286 if (!(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
4287 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
4288 if (!(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
4289 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
4294 /* Disable vzeroupper pass if TARGET_AVX is disabled. */
4295 target_flags &= ~MASK_VZEROUPPER;
4299 /* Return TRUE if VAL is passed in a register in a 256-bit AVX mode. */
4302 function_pass_avx256_p (const_rtx val)
4307 if (REG_P (val) && VALID_AVX256_REG_MODE (GET_MODE (val)))
4310 if (GET_CODE (val) == PARALLEL)
4315 for (i = XVECLEN (val, 0) - 1; i >= 0; i--)
4317 r = XVECEXP (val, 0, i);
4318 if (GET_CODE (r) == EXPR_LIST
4320 && REG_P (XEXP (r, 0))
4321 && (GET_MODE (XEXP (r, 0)) == OImode
4322 || VALID_AVX256_REG_MODE (GET_MODE (XEXP (r, 0)))))
4330 /* Implement the TARGET_OPTION_OVERRIDE hook. */
4333 ix86_option_override (void)
4335 ix86_option_override_internal (true);
4338 /* Update register usage after having seen the compiler flags. */
4341 ix86_conditional_register_usage (void)
4346 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4348 if (fixed_regs[i] > 1)
4349 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
4350 if (call_used_regs[i] > 1)
4351 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
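/* Decoding note (annotation, not in the original source): the target's
   register tables may use 2 and 3 as conditional markers; per the tests
   above, 2 means "fixed/call-used only in 32-bit mode" and 3 means
   "only in 64-bit mode", and both are reduced to plain 0/1 here.  */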
4354 /* The PIC register, if it exists, is fixed. */
4355 j = PIC_OFFSET_TABLE_REGNUM;
4356 if (j != INVALID_REGNUM)
4357 fixed_regs[j] = call_used_regs[j] = 1;
4359 /* The 64-bit MS_ABI changes the set of call-used registers. */
4360 if (TARGET_64BIT_MS_ABI)
4362 call_used_regs[SI_REG] = 0;
4363 call_used_regs[DI_REG] = 0;
4364 call_used_regs[XMM6_REG] = 0;
4365 call_used_regs[XMM7_REG] = 0;
4366 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4367 call_used_regs[i] = 0;
4370 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
4371 other call-clobbered regs for 64-bit. */
4374 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
4376 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4377 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
4378 && call_used_regs[i])
4379 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
4382 /* If MMX is disabled, squash the registers. */
4384 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4385 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
4386 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4388 /* If SSE is disabled, squash the registers. */
4390 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4391 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
4392 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4394 /* If the FPU is disabled, squash the registers. */
4395 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
4396 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4397 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
4398 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4400 /* If 32-bit, squash the 64-bit registers. */
4403 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
4405 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4411 /* Save the current options */
4414 ix86_function_specific_save (struct cl_target_option *ptr)
4416 ptr->arch = ix86_arch;
4417 ptr->schedule = ix86_schedule;
4418 ptr->tune = ix86_tune;
4419 ptr->fpmath = ix86_fpmath;
4420 ptr->branch_cost = ix86_branch_cost;
4421 ptr->tune_defaulted = ix86_tune_defaulted;
4422 ptr->arch_specified = ix86_arch_specified;
4423 ptr->x_ix86_isa_flags_explicit = ix86_isa_flags_explicit;
4424 ptr->ix86_target_flags_explicit = target_flags_explicit;
4426 /* The fields are char but the variables are not; make sure the
4427 values fit in the fields. */
4428 gcc_assert (ptr->arch == ix86_arch);
4429 gcc_assert (ptr->schedule == ix86_schedule);
4430 gcc_assert (ptr->tune == ix86_tune);
4431 gcc_assert (ptr->fpmath == ix86_fpmath);
4432 gcc_assert (ptr->branch_cost == ix86_branch_cost);
4435 /* Restore the current options */
4438 ix86_function_specific_restore (struct cl_target_option *ptr)
4440 enum processor_type old_tune = ix86_tune;
4441 enum processor_type old_arch = ix86_arch;
4442 unsigned int ix86_arch_mask, ix86_tune_mask;
4445 ix86_arch = (enum processor_type) ptr->arch;
4446 ix86_schedule = (enum attr_cpu) ptr->schedule;
4447 ix86_tune = (enum processor_type) ptr->tune;
4448 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
4449 ix86_branch_cost = ptr->branch_cost;
4450 ix86_tune_defaulted = ptr->tune_defaulted;
4451 ix86_arch_specified = ptr->arch_specified;
4452 ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
4453 target_flags_explicit = ptr->ix86_target_flags_explicit;
4455 /* Recreate the arch feature tests if the arch changed */
4456 if (old_arch != ix86_arch)
4458 ix86_arch_mask = 1u << ix86_arch;
4459 for (i = 0; i < X86_ARCH_LAST; ++i)
4460 ix86_arch_features[i]
4461 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
4464 /* Recreate the tune optimization tests */
4465 if (old_tune != ix86_tune)
4467 ix86_tune_mask = 1u << ix86_tune;
4468 for (i = 0; i < X86_TUNE_LAST; ++i)
4469 ix86_tune_features[i]
4470 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
4474 /* Print the current options */
4477 ix86_function_specific_print (FILE *file, int indent,
4478 struct cl_target_option *ptr)
4481 = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
4482 NULL, NULL, NULL, false);
4484 fprintf (file, "%*sarch = %d (%s)\n",
4487 ((ptr->arch < TARGET_CPU_DEFAULT_max)
4488 ? cpu_names[ptr->arch]
4491 fprintf (file, "%*stune = %d (%s)\n",
4494 ((ptr->tune < TARGET_CPU_DEFAULT_max)
4495 ? cpu_names[ptr->tune]
4498 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
4499 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
4500 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
4501 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
4505 fprintf (file, "%*s%s\n", indent, "", target_string);
4506 free (target_string);
4511 /* Inner function to process the attribute((target(...))), take an argument and
4512 set the current options from the argument. If we have a list, recursively go
4516 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
4521 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
4522 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
4523 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
4524 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
4539 enum ix86_opt_type type;
4544 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
4545 IX86_ATTR_ISA ("abm", OPT_mabm),
4546 IX86_ATTR_ISA ("bmi", OPT_mbmi),
4547 IX86_ATTR_ISA ("tbm", OPT_mtbm),
4548 IX86_ATTR_ISA ("aes", OPT_maes),
4549 IX86_ATTR_ISA ("avx", OPT_mavx),
4550 IX86_ATTR_ISA ("mmx", OPT_mmmx),
4551 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
4552 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
4553 IX86_ATTR_ISA ("sse", OPT_msse),
4554 IX86_ATTR_ISA ("sse2", OPT_msse2),
4555 IX86_ATTR_ISA ("sse3", OPT_msse3),
4556 IX86_ATTR_ISA ("sse4", OPT_msse4),
4557 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
4558 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
4559 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
4560 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
4561 IX86_ATTR_ISA ("fma4", OPT_mfma4),
4562 IX86_ATTR_ISA ("xop", OPT_mxop),
4563 IX86_ATTR_ISA ("lwp", OPT_mlwp),
4564 IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
4565 IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
4566 IX86_ATTR_ISA ("f16c", OPT_mf16c),
4568 /* string options */
4569 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
4570 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
4571 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
4574 IX86_ATTR_YES ("cld",
4578 IX86_ATTR_NO ("fancy-math-387",
4579 OPT_mfancy_math_387,
4580 MASK_NO_FANCY_MATH_387),
4582 IX86_ATTR_YES ("ieee-fp",
4586 IX86_ATTR_YES ("inline-all-stringops",
4587 OPT_minline_all_stringops,
4588 MASK_INLINE_ALL_STRINGOPS),
4590 IX86_ATTR_YES ("inline-stringops-dynamically",
4591 OPT_minline_stringops_dynamically,
4592 MASK_INLINE_STRINGOPS_DYNAMICALLY),
4594 IX86_ATTR_NO ("align-stringops",
4595 OPT_mno_align_stringops,
4596 MASK_NO_ALIGN_STRINGOPS),
4598 IX86_ATTR_YES ("recip",
4604 /* If this is a list, recurse to get the options. */
4605 if (TREE_CODE (args) == TREE_LIST)
4609 for (; args; args = TREE_CHAIN (args))
4610 if (TREE_VALUE (args)
4611 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
4617 else if (TREE_CODE (args) != STRING_CST)
4620 /* Handle multiple arguments separated by commas. */
4621 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4623 while (next_optstr && *next_optstr != '\0')
4625 char *p = next_optstr;
4627 char *comma = strchr (next_optstr, ',');
4628 const char *opt_string;
4629 size_t len, opt_len;
4634 enum ix86_opt_type type = ix86_opt_unknown;
4640 len = comma - next_optstr;
4641 next_optstr = comma + 1;
4649 /* Recognize no-xxx. */
4650 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4659 /* Find the option. */
4662 for (i = 0; i < ARRAY_SIZE (attrs); i++)
4664 type = attrs[i].type;
4665 opt_len = attrs[i].len;
4666 if (ch == attrs[i].string[0]
4667 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
4668 && memcmp (p, attrs[i].string, opt_len) == 0)
4671 mask = attrs[i].mask;
4672 opt_string = attrs[i].string;
4677 /* Process the option. */
4680 error ("attribute(target(\"%s\")) is unknown", orig_p);
4684 else if (type == ix86_opt_isa)
4686 struct cl_decoded_option decoded;
4688 generate_option (opt, NULL, opt_set_p, CL_TARGET, &decoded);
4689 ix86_handle_option (&global_options, &global_options_set,
4690 &decoded, input_location);
4693 else if (type == ix86_opt_yes || type == ix86_opt_no)
4695 if (type == ix86_opt_no)
4696 opt_set_p = !opt_set_p;
4699 target_flags |= mask;
4701 target_flags &= ~mask;
4704 else if (type == ix86_opt_str)
4708 error ("option(\"%s\") was already specified", opt_string);
4712 p_strings[opt] = xstrdup (p + opt_len);
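/* Usage sketch (annotation, not in the original source): a declaration
   such as
     int foo (void) __attribute__((target("sse4.2,no-fancy-math-387,arch=core2")));
   exercises all three kinds handled above: an ISA option, a "no-"
   prefixed mask option, and a string option captured in p_strings.  */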
4722 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
4725 ix86_valid_target_attribute_tree (tree args)
4727 const char *orig_arch_string = ix86_arch_string;
4728 const char *orig_tune_string = ix86_tune_string;
4729 const char *orig_fpmath_string = ix86_fpmath_string;
4730 int orig_tune_defaulted = ix86_tune_defaulted;
4731 int orig_arch_specified = ix86_arch_specified;
4732 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
4735 struct cl_target_option *def
4736 = TREE_TARGET_OPTION (target_option_default_node);
4738 /* Process each of the options on the chain. */
4739 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
4742 /* If the changed options are different from the default, rerun
4743 ix86_option_override_internal, and then save the options away.
4744 The string options are attribute options, and will be undone
4745 when we copy the save structure. */
4746 if (ix86_isa_flags != def->x_ix86_isa_flags
4747 || target_flags != def->x_target_flags
4748 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4749 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4750 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4752 /* If we are using the default tune= or arch=, undo the string assigned,
4753 and use the default. */
4754 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4755 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4756 else if (!orig_arch_specified)
4757 ix86_arch_string = NULL;
4759 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4760 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4761 else if (orig_tune_defaulted)
4762 ix86_tune_string = NULL;
4764 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4765 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4766 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
4767 else if (!TARGET_64BIT && TARGET_SSE)
4768 ix86_fpmath_string = "sse,387";
4770 /* Apply any overrides, such as arch=xxx or tune=xxx. */
4771 ix86_option_override_internal (false);
4773 /* Add any builtin functions with the new isa if any. */
4774 ix86_add_new_builtins (ix86_isa_flags);
4776 /* Save the current options unless we are validating options for
4778 t = build_target_option_node ();
4780 ix86_arch_string = orig_arch_string;
4781 ix86_tune_string = orig_tune_string;
4782 ix86_fpmath_string = orig_fpmath_string;
4784 /* Free up memory allocated to hold the strings */
4785 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4786 if (option_strings[i])
4787 free (option_strings[i]);
4793 /* Hook to validate attribute((target("string"))). */
4796 ix86_valid_target_attribute_p (tree fndecl,
4797 tree ARG_UNUSED (name),
4799 int ARG_UNUSED (flags))
4801 struct cl_target_option cur_target;
4803 tree old_optimize = build_optimization_node ();
4804 tree new_target, new_optimize;
4805 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4807 /* If the function changed the optimization levels as well as setting target
4808 options, start with the optimizations specified. */
4809 if (func_optimize && func_optimize != old_optimize)
4810 cl_optimization_restore (&global_options,
4811 TREE_OPTIMIZATION (func_optimize));
4813 /* The target attributes may also change some optimization flags, so update
4814 the optimization options if necessary. */
4815 cl_target_option_save (&cur_target, &global_options);
4816 new_target = ix86_valid_target_attribute_tree (args);
4817 new_optimize = build_optimization_node ();
4824 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4826 if (old_optimize != new_optimize)
4827 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4830 cl_target_option_restore (&global_options, &cur_target);
4832 if (old_optimize != new_optimize)
4833 cl_optimization_restore (&global_options,
4834 TREE_OPTIMIZATION (old_optimize));
4840 /* Hook to determine if one function can safely inline another. */
4843 ix86_can_inline_p (tree caller, tree callee)
4846 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4847 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4849 /* If callee has no option attributes, then it is ok to inline. */
4853 /* If caller has no option attributes, but callee does, then it is not ok to
4855 else if (!caller_tree)
4860 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4861 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
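/* Example for the subset test below (annotation, not in the original
   source): if the caller was compiled with -msse4.2 and the callee with
   -msse2, then (caller & callee) == callee holds and inlining is
   allowed; with the flags swapped the test fails and blocks it.  */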
4863 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4
4864 function can inline an SSE2 function, but an SSE2 function can't inline an SSE4
4866 if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
4867 != callee_opts->x_ix86_isa_flags)
4870 /* See if we have the same non-isa options. */
4871 else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
4874 /* See if arch, tune, etc. are the same. */
4875 else if (caller_opts->arch != callee_opts->arch)
4878 else if (caller_opts->tune != callee_opts->tune)
4881 else if (caller_opts->fpmath != callee_opts->fpmath)
4884 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4895 /* Remember the last target of ix86_set_current_function. */
4896 static GTY(()) tree ix86_previous_fndecl;
4898 /* Establish appropriate back-end context for processing the function
4899 FNDECL. The argument might be NULL to indicate processing at top
4900 level, outside of any function scope. */
4902 ix86_set_current_function (tree fndecl)
4904 /* Only change the context if the function changes. This hook is called
4905 several times in the course of compiling a function, and we don't want to
4906 slow things down too much or call target_reinit when it isn't safe. */
4907 if (fndecl && fndecl != ix86_previous_fndecl)
4909 tree old_tree = (ix86_previous_fndecl
4910 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4913 tree new_tree = (fndecl
4914 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4917 ix86_previous_fndecl = fndecl;
4918 if (old_tree == new_tree)
4923 cl_target_option_restore (&global_options,
4924 TREE_TARGET_OPTION (new_tree));
4930 struct cl_target_option *def
4931 = TREE_TARGET_OPTION (target_option_current_node);
4933 cl_target_option_restore (&global_options, def);
4940 /* Return true if this goes in large data/bss. */
4943 ix86_in_large_data_p (tree exp)
4945 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4948 /* Functions are never large data. */
4949 if (TREE_CODE (exp) == FUNCTION_DECL)
4952 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4954 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4955 if (strcmp (section, ".ldata") == 0
4956 || strcmp (section, ".lbss") == 0)
4962 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4964 /* If this is an incomplete type with size 0, then we can't put it
4965 in data because it might be too big when completed. */
4966 if (!size || size > ix86_section_threshold)
4973 /* Switch to the appropriate section for output of DECL.
4974 DECL is either a `VAR_DECL' node or a constant of some sort.
4975 RELOC indicates whether forming the initial value of DECL requires
4976 link-time relocations. */
4978 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4982 x86_64_elf_select_section (tree decl, int reloc,
4983 unsigned HOST_WIDE_INT align)
4985 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4986 && ix86_in_large_data_p (decl))
4988 const char *sname = NULL;
4989 unsigned int flags = SECTION_WRITE;
4990 switch (categorize_decl_for_section (decl, reloc))
4995 case SECCAT_DATA_REL:
4996 sname = ".ldata.rel";
4998 case SECCAT_DATA_REL_LOCAL:
4999 sname = ".ldata.rel.local";
5001 case SECCAT_DATA_REL_RO:
5002 sname = ".ldata.rel.ro";
5004 case SECCAT_DATA_REL_RO_LOCAL:
5005 sname = ".ldata.rel.ro.local";
5009 flags |= SECTION_BSS;
5012 case SECCAT_RODATA_MERGE_STR:
5013 case SECCAT_RODATA_MERGE_STR_INIT:
5014 case SECCAT_RODATA_MERGE_CONST:
5018 case SECCAT_SRODATA:
5025 /* We don't split these for the medium model. Place them into
5026 default sections and hope for the best. */
5031 /* We might get called with string constants, but get_named_section
5032 doesn't like them as they are not DECLs. Also, we need to set
5033 flags in that case. */
5035 return get_section (sname, flags, NULL);
5036 return get_named_section (decl, sname, reloc);
5039 return default_elf_select_section (decl, reloc, align);
5042 /* Build up a unique section name, expressed as a
5043 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
5044 RELOC indicates whether the initial value of EXP requires
5045 link-time relocations. */
5047 static void ATTRIBUTE_UNUSED
5048 x86_64_elf_unique_section (tree decl, int reloc)
5050 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5051 && ix86_in_large_data_p (decl))
5053 const char *prefix = NULL;
5054 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
5055 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
5057 switch (categorize_decl_for_section (decl, reloc))
5060 case SECCAT_DATA_REL:
5061 case SECCAT_DATA_REL_LOCAL:
5062 case SECCAT_DATA_REL_RO:
5063 case SECCAT_DATA_REL_RO_LOCAL:
5064 prefix = one_only ? ".ld" : ".ldata";
5067 prefix = one_only ? ".lb" : ".lbss";
5070 case SECCAT_RODATA_MERGE_STR:
5071 case SECCAT_RODATA_MERGE_STR_INIT:
5072 case SECCAT_RODATA_MERGE_CONST:
5073 prefix = one_only ? ".lr" : ".lrodata";
5075 case SECCAT_SRODATA:
5082 /* We don't split these for the medium model. Place them into
5083 default sections and hope for the best. */
5088 const char *name, *linkonce;
5091 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
5092 name = targetm.strip_name_encoding (name);
5094 /* If we're using one_only, then there needs to be a .gnu.linkonce
5095 prefix to the section name. */
5096 linkonce = one_only ? ".gnu.linkonce" : "";
5098 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
5100 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
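/* Example (annotation, not in the original source): for a one_only
   variable "foo" categorized as plain data, this builds
   ".gnu.linkonce.ld.foo"; without one_only it would be ".ldata.foo".  */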
5104 default_unique_section (decl, reloc);
5107 #ifdef COMMON_ASM_OP
5108 /* This says how to output assembler code to declare an
5109 uninitialized external linkage data object.
5111 For medium model x86-64 we need to use the .largecomm directive for
5114 x86_elf_aligned_common (FILE *file,
5115 const char *name, unsigned HOST_WIDE_INT size,
5118 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5119 && size > (unsigned int)ix86_section_threshold)
5120 fputs (".largecomm\t", file);
5122 fputs (COMMON_ASM_OP, file);
5123 assemble_name (file, name);
5124 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5125 size, align / BITS_PER_UNIT);
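/* Example output (annotation, not in the original source): under
   -mcmodel=medium with the default -mlarge-data-threshold, a
   70000-byte object "buf" aligned to 256 bits is emitted as
     .largecomm	buf,70000,32
   while smaller objects fall through to the regular COMMON_ASM_OP.  */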
5129 /* Utility function for targets to use in implementing
5130 ASM_OUTPUT_ALIGNED_BSS. */
5133 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
5134 const char *name, unsigned HOST_WIDE_INT size,
5137 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5138 && size > (unsigned int)ix86_section_threshold)
5139 switch_to_section (get_named_section (decl, ".lbss", 0));
5141 switch_to_section (bss_section);
5142 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
5143 #ifdef ASM_DECLARE_OBJECT_NAME
5144 last_assemble_variable_decl = decl;
5145 ASM_DECLARE_OBJECT_NAME (file, name, decl);
5147 /* Standard thing is just output label for the object. */
5148 ASM_OUTPUT_LABEL (file, name);
5149 #endif /* ASM_DECLARE_OBJECT_NAME */
5150 ASM_OUTPUT_SKIP (file, size ? size : 1);
5153 static const struct default_options ix86_option_optimization_table[] =
5155 /* Turn off -fschedule-insns by default. It tends to make the
5156 shortage of registers even worse. */
5157 #ifdef INSN_SCHEDULING
5158 { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
5161 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
5162 SUBTARGET_OPTIMIZATION_OPTIONS,
5164 { OPT_LEVELS_NONE, 0, NULL, 0 }
5167 /* Implement TARGET_OPTION_INIT_STRUCT. */
5170 ix86_option_init_struct (struct gcc_options *opts)
5173 /* The Darwin libraries never set errno, so we might as well
5174 avoid calling them when that's the only reason we would. */
5175 opts->x_flag_errno_math = 0;
5177 opts->x_flag_pcc_struct_return = 2;
5178 opts->x_flag_asynchronous_unwind_tables = 2;
5179 opts->x_flag_vect_cost_model = 1;
5182 /* Decide whether we must probe the stack before any space allocation
5183 on this target. It's essentially TARGET_STACK_PROBE except when
5184 -fstack-check causes the stack to be already probed differently. */
5187 ix86_target_stack_probe (void)
5189 /* Do not probe the stack twice if static stack checking is enabled. */
5190 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5193 return TARGET_STACK_PROBE;
5196 /* Decide whether we can make a sibling call to a function. DECL is the
5197 declaration of the function being targeted by the call and EXP is the
5198 CALL_EXPR representing the call. */
5201 ix86_function_ok_for_sibcall (tree decl, tree exp)
5203 tree type, decl_or_type;
5206 /* If we are generating position-independent code, we cannot sibcall
5207 optimize any indirect call, or a direct call to a global function,
5208 as the PLT requires %ebx be live. (Darwin does not have a PLT.) */
5212 && (!decl || !targetm.binds_local_p (decl)))
5215 /* If we need to align the outgoing stack, then sibcalling would
5216 unalign the stack, which may break the called function. */
5217 if (ix86_minimum_incoming_stack_boundary (true)
5218 < PREFERRED_STACK_BOUNDARY)
5223 decl_or_type = decl;
5224 type = TREE_TYPE (decl);
5228 /* We're looking at the CALL_EXPR, we need the type of the function. */
5229 type = CALL_EXPR_FN (exp); /* pointer expression */
5230 type = TREE_TYPE (type); /* pointer type */
5231 type = TREE_TYPE (type); /* function type */
5232 decl_or_type = type;
5235 /* Check that the return value locations are the same. For example,
5236 if we are returning floats on the 80387 register stack, we cannot
5237 make a sibcall from a function that doesn't return a float to a
5238 function that does or, conversely, from a function that does return
5239 a float to a function that doesn't; the necessary stack adjustment
5240 would not be executed. This is also the place we notice
5241 differences in the return value ABI. Note that it is ok for one
5242 of the functions to have void return type as long as the return
5243 value of the other is passed in a register. */
5244 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
5245 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
5247 if (STACK_REG_P (a) || STACK_REG_P (b))
5249 if (!rtx_equal_p (a, b))
5252 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
5254 /* Disable sibcall if we need to generate vzeroupper after
5256 if (TARGET_VZEROUPPER
5257 && cfun->machine->callee_return_avx256_p
5258 && !cfun->machine->caller_return_avx256_p)
5261 else if (!rtx_equal_p (a, b))
5266 /* The SYSV ABI has more call-clobbered registers;
5267 disallow sibcalls from MS to SYSV. */
5268 if (cfun->machine->call_abi == MS_ABI
5269 && ix86_function_type_abi (type) == SYSV_ABI)
5274 /* If this call is indirect, we'll need to be able to use a
5275 call-clobbered register for the address of the target function.
5276 Make sure that all such registers are not used for passing
5277 parameters. Note that DLLIMPORT functions are indirect. */
5279 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
5281 if (ix86_function_regparm (type, NULL) >= 3)
5283 /* ??? Need to count the actual number of registers to be used,
5284 not the possible number of registers. Fix later. */
5290 /* Otherwise okay. That also includes certain types of indirect calls. */
5294 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
5295 and "sseregparm" calling convention attributes;
5296 arguments as in struct attribute_spec.handler. */
5299 ix86_handle_cconv_attribute (tree *node, tree name,
5301 int flags ATTRIBUTE_UNUSED,
5304 if (TREE_CODE (*node) != FUNCTION_TYPE
5305 && TREE_CODE (*node) != METHOD_TYPE
5306 && TREE_CODE (*node) != FIELD_DECL
5307 && TREE_CODE (*node) != TYPE_DECL)
5309 warning (OPT_Wattributes, "%qE attribute only applies to functions",
5311 *no_add_attrs = true;
5315 /* Can combine regparm with all attributes but fastcall. */
5316 if (is_attribute_p ("regparm", name))
5320 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5322 error ("fastcall and regparm attributes are not compatible");
5325 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5327 error ("regparam and thiscall attributes are not compatible");
5330 cst = TREE_VALUE (args);
5331 if (TREE_CODE (cst) != INTEGER_CST)
5333 warning (OPT_Wattributes,
5334 "%qE attribute requires an integer constant argument",
5336 *no_add_attrs = true;
5338 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
5340 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
5342 *no_add_attrs = true;
5350 /* Do not warn when emulating the MS ABI. */
5351 if ((TREE_CODE (*node) != FUNCTION_TYPE
5352 && TREE_CODE (*node) != METHOD_TYPE)
5353 || ix86_function_type_abi (*node) != MS_ABI)
5354 warning (OPT_Wattributes, "%qE attribute ignored",
5356 *no_add_attrs = true;
5360 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
5361 if (is_attribute_p ("fastcall", name))
5363 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5365 error ("fastcall and cdecl attributes are not compatible");
5367 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5369 error ("fastcall and stdcall attributes are not compatible");
5371 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
5373 error ("fastcall and regparm attributes are not compatible");
5375 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5377 error ("fastcall and thiscall attributes are not compatible");
5381 /* Can combine stdcall with fastcall (redundant), regparm and
5383 else if (is_attribute_p ("stdcall", name))
5385 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5387 error ("stdcall and cdecl attributes are not compatible");
5389 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5391 error ("stdcall and fastcall attributes are not compatible");
5393 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5395 error ("stdcall and thiscall attributes are not compatible");
5399 /* Can combine cdecl with regparm and sseregparm. */
5400 else if (is_attribute_p ("cdecl", name))
5402 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5404 error ("stdcall and cdecl attributes are not compatible");
5406 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5408 error ("fastcall and cdecl attributes are not compatible");
5410 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5412 error ("cdecl and thiscall attributes are not compatible");
5415 else if (is_attribute_p ("thiscall", name))
5417 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
5418 warning (OPT_Wattributes, "%qE attribute is used for none class-method",
5420 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5422 error ("stdcall and thiscall attributes are not compatible");
5424 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5426 error ("fastcall and thiscall attributes are not compatible");
5428 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5430 error ("cdecl and thiscall attributes are not compatible");
5434 /* Can combine sseregparm with all attributes. */
5439 /* This function checks whether a method has the default __thiscall
5440 calling convention for the 32-bit MS ABI.
5441 It returns true if TYPE is a METHOD_TYPE, is not a stdarg function,
5442 and the 32-bit MS_ABI is in use. Otherwise it returns false. */
5445 ix86_is_msabi_thiscall (const_tree type)
5447 if (TARGET_64BIT || ix86_function_type_abi (type) != MS_ABI
5448 || TREE_CODE (type) != METHOD_TYPE || stdarg_p (type))
5450 /* Check for different calling-conventions. */
5451 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (type))
5452 || lookup_attribute ("stdcall", TYPE_ATTRIBUTES (type))
5453 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type))
5454 || lookup_attribute ("regparm", TYPE_ATTRIBUTES (type))
5455 || lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type)))
5460 /* This function checks whether the thiscall attribute is set for TYPE,
5461 or whether TYPE is a method type with the default thiscall convention.
5462 It returns true on a match, otherwise false. */
5465 ix86_is_type_thiscall (const_tree type)
5467 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type))
5468 || ix86_is_msabi_thiscall (type))
5473 /* Return 0 if the attributes for two types are incompatible, 1 if they
5474 are compatible, and 2 if they are nearly compatible (which causes a
5475 warning to be generated). */
5478 ix86_comp_type_attributes (const_tree type1, const_tree type2)
5480 /* Check for mismatch of non-default calling convention. */
5481 bool is_thiscall = ix86_is_msabi_thiscall (type1);
5482 const char *const rtdstr = TARGET_RTD ? (is_thiscall ? "thiscall" : "cdecl") : "stdcall";
5484 if (TREE_CODE (type1) != FUNCTION_TYPE
5485 && TREE_CODE (type1) != METHOD_TYPE)
5488 /* Check for mismatched fastcall/regparm types. */
5489 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
5490 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
5491 || (ix86_function_regparm (type1, NULL)
5492 != ix86_function_regparm (type2, NULL)))
5495 /* Check for mismatched sseregparm types. */
5496 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
5497 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
5500 /* Check for mismatched thiscall types. */
5501 if (is_thiscall && !TARGET_RTD)
5503 if (!lookup_attribute ("cdecl", TYPE_ATTRIBUTES (type1))
5504 != !lookup_attribute ("cdecl", TYPE_ATTRIBUTES (type2)))
5507 else if (!is_thiscall || TARGET_RTD)
5509 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
5510 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
5514 /* Check for mismatched return types (cdecl vs stdcall). */
5515 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
5516 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
5522 /* Return the regparm value for a function with the indicated TYPE and DECL.
5523 DECL may be NULL when calling function indirectly
5524 or considering a libcall. */
5527 ix86_function_regparm (const_tree type, const_tree decl)
5533 return (ix86_function_type_abi (type) == SYSV_ABI
5534 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5536 regparm = ix86_regparm;
5537 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
5540 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
5544 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
5547 if (ix86_is_type_thiscall (type))
5550 /* Use register calling convention for local functions when possible. */
5552 && TREE_CODE (decl) == FUNCTION_DECL
5554 && !(profile_flag && !flag_fentry))
5556 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5557 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
5558 if (i && i->local && i->can_change_signature)
5560 int local_regparm, globals = 0, regno;
5562 /* Make sure no regparm register is taken by a
5563 fixed register variable. */
5564 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
5565 if (fixed_regs[local_regparm])
5568 /* We don't want to use regparm(3) for nested functions as
5569 these use a static chain pointer in the third argument. */
5570 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
5573 /* In 32-bit mode save a register for the split stack. */
5574 if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
5577 /* Each fixed register usage increases register pressure,
5578 so fewer registers should be used for argument passing.
5579 This functionality can be overridden by an explicit regparm value. */
5581 for (regno = 0; regno <= DI_REG; regno++)
5582 if (fixed_regs[regno])
5586 = globals < local_regparm ? local_regparm - globals : 0;
5588 if (local_regparm > regparm)
5589 regparm = local_regparm;
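/* Sketch of the effect (user code, not part of GCC): in 32-bit mode

     int __attribute__((regparm(3))) add3 (int a, int b, int c);

   passes A, B and C in %eax, %edx and %ecx, so this function returns
   3 for the type.  The local-function path above can likewise raise
   regparm for local static functions when enough registers are free.  */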
5596 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
5597 DFmode (2) arguments in SSE registers for a function with the
5598 indicated TYPE and DECL. DECL may be NULL when calling function
5599 indirectly or considering a libcall. Otherwise return 0. */
5602 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
5604 gcc_assert (!TARGET_64BIT);
5606 /* Use SSE registers to pass SFmode and DFmode arguments if requested
5607 by the sseregparm attribute. */
5608 if (TARGET_SSEREGPARM
5609 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
5616 error ("calling %qD with attribute sseregparm without "
5617 "SSE/SSE2 enabled", decl);
5619 error ("calling %qT with attribute sseregparm without "
5620 "SSE/SSE2 enabled", type);
5628 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
5629 (and DFmode for SSE2) arguments in SSE registers. */
5630 if (decl && TARGET_SSE_MATH && optimize
5631 && !(profile_flag && !flag_fentry))
5633 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5634 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
5635 if (i && i->local && i->can_change_signature)
5636 return TARGET_SSE2 ? 2 : 1;
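/* Sketch (user code, not part of GCC): with SSE enabled at -m32,

     double __attribute__((sseregparm)) scale (double x, float y);

   passes X and Y in %xmm registers, and this function returns 2 when
   SSE2 is available (SFmode and DFmode) or 1 with plain SSE (SFmode
   only).  */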
5642 /* Return true if EAX is live at the start of the function. Used by
5643 ix86_expand_prologue to determine if we need special help before
5644 calling allocate_stack_worker. */
5647 ix86_eax_live_at_start_p (void)
5649 /* Cheat. Don't bother working forward from ix86_function_regparm
5650 to the function type to whether an actual argument is located in
5651 eax. Instead just look at cfg info, which is still close enough
5652 to correct at this point. This gives false positives for broken
5653 functions that might use uninitialized data that happens to be
5654 allocated in eax, but who cares? */
5655 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
5659 ix86_keep_aggregate_return_pointer (tree fntype)
5665 attr = lookup_attribute ("callee_pop_aggregate_return",
5666 TYPE_ATTRIBUTES (fntype));
5668 return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);
5670 /* For 32-bit MS-ABI the default is to keep aggregate return pointer. */
5672 if (ix86_function_type_abi (fntype) == MS_ABI)
5675 return KEEP_AGGREGATE_RETURN_POINTER != 0;
5678 /* Value is the number of bytes of arguments automatically
5679 popped when returning from a subroutine call.
5680 FUNDECL is the declaration node of the function (as a tree),
5681 FUNTYPE is the data type of the function (as a tree),
5682 or for a library call it is an identifier node for the subroutine name.
5683 SIZE is the number of bytes of arguments passed on the stack.
5685 On the 80386, the RTD insn may be used to pop them if the number
5686 of args is fixed, but if the number is variable then the caller
5687 must pop them all. RTD can't be used for library calls now
5688 because the library is compiled with the Unix compiler.
5689 Use of RTD is a selectable option, since it is incompatible with
5690 standard Unix calling sequences. If the option is not selected,
5691 the caller must always pop the args.
5693 The attribute stdcall is equivalent to RTD on a per module basis. */
5696 ix86_return_pops_args (tree fundecl, tree funtype, int size)
5700 /* None of the 64-bit ABIs pop arguments. */
5704 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
5706 /* Cdecl functions override -mrtd, and never pop the stack. */
5707 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
5709 /* Stdcall and fastcall functions will pop the stack if not
5711 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
5712 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
5713 || ix86_is_type_thiscall (funtype))
5716 if (rtd && ! stdarg_p (funtype))
5720 /* Lose any fake structure return argument if it is passed on the stack. */
5721 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
5722 && !ix86_keep_aggregate_return_pointer (funtype))
5724 int nregs = ix86_function_regparm (funtype, fundecl);
5726 return GET_MODE_SIZE (Pmode);
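/* A worked example (illustrative only): for

     int __attribute__((stdcall)) f (int a, int b);

   SIZE is 8, the "stdcall" lookup above succeeds, and the callee pops
   its own arguments with "ret $8".  A cdecl function returns 0 here,
   leaving the 8 bytes for the caller to pop.  */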
5732 /* Argument support functions. */
5734 /* Return true when REGNO may be used to pass function parameters. */
5736 ix86_function_arg_regno_p (int regno)
5739 const int *parm_regs;
5744 return (regno < REGPARM_MAX
5745 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
5747 return (regno < REGPARM_MAX
5748 || (TARGET_MMX && MMX_REGNO_P (regno)
5749 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
5750 || (TARGET_SSE && SSE_REGNO_P (regno)
5751 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
5756 if (SSE_REGNO_P (regno) && TARGET_SSE)
5761 if (TARGET_SSE && SSE_REGNO_P (regno)
5762 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
5766 /* TODO: The function should depend on current function ABI but
5767 builtins.c would need updating then. Therefore we use the default ABI. */
5770 /* RAX is used as hidden argument to va_arg functions. */
5771 if (ix86_abi == SYSV_ABI && regno == AX_REG)
5774 if (ix86_abi == MS_ABI)
5775 parm_regs = x86_64_ms_abi_int_parameter_registers;
5777 parm_regs = x86_64_int_parameter_registers;
5778 for (i = 0; i < (ix86_abi == MS_ABI
5779 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
5780 if (regno == parm_regs[i])
5785 /* Return true if we do not know how to pass TYPE solely in registers. */
5788 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
5790 if (must_pass_in_stack_var_size_or_pad (mode, type))
5793 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
5794 The layout_type routine is crafty and tries to trick us into passing
5795 currently unsupported vector types on the stack by using TImode. */
5796 return (!TARGET_64BIT && mode == TImode
5797 && type && TREE_CODE (type) != VECTOR_TYPE);
5800 /* Return the size, in bytes, of the area reserved for arguments passed
5801 in registers for the function represented by FNDECL, depending on the ABI used. */
5804 ix86_reg_parm_stack_space (const_tree fndecl)
5806 enum calling_abi call_abi = SYSV_ABI;
5807 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
5808 call_abi = ix86_function_abi (fndecl);
5810 call_abi = ix86_function_type_abi (fndecl);
5811 if (TARGET_64BIT && call_abi == MS_ABI)
5816 /* Returns SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the call ABI used. */
5819 ix86_function_type_abi (const_tree fntype)
5823 enum calling_abi abi = ix86_abi;
5824 if (abi == SYSV_ABI)
5826 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5829 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5837 ix86_function_ms_hook_prologue (const_tree fn)
5839 if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
5841 if (decl_function_context (fn) != NULL_TREE)
5842 error_at (DECL_SOURCE_LOCATION (fn),
5843 "ms_hook_prologue is not compatible with nested function");
5850 static enum calling_abi
5851 ix86_function_abi (const_tree fndecl)
5855 return ix86_function_type_abi (TREE_TYPE (fndecl));
5858 /* Returns SYSV_ABI or MS_ABI, depending on cfun, specifying the call ABI used. */
5861 ix86_cfun_abi (void)
5865 return cfun->machine->call_abi;
5868 /* Write the extra assembler code needed to declare a function properly. */
5871 ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
5874 bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
5878 int i, filler_count = (TARGET_64BIT ? 32 : 16);
5879 unsigned int filler_cc = 0xcccccccc;
5881 for (i = 0; i < filler_count; i += 4)
5882 fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
5885 #ifdef SUBTARGET_ASM_UNWIND_INIT
5886 SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
5889 ASM_OUTPUT_LABEL (asm_out_file, fname);
5891 /* Output magic byte marker, if hot-patch attribute is set. */
5896 /* leaq [%rsp + 0], %rsp */
5897 asm_fprintf (asm_out_file, ASM_BYTE
5898 "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
5902 /* movl.s %edi, %edi
5903 push %ebp
5904 movl.s %esp, %ebp */
5905 asm_fprintf (asm_out_file, ASM_BYTE
5906 "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
5912 extern void init_regs (void);
5914 /* Implementation of call abi switching target hook. Specific to FNDECL
5915 the specific call register sets are set. See also
5916 ix86_conditional_register_usage for more details. */
5918 ix86_call_abi_override (const_tree fndecl)
5920 if (fndecl == NULL_TREE)
5921 cfun->machine->call_abi = ix86_abi;
5923 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5926 /* 64-bit MS and SYSV ABI have different sets of call-used registers. Avoid
5927 expensive re-initialization of init_regs each time we switch function context,
5928 since this is needed only during RTL expansion. */
5930 ix86_maybe_switch_abi (void)
5933 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5937 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5938 for a call to a function whose data type is FNTYPE.
5939 For a library call, FNTYPE is 0. */
5942 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5943 tree fntype, /* tree ptr for function decl */
5944 rtx libname, /* SYMBOL_REF of library name or 0 */
5948 struct cgraph_local_info *i;
5951 memset (cum, 0, sizeof (*cum));
5953 /* Initialize for the current callee. */
5956 cfun->machine->callee_pass_avx256_p = false;
5957 cfun->machine->callee_return_avx256_p = false;
5962 i = cgraph_local_info (fndecl);
5963 cum->call_abi = ix86_function_abi (fndecl);
5964 fnret_type = TREE_TYPE (TREE_TYPE (fndecl));
5969 cum->call_abi = ix86_function_type_abi (fntype);
5971 fnret_type = TREE_TYPE (fntype);
5976 if (TARGET_VZEROUPPER && fnret_type)
5978 rtx fnret_value = ix86_function_value (fnret_type, fntype,
5980 if (function_pass_avx256_p (fnret_value))
5982 /* The return value of this function uses 256bit AVX modes. */
5984 cfun->machine->callee_return_avx256_p = true;
5986 cfun->machine->caller_return_avx256_p = true;
5990 cum->caller = caller;
5992 /* Set up the number of registers to use for passing arguments. */
5994 if (TARGET_64BIT && cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5995 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5996 "or subtarget optimization implying it");
5997 cum->nregs = ix86_regparm;
6000 cum->nregs = (cum->call_abi == SYSV_ABI
6001 ? X86_64_REGPARM_MAX
6002 : X86_64_MS_REGPARM_MAX);
6006 cum->sse_nregs = SSE_REGPARM_MAX;
6009 cum->sse_nregs = (cum->call_abi == SYSV_ABI
6010 ? X86_64_SSE_REGPARM_MAX
6011 : X86_64_MS_SSE_REGPARM_MAX);
6015 cum->mmx_nregs = MMX_REGPARM_MAX;
6016 cum->warn_avx = true;
6017 cum->warn_sse = true;
6018 cum->warn_mmx = true;
6020 /* Because the type may mismatch between caller and callee, we need to
6021 use the actual type of the function for local calls.
6022 FIXME: cgraph_analyze can be told to actually record whether a function uses
6023 va_start, so for local functions maybe_vaarg can be made more aggressive.
6025 FIXME: once the type system is fixed, we won't need this code anymore. */
6026 if (i && i->local && i->can_change_signature)
6027 fntype = TREE_TYPE (fndecl);
6028 cum->maybe_vaarg = (fntype
6029 ? (!prototype_p (fntype) || stdarg_p (fntype))
6034 /* If there are variable arguments, then we won't pass anything
6035 in registers in 32-bit mode. */
6036 if (stdarg_p (fntype))
6047 /* Use ecx and edx registers if function has fastcall attribute,
6048 else look for regparm information. */
6051 if (ix86_is_type_thiscall (fntype))
6054 cum->fastcall = 1; /* Same first register as in fastcall. */
6056 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
6062 cum->nregs = ix86_function_regparm (fntype, fndecl);
6065 /* Set up the number of SSE registers used for passing SFmode
6066 and DFmode arguments. Warn for mismatching ABI. */
6067 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
6071 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
6072 But in the case of vector types, it is some vector mode.
6074 When we have only some of our vector isa extensions enabled, then there
6075 are some modes for which vector_mode_supported_p is false. For these
6076 modes, the generic vector support in gcc will choose some non-vector mode
6077 in order to implement the type. By computing the natural mode, we'll
6078 select the proper ABI location for the operand and not depend on whatever
6079 the middle-end decides to do with these vector types.
6081 The middle-end can't deal with vector types > 16 bytes. In this
6082 case, we return the original mode and warn ABI change if CUM isn't NULL. */
6085 static enum machine_mode
6086 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
6088 enum machine_mode mode = TYPE_MODE (type);
6090 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
6092 HOST_WIDE_INT size = int_size_in_bytes (type);
6093 if ((size == 8 || size == 16 || size == 32)
6094 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
6095 && TYPE_VECTOR_SUBPARTS (type) > 1)
6097 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
6099 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6100 mode = MIN_MODE_VECTOR_FLOAT;
6102 mode = MIN_MODE_VECTOR_INT;
6104 /* Get the mode which has this inner mode and number of units. */
6105 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
6106 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
6107 && GET_MODE_INNER (mode) == innermode)
6109 if (size == 32 && !TARGET_AVX)
6111 static bool warnedavx;
6118 warning (0, "AVX vector argument without AVX "
6119 "enabled changes the ABI");
6121 return TYPE_MODE (type);
6134 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
6135 this may not agree with the mode that the type system has chosen for the
6136 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
6137 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
6140 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
6145 if (orig_mode != BLKmode)
6146 tmp = gen_rtx_REG (orig_mode, regno);
6149 tmp = gen_rtx_REG (mode, regno);
6150 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
6151 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
6157 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
6158 of this code is to classify each 8-byte chunk of the incoming argument by register
6159 class and assign registers accordingly. */
6161 /* Return the union class of CLASS1 and CLASS2.
6162 See the x86-64 PS ABI for details. */
6164 static enum x86_64_reg_class
6165 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
6167 /* Rule #1: If both classes are equal, this is the resulting class. */
6168 if (class1 == class2)
6171 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
6173 if (class1 == X86_64_NO_CLASS)
6175 if (class2 == X86_64_NO_CLASS)
6178 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
6179 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
6180 return X86_64_MEMORY_CLASS;
6182 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
6183 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
6184 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
6185 return X86_64_INTEGERSI_CLASS;
6186 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
6187 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
6188 return X86_64_INTEGER_CLASS;
6190 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
6192 if (class1 == X86_64_X87_CLASS
6193 || class1 == X86_64_X87UP_CLASS
6194 || class1 == X86_64_COMPLEX_X87_CLASS
6195 || class2 == X86_64_X87_CLASS
6196 || class2 == X86_64_X87UP_CLASS
6197 || class2 == X86_64_COMPLEX_X87_CLASS)
6198 return X86_64_MEMORY_CLASS;
6200 /* Rule #6: Otherwise class SSE is used. */
6201 return X86_64_SSE_CLASS;
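/* A worked example of the rules above (illustrative only): for

     struct s { int i; float f; };

   both fields fall into one eightbyte; the int classifies as
   INTEGERSI and the float as SSESF.  Rule #4 merges them to
   INTEGERSI, so the whole struct travels in a single integer
   register rather than in %xmm0.  */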
6204 /* Classify the argument of type TYPE and mode MODE.
6205 CLASSES will be filled by the register class used to pass each word
6206 of the operand. The number of words is returned. In case the parameter
6207 should be passed in memory, 0 is returned. As a special case for zero
6208 sized containers, classes[0] will be NO_CLASS and 1 is returned.
6210 BIT_OFFSET is used internally for handling records and specifies the
6211 offset in bits modulo 256 to avoid overflow cases.
6213 See the x86-64 PS ABI for details.
6217 classify_argument (enum machine_mode mode, const_tree type,
6218 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
6220 HOST_WIDE_INT bytes =
6221 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6222 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6224 /* Variable sized entities are always passed/returned in memory. */
6228 if (mode != VOIDmode
6229 && targetm.calls.must_pass_in_stack (mode, type))
6232 if (type && AGGREGATE_TYPE_P (type))
6236 enum x86_64_reg_class subclasses[MAX_CLASSES];
6238 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
6242 for (i = 0; i < words; i++)
6243 classes[i] = X86_64_NO_CLASS;
6245 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
6246 signal the memory class, so handle this as a special case. */
6249 classes[0] = X86_64_NO_CLASS;
6253 /* Classify each field of record and merge classes. */
6254 switch (TREE_CODE (type))
6257 /* And now merge the fields of the structure. */
6258 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6260 if (TREE_CODE (field) == FIELD_DECL)
6264 if (TREE_TYPE (field) == error_mark_node)
6267 /* Bitfields are always classified as integer. Handle them
6268 early, since later code would consider them to be
6269 misaligned integers. */
6270 if (DECL_BIT_FIELD (field))
6272 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6273 i < ((int_bit_position (field) + (bit_offset % 64))
6274 + tree_low_cst (DECL_SIZE (field), 0)
6277 merge_classes (X86_64_INTEGER_CLASS,
6284 type = TREE_TYPE (field);
6286 /* Flexible array member is ignored. */
6287 if (TYPE_MODE (type) == BLKmode
6288 && TREE_CODE (type) == ARRAY_TYPE
6289 && TYPE_SIZE (type) == NULL_TREE
6290 && TYPE_DOMAIN (type) != NULL_TREE
6291 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
6296 if (!warned && warn_psabi)
6299 inform (input_location,
6300 "the ABI of passing struct with"
6301 " a flexible array member has"
6302 " changed in GCC 4.4");
6306 num = classify_argument (TYPE_MODE (type), type,
6308 (int_bit_position (field)
6309 + bit_offset) % 256);
6312 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6313 for (i = 0; i < num && (i + pos) < words; i++)
6315 merge_classes (subclasses[i], classes[i + pos]);
6322 /* Arrays are handled as small records. */
6325 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
6326 TREE_TYPE (type), subclasses, bit_offset);
6330 /* The partial classes are now full classes. */
6331 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
6332 subclasses[0] = X86_64_SSE_CLASS;
6333 if (subclasses[0] == X86_64_INTEGERSI_CLASS
6334 && !((bit_offset % 64) == 0 && bytes == 4))
6335 subclasses[0] = X86_64_INTEGER_CLASS;
6337 for (i = 0; i < words; i++)
6338 classes[i] = subclasses[i % num];
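/* Illustrative example for the array path above (not from the
   sources): for

     struct s { double a[2]; };

   the element classifies as one SSEDF word, and replicating
   subclasses[i % num] marks both eightbytes SSE, so the struct is
   passed in two SSE registers.  */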
6343 case QUAL_UNION_TYPE:
6344 /* Unions are similar to RECORD_TYPE but offset is always 0.
6346 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6348 if (TREE_CODE (field) == FIELD_DECL)
6352 if (TREE_TYPE (field) == error_mark_node)
6355 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
6356 TREE_TYPE (field), subclasses,
6360 for (i = 0; i < num; i++)
6361 classes[i] = merge_classes (subclasses[i], classes[i]);
6372 /* When size > 16 bytes, if the first one isn't
6373 X86_64_SSE_CLASS or any other ones aren't
6374 X86_64_SSEUP_CLASS, everything should be passed in memory. */
6376 if (classes[0] != X86_64_SSE_CLASS)
6379 for (i = 1; i < words; i++)
6380 if (classes[i] != X86_64_SSEUP_CLASS)
6384 /* Final merger cleanup. */
6385 for (i = 0; i < words; i++)
6387 /* If one class is MEMORY, everything should be passed in memory. */
6389 if (classes[i] == X86_64_MEMORY_CLASS)
6392 /* The X86_64_SSEUP_CLASS should be always preceded by
6393 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
6394 if (classes[i] == X86_64_SSEUP_CLASS
6395 && classes[i - 1] != X86_64_SSE_CLASS
6396 && classes[i - 1] != X86_64_SSEUP_CLASS)
6398 /* The first one should never be X86_64_SSEUP_CLASS. */
6399 gcc_assert (i != 0);
6400 classes[i] = X86_64_SSE_CLASS;
6403 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
6404 everything should be passed in memory. */
6405 if (classes[i] == X86_64_X87UP_CLASS
6406 && (classes[i - 1] != X86_64_X87_CLASS))
6410 /* The first one should never be X86_64_X87UP_CLASS. */
6411 gcc_assert (i != 0);
6412 if (!warned && warn_psabi)
6415 inform (input_location,
6416 "the ABI of passing union with long double"
6417 " has changed in GCC 4.4");
6425 /* Compute the alignment needed. We align all types to natural boundaries with
6426 the exception of XFmode, which is aligned to 64 bits. */
6427 if (mode != VOIDmode && mode != BLKmode)
6429 int mode_alignment = GET_MODE_BITSIZE (mode);
6432 mode_alignment = 128;
6433 else if (mode == XCmode)
6434 mode_alignment = 256;
6435 if (COMPLEX_MODE_P (mode))
6436 mode_alignment /= 2;
6437 /* Misaligned fields are always returned in memory. */
6438 if (bit_offset % mode_alignment)
6442 /* For V1xx modes, just use the base mode. */
6443 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
6444 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
6445 mode = GET_MODE_INNER (mode);
6447 /* Classification of atomic types. */
6452 classes[0] = X86_64_SSE_CLASS;
6455 classes[0] = X86_64_SSE_CLASS;
6456 classes[1] = X86_64_SSEUP_CLASS;
6466 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
6470 classes[0] = X86_64_INTEGERSI_CLASS;
6473 else if (size <= 64)
6475 classes[0] = X86_64_INTEGER_CLASS;
6478 else if (size <= 64+32)
6480 classes[0] = X86_64_INTEGER_CLASS;
6481 classes[1] = X86_64_INTEGERSI_CLASS;
6484 else if (size <= 64+64)
6486 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6494 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6498 /* OImode shouldn't be used directly. */
6503 if (!(bit_offset % 64))
6504 classes[0] = X86_64_SSESF_CLASS;
6506 classes[0] = X86_64_SSE_CLASS;
6509 classes[0] = X86_64_SSEDF_CLASS;
6512 classes[0] = X86_64_X87_CLASS;
6513 classes[1] = X86_64_X87UP_CLASS;
6516 classes[0] = X86_64_SSE_CLASS;
6517 classes[1] = X86_64_SSEUP_CLASS;
6520 classes[0] = X86_64_SSE_CLASS;
6521 if (!(bit_offset % 64))
6527 if (!warned && warn_psabi)
6530 inform (input_location,
6531 "the ABI of passing structure with complex float"
6532 " member has changed in GCC 4.4");
6534 classes[1] = X86_64_SSESF_CLASS;
6538 classes[0] = X86_64_SSEDF_CLASS;
6539 classes[1] = X86_64_SSEDF_CLASS;
6542 classes[0] = X86_64_COMPLEX_X87_CLASS;
6545 /* These modes are larger than 16 bytes. */
6553 classes[0] = X86_64_SSE_CLASS;
6554 classes[1] = X86_64_SSEUP_CLASS;
6555 classes[2] = X86_64_SSEUP_CLASS;
6556 classes[3] = X86_64_SSEUP_CLASS;
6564 classes[0] = X86_64_SSE_CLASS;
6565 classes[1] = X86_64_SSEUP_CLASS;
6573 classes[0] = X86_64_SSE_CLASS;
6579 gcc_assert (VECTOR_MODE_P (mode));
6584 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
6586 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
6587 classes[0] = X86_64_INTEGERSI_CLASS;
6589 classes[0] = X86_64_INTEGER_CLASS;
6590 classes[1] = X86_64_INTEGER_CLASS;
6591 return 1 + (bytes > 8);
6595 /* Examine the argument and set the number of registers required in each
6596 class. Return 0 iff the parameter should be passed in memory. */
6598 examine_argument (enum machine_mode mode, const_tree type, int in_return,
6599 int *int_nregs, int *sse_nregs)
6601 enum x86_64_reg_class regclass[MAX_CLASSES];
6602 int n = classify_argument (mode, type, regclass, 0);
6608 for (n--; n >= 0; n--)
6609 switch (regclass[n])
6611 case X86_64_INTEGER_CLASS:
6612 case X86_64_INTEGERSI_CLASS:
6615 case X86_64_SSE_CLASS:
6616 case X86_64_SSESF_CLASS:
6617 case X86_64_SSEDF_CLASS:
6620 case X86_64_NO_CLASS:
6621 case X86_64_SSEUP_CLASS:
6623 case X86_64_X87_CLASS:
6624 case X86_64_X87UP_CLASS:
6628 case X86_64_COMPLEX_X87_CLASS:
6629 return in_return ? 2 : 0;
6630 case X86_64_MEMORY_CLASS:
6636 /* Construct a container for the argument used by the GCC interface. See
6637 FUNCTION_ARG for the detailed description. */
6640 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
6641 const_tree type, int in_return, int nintregs, int nsseregs,
6642 const int *intreg, int sse_regno)
6644 /* The following variables hold the static issued_error state. */
6645 static bool issued_sse_arg_error;
6646 static bool issued_sse_ret_error;
6647 static bool issued_x87_ret_error;
6649 enum machine_mode tmpmode;
6651 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6652 enum x86_64_reg_class regclass[MAX_CLASSES];
6656 int needed_sseregs, needed_intregs;
6657 rtx exp[MAX_CLASSES];
6660 n = classify_argument (mode, type, regclass, 0);
6663 if (!examine_argument (mode, type, in_return, &needed_intregs,
6666 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
6669 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
6670 some less clueful developer tries to use floating-point anyway. */
6671 if (needed_sseregs && !TARGET_SSE)
6675 if (!issued_sse_ret_error)
6677 error ("SSE register return with SSE disabled");
6678 issued_sse_ret_error = true;
6681 else if (!issued_sse_arg_error)
6683 error ("SSE register argument with SSE disabled");
6684 issued_sse_arg_error = true;
6689 /* Likewise, error if the ABI requires us to return values in the
6690 x87 registers and the user specified -mno-80387. */
6691 if (!TARGET_80387 && in_return)
6692 for (i = 0; i < n; i++)
6693 if (regclass[i] == X86_64_X87_CLASS
6694 || regclass[i] == X86_64_X87UP_CLASS
6695 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
6697 if (!issued_x87_ret_error)
6699 error ("x87 register return with x87 disabled");
6700 issued_x87_ret_error = true;
6705 /* First construct simple cases. Avoid SCmode, since we want to use a
6706 single register to pass this type. */
6707 if (n == 1 && mode != SCmode)
6708 switch (regclass[0])
6710 case X86_64_INTEGER_CLASS:
6711 case X86_64_INTEGERSI_CLASS:
6712 return gen_rtx_REG (mode, intreg[0]);
6713 case X86_64_SSE_CLASS:
6714 case X86_64_SSESF_CLASS:
6715 case X86_64_SSEDF_CLASS:
6716 if (mode != BLKmode)
6717 return gen_reg_or_parallel (mode, orig_mode,
6718 SSE_REGNO (sse_regno));
6720 case X86_64_X87_CLASS:
6721 case X86_64_COMPLEX_X87_CLASS:
6722 return gen_rtx_REG (mode, FIRST_STACK_REG);
6723 case X86_64_NO_CLASS:
6724 /* Zero sized array, struct or class. */
6729 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
6730 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
6731 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6733 && regclass[0] == X86_64_SSE_CLASS
6734 && regclass[1] == X86_64_SSEUP_CLASS
6735 && regclass[2] == X86_64_SSEUP_CLASS
6736 && regclass[3] == X86_64_SSEUP_CLASS
6738 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6741 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
6742 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
6743 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
6744 && regclass[1] == X86_64_INTEGER_CLASS
6745 && (mode == CDImode || mode == TImode || mode == TFmode)
6746 && intreg[0] + 1 == intreg[1])
6747 return gen_rtx_REG (mode, intreg[0]);
6749 /* Otherwise figure out the entries of the PARALLEL. */
6750 for (i = 0; i < n; i++)
6754 switch (regclass[i])
6756 case X86_64_NO_CLASS:
6758 case X86_64_INTEGER_CLASS:
6759 case X86_64_INTEGERSI_CLASS:
6760 /* Merge TImodes on aligned occasions here too. */
6761 if (i * 8 + 8 > bytes)
6762 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
6763 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
6767 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
6768 if (tmpmode == BLKmode)
6770 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6771 gen_rtx_REG (tmpmode, *intreg),
6775 case X86_64_SSESF_CLASS:
6776 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6777 gen_rtx_REG (SFmode,
6778 SSE_REGNO (sse_regno)),
6782 case X86_64_SSEDF_CLASS:
6783 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6784 gen_rtx_REG (DFmode,
6785 SSE_REGNO (sse_regno)),
6789 case X86_64_SSE_CLASS:
6797 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
6807 && regclass[1] == X86_64_SSEUP_CLASS
6808 && regclass[2] == X86_64_SSEUP_CLASS
6809 && regclass[3] == X86_64_SSEUP_CLASS);
6816 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6817 gen_rtx_REG (tmpmode,
6818 SSE_REGNO (sse_regno)),
6827 /* Empty aligned struct, union or class. */
6831 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
6832 for (i = 0; i < nexps; i++)
6833 XVECEXP (ret, 0, i) = exp [i];
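/* A sketch of the result (illustrative, register names assume the
   argument-passing case): for

     struct s { double d; long l; };

   the classes are { SSEDF, INTEGER } and the loop above builds
   roughly

     (parallel [ (expr_list (reg:DF xmm0) (const_int 0))
                 (expr_list (reg:DI di)   (const_int 8)) ])

   so the first eightbyte travels in an SSE register and the second in
   an integer register, each tagged with its byte offset.  */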
6837 /* Update the data in CUM to advance over an argument of mode MODE
6838 and data type TYPE. (TYPE is null for libcalls where that information
6839 may not be available.) */
6842 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6843 const_tree type, HOST_WIDE_INT bytes,
6844 HOST_WIDE_INT words)
6860 cum->words += words;
6861 cum->nregs -= words;
6862 cum->regno += words;
6864 if (cum->nregs <= 0)
6872 /* OImode shouldn't be used directly. */
6876 if (cum->float_in_sse < 2)
6879 if (cum->float_in_sse < 1)
6896 if (!type || !AGGREGATE_TYPE_P (type))
6898 cum->sse_words += words;
6899 cum->sse_nregs -= 1;
6900 cum->sse_regno += 1;
6901 if (cum->sse_nregs <= 0)
6915 if (!type || !AGGREGATE_TYPE_P (type))
6917 cum->mmx_words += words;
6918 cum->mmx_nregs -= 1;
6919 cum->mmx_regno += 1;
6920 if (cum->mmx_nregs <= 0)
6931 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6932 const_tree type, HOST_WIDE_INT words, bool named)
6934 int int_nregs, sse_nregs;
6936 /* Unnamed 256bit vector mode parameters are passed on the stack. */
6937 if (!named && VALID_AVX256_REG_MODE (mode))
6940 if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
6941 && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6943 cum->nregs -= int_nregs;
6944 cum->sse_nregs -= sse_nregs;
6945 cum->regno += int_nregs;
6946 cum->sse_regno += sse_nregs;
6950 int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
6951 cum->words = (cum->words + align - 1) & ~(align - 1);
6952 cum->words += words;
6957 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6958 HOST_WIDE_INT words)
6960 /* Otherwise, this should be passed indirectly. */
6961 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6963 cum->words += words;
6971 /* Update the data in CUM to advance over an argument of mode MODE and
6972 data type TYPE. (TYPE is null for libcalls where that information
6973 may not be available.) */
6976 ix86_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6977 const_tree type, bool named)
6979 HOST_WIDE_INT bytes, words;
6981 if (mode == BLKmode)
6982 bytes = int_size_in_bytes (type);
6984 bytes = GET_MODE_SIZE (mode);
6985 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6988 mode = type_natural_mode (type, NULL);
6990 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6991 function_arg_advance_ms_64 (cum, bytes, words);
6992 else if (TARGET_64BIT)
6993 function_arg_advance_64 (cum, mode, type, words, named);
6995 function_arg_advance_32 (cum, mode, type, bytes, words);
6998 /* Define where to put the arguments to a function.
6999 Value is zero to push the argument on the stack,
7000 or a hard register in which to store the argument.
7002 MODE is the argument's machine mode.
7003 TYPE is the data type of the argument (as a tree).
7004 This is null for libcalls where that information may
7006 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7007 the preceding args and about the function being called.
7008 NAMED is nonzero if this argument is a named parameter
7009 (otherwise it is an extra parameter matching an ellipsis). */
7012 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7013 enum machine_mode orig_mode, const_tree type,
7014 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
7016 static bool warnedsse, warnedmmx;
7018 /* Avoid the AL settings for the Unix64 ABI. */
7019 if (mode == VOIDmode)
7035 if (words <= cum->nregs)
7037 int regno = cum->regno;
7039 /* Fastcall allocates the first two DWORD (SImode) or
7040 smaller arguments to ECX and EDX if it isn't an
7046 || (type && AGGREGATE_TYPE_P (type)))
7049 /* ECX not EAX is the first allocated register. */
7050 if (regno == AX_REG)
7053 return gen_rtx_REG (mode, regno);
7058 if (cum->float_in_sse < 2)
7061 if (cum->float_in_sse < 1)
7065 /* In 32bit, we pass TImode in xmm registers. */
7072 if (!type || !AGGREGATE_TYPE_P (type))
7074 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
7077 warning (0, "SSE vector argument without SSE enabled "
7081 return gen_reg_or_parallel (mode, orig_mode,
7082 cum->sse_regno + FIRST_SSE_REG);
7087 /* OImode shouldn't be used directly. */
7096 if (!type || !AGGREGATE_TYPE_P (type))
7099 return gen_reg_or_parallel (mode, orig_mode,
7100 cum->sse_regno + FIRST_SSE_REG);
7110 if (!type || !AGGREGATE_TYPE_P (type))
7112 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
7115 warning (0, "MMX vector argument without MMX enabled "
7119 return gen_reg_or_parallel (mode, orig_mode,
7120 cum->mmx_regno + FIRST_MMX_REG);
7129 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7130 enum machine_mode orig_mode, const_tree type, bool named)
7132 /* Handle a hidden AL argument containing the number of registers
7133 for varargs x86-64 functions. */
7134 if (mode == VOIDmode)
7135 return GEN_INT (cum->maybe_vaarg
7136 ? (cum->sse_nregs < 0
7137 ? X86_64_SSE_REGPARM_MAX
7152 /* Unnamed 256bit vector mode parameters are passed on the stack. */
7158 return construct_container (mode, orig_mode, type, 0, cum->nregs,
7160 &x86_64_int_parameter_registers [cum->regno],
7165 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7166 enum machine_mode orig_mode, bool named,
7167 HOST_WIDE_INT bytes)
7171 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
7172 We use the value -2 to specify that the current function call is MS ABI. */
7173 if (mode == VOIDmode)
7174 return GEN_INT (-2);
7176 /* If we've run out of registers, it goes on the stack. */
7177 if (cum->nregs == 0)
7180 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
7182 /* Only floating point modes are passed in anything but integer regs. */
7183 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
7186 regno = cum->regno + FIRST_SSE_REG;
7191 /* Unnamed floating parameters are passed in both the
7192 SSE and integer registers. */
7193 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
7194 t2 = gen_rtx_REG (mode, regno);
7195 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
7196 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
7197 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
7200 /* Handle aggregate types passed in registers. */
7201 if (orig_mode == BLKmode)
7203 if (bytes > 0 && bytes <= 8)
7204 mode = (bytes > 4 ? DImode : SImode);
7205 if (mode == BLKmode)
7209 return gen_reg_or_parallel (mode, orig_mode, regno);
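/* Illustrative summary of the MS 64-bit rules above (not from the
   sources): each of the first four arguments owns one fixed slot,
   integer in %rcx/%rdx/%r8/%r9 or floating point in %xmm0-%xmm3.  For
   a variadic callee, an unnamed double in the second slot comes back
   from this function as a PARALLEL of (reg:DF xmm1) and (reg:DF dx),
   making the value available in both register files.  */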
7212 /* Return where to put the arguments to a function.
7213 Return zero to push the argument on the stack, or a hard register in which to store the argument.
7215 MODE is the argument's machine mode. TYPE is the data type of the
7216 argument. It is null for libcalls where that information may not be
7217 available. CUM gives information about the preceding args and about
7218 the function being called. NAMED is nonzero if this argument is a
7219 named parameter (otherwise it is an extra parameter matching an
7223 ix86_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
7224 const_tree type, bool named)
7226 enum machine_mode mode = omode;
7227 HOST_WIDE_INT bytes, words;
7230 if (mode == BLKmode)
7231 bytes = int_size_in_bytes (type);
7233 bytes = GET_MODE_SIZE (mode);
7234 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7236 /* To simplify the code below, represent vector types with a vector mode
7237 even if MMX/SSE are not active. */
7238 if (type && TREE_CODE (type) == VECTOR_TYPE)
7239 mode = type_natural_mode (type, cum);
7241 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
7242 arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
7243 else if (TARGET_64BIT)
7244 arg = function_arg_64 (cum, mode, omode, type, named);
7246 arg = function_arg_32 (cum, mode, omode, type, bytes, words);
7248 if (TARGET_VZEROUPPER && function_pass_avx256_p (arg))
7250 /* This argument uses 256bit AVX modes. */
7252 cfun->machine->callee_pass_avx256_p = true;
7254 cfun->machine->caller_pass_avx256_p = true;
7260 /* A C expression that indicates when an argument must be passed by
7261 reference. If nonzero for an argument, a copy of that argument is
7262 made in memory and a pointer to the argument is passed instead of
7263 the argument itself. The pointer is passed in whatever way is
7264 appropriate for passing a pointer to that type. */
7267 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7268 enum machine_mode mode ATTRIBUTE_UNUSED,
7269 const_tree type, bool named ATTRIBUTE_UNUSED)
7271 /* See Windows x64 Software Convention. */
7272 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
7274 int msize = (int) GET_MODE_SIZE (mode);
7277 /* Arrays are passed by reference. */
7278 if (TREE_CODE (type) == ARRAY_TYPE)
7281 if (AGGREGATE_TYPE_P (type))
7283 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
7284 are passed by reference. */
7285 msize = int_size_in_bytes (type);
7289 /* __m128 is passed by reference. */
7291 case 1: case 2: case 4: case 8:
7297 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
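/* Illustrative examples of the Windows x64 rules above (user code,
   not part of GCC):

     struct s3 { char c[3]; };   // size 3: passed by reference
     struct s8 { long long l; }; // size 8: passed by value in a register
     typedef float v4sf __attribute__((vector_size (16)));
                                 // 16-byte vector: passed by reference

   Only objects of exactly 1, 2, 4 or 8 bytes are passed directly.  */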
7303 /* Return true when TYPE should be 128bit aligned for 32bit argument
7304 passing ABI. XXX: This function is obsolete and is only used for
7305 checking psABI compatibility with previous versions of GCC. */
7308 ix86_compat_aligned_value_p (const_tree type)
7310 enum machine_mode mode = TYPE_MODE (type);
7311 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
7315 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
7317 if (TYPE_ALIGN (type) < 128)
7320 if (AGGREGATE_TYPE_P (type))
7322 /* Walk the aggregates recursively. */
7323 switch (TREE_CODE (type))
7327 case QUAL_UNION_TYPE:
7331 /* Walk all the structure fields. */
7332 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
7334 if (TREE_CODE (field) == FIELD_DECL
7335 && ix86_compat_aligned_value_p (TREE_TYPE (field)))
7342 /* Just for use if some languages pass arrays by value. */
7343 if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
7354 /* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
7355 XXX: This function is obsolete and is only used for checking psABI
7356 compatibility with previous versions of GCC. */
7359 ix86_compat_function_arg_boundary (enum machine_mode mode,
7360 const_tree type, unsigned int align)
7362 /* In 32bit, only _Decimal128 and __float128 are aligned to their
7363 natural boundaries. */
7364 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
7366 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
7367 make an exception for SSE modes since these require 128bit alignment.
7370 The handling here differs from field_alignment. ICC aligns MMX
7371 arguments to 4 byte boundaries, while structure fields are aligned
7372 to 8 byte boundaries. */
7375 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
7376 align = PARM_BOUNDARY;
7380 if (!ix86_compat_aligned_value_p (type))
7381 align = PARM_BOUNDARY;
7384 if (align > BIGGEST_ALIGNMENT)
7385 align = BIGGEST_ALIGNMENT;
7389 /* Return true when TYPE should be 128bit aligned for the 32bit argument passing ABI. */
7393 ix86_contains_aligned_value_p (const_tree type)
7395 enum machine_mode mode = TYPE_MODE (type);
7397 if (mode == XFmode || mode == XCmode)
7400 if (TYPE_ALIGN (type) < 128)
7403 if (AGGREGATE_TYPE_P (type))
7405 /* Walk the aggregates recursively. */
7406 switch (TREE_CODE (type))
7410 case QUAL_UNION_TYPE:
7414 /* Walk all the structure fields. */
7415 for (field = TYPE_FIELDS (type);
7417 field = DECL_CHAIN (field))
7419 if (TREE_CODE (field) == FIELD_DECL
7420 && ix86_contains_aligned_value_p (TREE_TYPE (field)))
7427 /* Just for use if some languages pass arrays by value. */
7428 if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
7437 return TYPE_ALIGN (type) >= 128;
7442 /* Gives the alignment boundary, in bits, of an argument with the
7443 specified mode and type. */
7446 ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
7451 /* Since the main variant type is used for the call, convert the
7452 argument type to its main variant. */
7453 type = TYPE_MAIN_VARIANT (type);
7454 align = TYPE_ALIGN (type);
7457 align = GET_MODE_ALIGNMENT (mode);
7458 if (align < PARM_BOUNDARY)
7459 align = PARM_BOUNDARY;
7463 unsigned int saved_align = align;
7467 /* i386 ABI defines XFmode arguments to be 4 byte aligned. */
7470 if (mode == XFmode || mode == XCmode)
7471 align = PARM_BOUNDARY;
7473 else if (!ix86_contains_aligned_value_p (type))
7474 align = PARM_BOUNDARY;
7477 align = PARM_BOUNDARY;
7482 && align != ix86_compat_function_arg_boundary (mode, type,
7486 inform (input_location,
7487 "The ABI for passing parameters with %d-byte"
7488 " alignment has changed in GCC 4.6",
7489 align / BITS_PER_UNIT);
7496 /* Return true if REGNO is a possible register number of a function value. */
7499 ix86_function_value_regno_p (const unsigned int regno)
7506 case FIRST_FLOAT_REG:
7507 /* TODO: The function should depend on current function ABI but
7508 builtins.c would need updating then. Therefore we use the default ABI. */
7510 if (TARGET_64BIT && ix86_abi == MS_ABI)
7512 return TARGET_FLOAT_RETURNS_IN_80387;
7518 if (TARGET_MACHO || TARGET_64BIT)
7526 /* Define how to find the value returned by a function.
7527 VALTYPE is the data type of the value (as a tree).
7528 If the precise function being called is known, FUNC is its FUNCTION_DECL;
7529 otherwise, FUNC is 0. */
7532 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
7533 const_tree fntype, const_tree fn)
7537 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
7538 we normally prevent this case when mmx is not available. However
7539 some ABIs may require the result to be returned like DImode. */
7540 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7541 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
7543 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
7544 we prevent this case when sse is not available. However some ABIs
7545 may require the result to be returned like integer TImode. */
7546 else if (mode == TImode
7547 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7548 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
7550 /* 32-byte vector modes in %ymm0. */
7551 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
7552 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
7554 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
7555 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
7556 regno = FIRST_FLOAT_REG;
7558 /* Most things go in %eax. */
7561 /* Override FP return register with %xmm0 for local functions when
7562 SSE math is enabled or for functions with sseregparm attribute. */
7563 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
7565 int sse_level = ix86_function_sseregparm (fntype, fn, false);
7566 if ((sse_level >= 1 && mode == SFmode)
7567 || (sse_level == 2 && mode == DFmode))
7568 regno = FIRST_SSE_REG;
7571 /* OImode shouldn't be used directly. */
7572 gcc_assert (mode != OImode);
7574 return gen_rtx_REG (orig_mode, regno);
7578 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
7583 /* Handle libcalls, which don't provide a type node. */
7584 if (valtype == NULL)
7596 return gen_rtx_REG (mode, FIRST_SSE_REG);
7599 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
7603 return gen_rtx_REG (mode, AX_REG);
7607 ret = construct_container (mode, orig_mode, valtype, 1,
7608 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
7609 x86_64_int_return_registers, 0);
7611 /* For zero sized structures, construct_container returns NULL, but we
7612 need to keep the rest of the compiler happy by returning a meaningful value. */
7614 ret = gen_rtx_REG (orig_mode, AX_REG);
7620 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
7622 unsigned int regno = AX_REG;
7626 switch (GET_MODE_SIZE (mode))
7629 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7630 && !COMPLEX_MODE_P (mode))
7631 regno = FIRST_SSE_REG;
7635 if (mode == SFmode || mode == DFmode)
7636 regno = FIRST_SSE_REG;
7642 return gen_rtx_REG (orig_mode, regno);
7646 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
7647 enum machine_mode orig_mode, enum machine_mode mode)
7649 const_tree fn, fntype;
7652 if (fntype_or_decl && DECL_P (fntype_or_decl))
7653 fn = fntype_or_decl;
7654 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
7656 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
7657 return function_value_ms_64 (orig_mode, mode);
7658 else if (TARGET_64BIT)
7659 return function_value_64 (orig_mode, mode, valtype);
7661 return function_value_32 (orig_mode, mode, fntype, fn);
7665 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
7666 bool outgoing ATTRIBUTE_UNUSED)
7668 enum machine_mode mode, orig_mode;
7670 orig_mode = TYPE_MODE (valtype);
7671 mode = type_natural_mode (valtype, NULL);
7672 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
7676 ix86_libcall_value (enum machine_mode mode)
7678 return ix86_function_value_1 (NULL, NULL, mode, mode);
7681 /* Return true iff TYPE is returned in memory. */
7683 static bool ATTRIBUTE_UNUSED
7684 return_in_memory_32 (const_tree type, enum machine_mode mode)
7688 if (mode == BLKmode)
7691 size = int_size_in_bytes (type);
7693 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
7696 if (VECTOR_MODE_P (mode) || mode == TImode)
7698 /* User-created vectors small enough to fit in EAX. */
7702 /* MMX/3dNow values are returned in MM0,
7703 except when it doesn't exist or the ABI prescribes otherwise. */
7705 return !TARGET_MMX || TARGET_VECT8_RETURNS;
7707 /* SSE values are returned in XMM0, except when it doesn't exist. */
7711 /* AVX values are returned in YMM0, except when it doesn't exist. */
7722 /* OImode shouldn't be used directly. */
7723 gcc_assert (mode != OImode);
7728 static bool ATTRIBUTE_UNUSED
7729 return_in_memory_64 (const_tree type, enum machine_mode mode)
7731 int needed_intregs, needed_sseregs;
7732 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
7735 static bool ATTRIBUTE_UNUSED
7736 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
7738 HOST_WIDE_INT size = int_size_in_bytes (type);
7740 /* __m128 is returned in xmm0. */
7741 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7742 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
7745 /* Otherwise, the size must be exactly in [1248]. */
7746 return size != 1 && size != 2 && size != 4 && size != 8;
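/* For illustration (not from the sources): under the MS 64-bit ABI

     struct r8  { int a, b; };    // size 8: returned in %rax
     struct r12 { int a, b, c; }; // size 12: returned in memory

   while a 16-byte vector such as __m128 comes back in %xmm0.  */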
7750 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7752 #ifdef SUBTARGET_RETURN_IN_MEMORY
7753 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
7755 const enum machine_mode mode = type_natural_mode (type, NULL);
7759 if (ix86_function_type_abi (fntype) == MS_ABI)
7760 return return_in_memory_ms_64 (type, mode);
7762 return return_in_memory_64 (type, mode);
7765 return return_in_memory_32 (type, mode);
7769 /* When returning SSE vector types, we have a choice of either
7770 (1) being abi incompatible with a -march switch, or
7771 (2) generating an error.
7772 Given no good solution, I think the safest thing is one warning.
7773 The user won't be able to use -Werror, but....
7775 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
7776 called in response to actually generating a caller or callee that
7777 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
7778 via aggregate_value_p for general type probing from tree-ssa. */
7781 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
7783 static bool warnedsse, warnedmmx;
7785 if (!TARGET_64BIT && type)
7787 /* Look at the return type of the function, not the function type. */
7788 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
7790 if (!TARGET_SSE && !warnedsse)
7793 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7796 warning (0, "SSE vector return without SSE enabled "
7801 if (!TARGET_MMX && !warnedmmx)
7803 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7806 warning (0, "MMX vector return without MMX enabled "
7816 /* Create the va_list data type. */
7818 /* Returns the calling convention specific va_list data type.
7819 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
7822 ix86_build_builtin_va_list_abi (enum calling_abi abi)
7824 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
7826 /* For i386 we use a plain pointer to the argument area. */
7827 if (!TARGET_64BIT || abi == MS_ABI)
7828 return build_pointer_type (char_type_node);
7830 record = lang_hooks.types.make_type (RECORD_TYPE);
7831 type_decl = build_decl (BUILTINS_LOCATION,
7832 TYPE_DECL, get_identifier ("__va_list_tag"), record);
7834 f_gpr = build_decl (BUILTINS_LOCATION,
7835 FIELD_DECL, get_identifier ("gp_offset"),
7836 unsigned_type_node);
7837 f_fpr = build_decl (BUILTINS_LOCATION,
7838 FIELD_DECL, get_identifier ("fp_offset"),
7839 unsigned_type_node);
7840 f_ovf = build_decl (BUILTINS_LOCATION,
7841 FIELD_DECL, get_identifier ("overflow_arg_area"),
7843 f_sav = build_decl (BUILTINS_LOCATION,
7844 FIELD_DECL, get_identifier ("reg_save_area"),
7847 va_list_gpr_counter_field = f_gpr;
7848 va_list_fpr_counter_field = f_fpr;
7850 DECL_FIELD_CONTEXT (f_gpr) = record;
7851 DECL_FIELD_CONTEXT (f_fpr) = record;
7852 DECL_FIELD_CONTEXT (f_ovf) = record;
7853 DECL_FIELD_CONTEXT (f_sav) = record;
7855 TYPE_STUB_DECL (record) = type_decl;
7856 TYPE_NAME (record) = type_decl;
7857 TYPE_FIELDS (record) = f_gpr;
7858 DECL_CHAIN (f_gpr) = f_fpr;
7859 DECL_CHAIN (f_fpr) = f_ovf;
7860 DECL_CHAIN (f_ovf) = f_sav;
7862 layout_type (record);
7864 /* The correct type is an array type of one element. */
7865 return build_array_type (record, build_index_type (size_zero_node));
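/* The record built above matches the SysV x86-64 psABI va_list; in
   user-level C it is equivalent to

     typedef struct __va_list_tag {
       unsigned int gp_offset;   // byte offset into reg_save_area for GPRs
       unsigned int fp_offset;   // byte offset into reg_save_area for SSE regs
       void *overflow_arg_area;  // next argument passed on the stack
       void *reg_save_area;      // register save area set up by the prologue
     } __builtin_va_list[1];

   The one-element array type gives va_list its pass-by-reference
   behavior when used as a function argument.  */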
7868 /* Set up the builtin va_list data type and, for 64-bit, the additional
7869 calling convention specific va_list data types. */
7872 ix86_build_builtin_va_list (void)
7874 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
7876 /* Initialize ABI specific va_list builtin types. */
7880 if (ix86_abi == MS_ABI)
7882 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
7883 if (TREE_CODE (t) != RECORD_TYPE)
7884 t = build_variant_type_copy (t);
7885 sysv_va_list_type_node = t;
7890 if (TREE_CODE (t) != RECORD_TYPE)
7891 t = build_variant_type_copy (t);
7892 sysv_va_list_type_node = t;
7894 if (ix86_abi != MS_ABI)
7896 t = ix86_build_builtin_va_list_abi (MS_ABI);
7897 if (TREE_CODE (t) != RECORD_TYPE)
7898 t = build_variant_type_copy (t);
7899 ms_va_list_type_node = t;
7904 if (TREE_CODE (t) != RECORD_TYPE)
7905 t = build_variant_type_copy (t);
7906 ms_va_list_type_node = t;
7913 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
7916 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
7922 /* GPR size of varargs save area. */
7923 if (cfun->va_list_gpr_size)
7924 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
7926 ix86_varargs_gpr_size = 0;
7928 /* FPR size of varargs save area. We don't need it if we don't pass
7929 anything in SSE registers. */
7930 if (TARGET_SSE && cfun->va_list_fpr_size)
7931 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
7933 ix86_varargs_fpr_size = 0;
7935 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
7938 save_area = frame_pointer_rtx;
7939 set = get_varargs_alias_set ();
7941 max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
7942 if (max > X86_64_REGPARM_MAX)
7943 max = X86_64_REGPARM_MAX;
7945 for (i = cum->regno; i < max; i++)
7947 mem = gen_rtx_MEM (Pmode,
7948 plus_constant (save_area, i * UNITS_PER_WORD));
7949 MEM_NOTRAP_P (mem) = 1;
7950 set_mem_alias_set (mem, set);
7951 emit_move_insn (mem, gen_rtx_REG (Pmode,
7952 x86_64_int_parameter_registers[i]));
7955 if (ix86_varargs_fpr_size)
7957 enum machine_mode smode;
7960 /* Now emit code to save SSE registers. The AX parameter contains the number
7961 of SSE parameter registers used to call this function, though all we
7962 actually check here is the zero/non-zero status. */
7964 label = gen_label_rtx ();
7965 test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
7966 emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
7969 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
7970 we used movdqa (i.e. TImode) instead? Perhaps even better would
7971 be if we could determine the real mode of the data, via a hook
7972 into pass_stdarg. Ignore all that for now. */
7974 if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
7975 crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
7977 max = cum->sse_regno + cfun->va_list_fpr_size / 16;
7978 if (max > X86_64_SSE_REGPARM_MAX)
7979 max = X86_64_SSE_REGPARM_MAX;
7981 for (i = cum->sse_regno; i < max; ++i)
7983 mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
7984 mem = gen_rtx_MEM (smode, mem);
7985 MEM_NOTRAP_P (mem) = 1;
7986 set_mem_alias_set (mem, set);
7987 set_mem_align (mem, GET_MODE_ALIGNMENT (smode));
7989 emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
7997 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7999 alias_set_type set = get_varargs_alias_set ();
8002 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
8006 mem = gen_rtx_MEM (Pmode,
8007 plus_constant (virtual_incoming_args_rtx,
8008 i * UNITS_PER_WORD));
8009 MEM_NOTRAP_P (mem) = 1;
8010 set_mem_alias_set (mem, set);
8012 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
8013 emit_move_insn (mem, reg);
8018 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8019 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8022 CUMULATIVE_ARGS next_cum;
8025 /* This argument doesn't appear to be used anymore. Which is good,
8026 because the old code here didn't suppress rtl generation. */
8027 gcc_assert (!no_rtl);
8032 fntype = TREE_TYPE (current_function_decl);
8034 /* For varargs, we do not want to skip the dummy va_dcl argument.
8035 For stdargs, we do want to skip the last named argument. */
8037 if (stdarg_p (fntype))
8038 ix86_function_arg_advance (&next_cum, mode, type, true);
8040 if (cum->call_abi == MS_ABI)
8041 setup_incoming_varargs_ms_64 (&next_cum);
8043 setup_incoming_varargs_64 (&next_cum);
8046 /* Check whether TYPE is a char * kind of va_list. */
8049 is_va_list_char_pointer (tree type)
8053 /* For 32-bit it is always true. */
8056 canonic = ix86_canonical_va_list_type (type);
8057 return (canonic == ms_va_list_type_node
8058 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
8061 /* Implement va_start. */
8064 ix86_va_start (tree valist, rtx nextarg)
8066 HOST_WIDE_INT words, n_gpr, n_fpr;
8067 tree f_gpr, f_fpr, f_ovf, f_sav;
8068 tree gpr, fpr, ovf, sav, t;
8072 if (flag_split_stack
8073 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8075 unsigned int scratch_regno;
8077 /* When we are splitting the stack, we can't refer to the stack
8078 arguments using internal_arg_pointer, because they may be on
8079 the old stack. The split stack prologue will arrange to
8080 leave a pointer to the old stack arguments in a scratch
8081 register, which we here copy to a pseudo-register. The split
8082 stack prologue can't set the pseudo-register directly because
8083 it (the prologue) runs before any registers have been saved. */
8085 scratch_regno = split_stack_prologue_scratch_regno ();
8086 if (scratch_regno != INVALID_REGNUM)
8090 reg = gen_reg_rtx (Pmode);
8091 cfun->machine->split_stack_varargs_pointer = reg;
8094 emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
8098 push_topmost_sequence ();
8099 emit_insn_after (seq, entry_of_function ());
8100 pop_topmost_sequence ();
8104 /* Only 64bit target needs something special. */
8105 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
8107 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8108 std_expand_builtin_va_start (valist, nextarg);
8113 va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
8114 next = expand_binop (ptr_mode, add_optab,
8115 cfun->machine->split_stack_varargs_pointer,
8116 crtl->args.arg_offset_rtx,
8117 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8118 convert_move (va_r, next, 0);
8123 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
8124 f_fpr = DECL_CHAIN (f_gpr);
8125 f_ovf = DECL_CHAIN (f_fpr);
8126 f_sav = DECL_CHAIN (f_ovf);
8128 valist = build_simple_mem_ref (valist);
8129 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
8130 /* The following should be folded into the MEM_REF offset. */
8131 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
8133 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
8135 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
8137 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
8140 /* Count number of gp and fp argument registers used. */
8141 words = crtl->args.info.words;
8142 n_gpr = crtl->args.info.regno;
8143 n_fpr = crtl->args.info.sse_regno;
8145 if (cfun->va_list_gpr_size)
8147 type = TREE_TYPE (gpr);
8148 t = build2 (MODIFY_EXPR, type,
8149 gpr, build_int_cst (type, n_gpr * 8));
8150 TREE_SIDE_EFFECTS (t) = 1;
8151 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8154 if (TARGET_SSE && cfun->va_list_fpr_size)
8156 type = TREE_TYPE (fpr);
8157 t = build2 (MODIFY_EXPR, type, fpr,
8158 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
8159 TREE_SIDE_EFFECTS (t) = 1;
8160 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
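  /* Illustrative example of the two initializations above (SysV x86-64
     ABI layout): for f (const char *fmt, ...) with one named integer
     argument and no named SSE arguments, n_gpr is 1 and n_fpr is 0, so
     gp_offset starts at 8 and fp_offset at 8 * X86_64_REGPARM_MAX = 48,
     the beginning of the FP half of the register save area.  */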
8163 /* Find the overflow area. */
8164 type = TREE_TYPE (ovf);
8165 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8166 ovf_rtx = crtl->args.internal_arg_pointer;
8168 ovf_rtx = cfun->machine->split_stack_varargs_pointer;
8169 t = make_tree (type, ovf_rtx);
8171 t = build2 (POINTER_PLUS_EXPR, type, t,
8172 size_int (words * UNITS_PER_WORD));
8173 t = build2 (MODIFY_EXPR, type, ovf, t);
8174 TREE_SIDE_EFFECTS (t) = 1;
8175 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8177 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
8179 /* Find the register save area.
8180 The prologue of the function saves it right above the stack frame.  */
8181 type = TREE_TYPE (sav);
8182 t = make_tree (type, frame_pointer_rtx);
8183 if (!ix86_varargs_gpr_size)
8184 t = build2 (POINTER_PLUS_EXPR, type, t,
8185 size_int (-8 * X86_64_REGPARM_MAX));
8186 t = build2 (MODIFY_EXPR, type, sav, t);
8187 TREE_SIDE_EFFECTS (t) = 1;
8188 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8192 /* Implement va_arg. */
8195 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8198 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
8199 tree f_gpr, f_fpr, f_ovf, f_sav;
8200 tree gpr, fpr, ovf, sav, t;
8202 tree lab_false, lab_over = NULL_TREE;
8207 enum machine_mode nat_mode;
8208 unsigned int arg_boundary;
8210 /* Only 64bit target needs something special. */
8211 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
8212 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
8214 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
8215 f_fpr = DECL_CHAIN (f_gpr);
8216 f_ovf = DECL_CHAIN (f_fpr);
8217 f_sav = DECL_CHAIN (f_ovf);
8219 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
8220 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
8221 valist = build_va_arg_indirect_ref (valist);
8222 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8223 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8224 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8226 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
8228 type = build_pointer_type (type);
8229 size = int_size_in_bytes (type);
8230 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
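  /* Illustrative example (assuming UNITS_PER_WORD == 8): a 20-byte
     structure gives size == 20 and rsize == (20 + 7) / 8 == 3 stack
     words.  For a type passed by reference, SIZE is that of the pointer
     built just above.  */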
8232 nat_mode = type_natural_mode (type, NULL);
8241 /* Unnamed 256bit vector mode parameters are passed on the stack.  */
8242 if (!TARGET_64BIT_MS_ABI)
8249 container = construct_container (nat_mode, TYPE_MODE (type),
8250 type, 0, X86_64_REGPARM_MAX,
8251 X86_64_SSE_REGPARM_MAX, intreg,
8256 /* Pull the value out of the saved registers. */
8258 addr = create_tmp_var (ptr_type_node, "addr");
8262 int needed_intregs, needed_sseregs;
8264 tree int_addr, sse_addr;
8266 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8267 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8269 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
8271 need_temp = (!REG_P (container)
8272 && ((needed_intregs && TYPE_ALIGN (type) > 64)
8273 || TYPE_ALIGN (type) > 128));
8275 /* In case we are passing a structure, verify that it is a consecutive block
8276 in the register save area.  If not, we need to do moves.  */
8277 if (!need_temp && !REG_P (container))
8279 /* Verify that all registers are strictly consecutive.  */
8280 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
8284 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8286 rtx slot = XVECEXP (container, 0, i);
8287 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
8288 || INTVAL (XEXP (slot, 1)) != i * 16)
8296 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8298 rtx slot = XVECEXP (container, 0, i);
8299 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
8300 || INTVAL (XEXP (slot, 1)) != i * 8)
8312 int_addr = create_tmp_var (ptr_type_node, "int_addr");
8313 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
8316 /* First ensure that we fit completely in registers. */
8319 t = build_int_cst (TREE_TYPE (gpr),
8320 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
8321 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
8322 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8323 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8324 gimplify_and_add (t, pre_p);
8328 t = build_int_cst (TREE_TYPE (fpr),
8329 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
8330 + X86_64_REGPARM_MAX * 8);
8331 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
8332 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8333 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8334 gimplify_and_add (t, pre_p);
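	  /* Illustrative example of the two bounds above: with
	     needed_intregs == 2 the GP limit is (6 - 2 + 1) * 8 == 40;
	     once gp_offset reaches 40 only one 8-byte GP slot remains,
	     so we must take the overflow path.  With needed_sseregs == 1
	     the FP limit is (8 - 1 + 1) * 16 + 48 == 176, the total size
	     of the register save area.  */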
8337 /* Compute index to start of area used for integer regs. */
8340 /* int_addr = gpr + sav; */
8341 t = fold_convert (sizetype, gpr);
8342 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8343 gimplify_assign (int_addr, t, pre_p);
8347 /* sse_addr = fpr + sav; */
8348 t = fold_convert (sizetype, fpr);
8349 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8350 gimplify_assign (sse_addr, t, pre_p);
8354 int i, prev_size = 0;
8355 tree temp = create_tmp_var (type, "va_arg_tmp");
8358 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
8359 gimplify_assign (addr, t, pre_p);
8361 for (i = 0; i < XVECLEN (container, 0); i++)
8363 rtx slot = XVECEXP (container, 0, i);
8364 rtx reg = XEXP (slot, 0);
8365 enum machine_mode mode = GET_MODE (reg);
8371 tree dest_addr, dest;
8372 int cur_size = GET_MODE_SIZE (mode);
8374 gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
8375 prev_size = INTVAL (XEXP (slot, 1));
8376 if (prev_size + cur_size > size)
8378 cur_size = size - prev_size;
8379 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
8380 if (mode == BLKmode)
8383 piece_type = lang_hooks.types.type_for_mode (mode, 1);
8384 if (mode == GET_MODE (reg))
8385 addr_type = build_pointer_type (piece_type);
8387 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8389 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8392 if (SSE_REGNO_P (REGNO (reg)))
8394 src_addr = sse_addr;
8395 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
8399 src_addr = int_addr;
8400 src_offset = REGNO (reg) * 8;
8402 src_addr = fold_convert (addr_type, src_addr);
8403 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
8404 size_int (src_offset));
8406 dest_addr = fold_convert (daddr_type, addr);
8407 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
8408 size_int (prev_size));
8409 if (cur_size == GET_MODE_SIZE (mode))
8411 src = build_va_arg_indirect_ref (src_addr);
8412 dest = build_va_arg_indirect_ref (dest_addr);
8414 gimplify_assign (dest, src, pre_p);
8419 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
8420 3, dest_addr, src_addr,
8421 size_int (cur_size));
8422 gimplify_and_add (copy, pre_p);
8424 prev_size += cur_size;
8430 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
8431 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
8432 gimplify_assign (gpr, t, pre_p);
8437 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
8438 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
8439 gimplify_assign (fpr, t, pre_p);
8442 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8444 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8447 /* ... otherwise out of the overflow area. */
8449 /* When we align a parameter on the stack for the caller, if the parameter
8450 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
8451 aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  Here we make the callee
8452 match the caller.  */
8453 arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
8454 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
8455 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
8457 /* Care for on-stack alignment if needed. */
8458 if (arg_boundary <= 64 || size == 0)
8462 HOST_WIDE_INT align = arg_boundary / 8;
8463 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
8464 size_int (align - 1));
8465 t = fold_convert (sizetype, t);
8466 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
8468 t = fold_convert (TREE_TYPE (ovf), t);
8471 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8472 gimplify_assign (addr, t, pre_p);
8474 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
8475 size_int (rsize * UNITS_PER_WORD));
8476 gimplify_assign (unshare_expr (ovf), t, pre_p);
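      /* Illustrative example of the rounding above: with a 16-byte
	 requirement, align == 16 and an overflow pointer of, say, 100
	 becomes (100 + 15) & -16 == 112, the next 16-byte boundary.  */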
8479 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8481 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
8482 addr = fold_convert (ptrtype, addr);
8485 addr = build_va_arg_indirect_ref (addr);
8486 return build_va_arg_indirect_ref (addr);
8489 /* Return true if OPNUM's MEM should be matched
8490 in movabs* patterns. */
8493 ix86_check_movabs (rtx insn, int opnum)
8497 set = PATTERN (insn);
8498 if (GET_CODE (set) == PARALLEL)
8499 set = XVECEXP (set, 0, 0);
8500 gcc_assert (GET_CODE (set) == SET);
8501 mem = XEXP (set, opnum);
8502 while (GET_CODE (mem) == SUBREG)
8503 mem = SUBREG_REG (mem);
8504 gcc_assert (MEM_P (mem));
8505 return volatile_ok || !MEM_VOLATILE_P (mem);
8508 /* Initialize the table of extra 80387 mathematical constants. */
8511 init_ext_80387_constants (void)
8513 static const char * cst[5] =
8515 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
8516 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
8517 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
8518 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
8519 "3.1415926535897932385128089594061862044", /* 4: fldpi */
8523 for (i = 0; i < 5; i++)
8525 real_from_string (&ext_80387_constants_table[i], cst[i]);
8526 /* Ensure each constant is rounded to XFmode precision. */
8527 real_convert (&ext_80387_constants_table[i],
8528 XFmode, &ext_80387_constants_table[i]);
8531 ext_80387_constants_init = 1;
8534 /* Return non-zero if the constant is something that
8535 can be loaded with a special instruction. */
8538 standard_80387_constant_p (rtx x)
8540 enum machine_mode mode = GET_MODE (x);
8544 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
8547 if (x == CONST0_RTX (mode))
8549 if (x == CONST1_RTX (mode))
8552 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8554 /* For XFmode constants, try to find a special 80387 instruction when
8555 optimizing for size or on those CPUs that benefit from them. */
8557 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
8561 if (! ext_80387_constants_init)
8562 init_ext_80387_constants ();
8564 for (i = 0; i < 5; i++)
8565 if (real_identical (&r, &ext_80387_constants_table[i]))
8569 /* A load of the constant -0.0 or -1.0 will be split into a
8570 fldz;fchs or fld1;fchs sequence.  */
8571 if (real_isnegzero (&r))
8573 if (real_identical (&r, &dconstm1))
8579 /* Return the opcode of the special instruction to be used to load
8583 standard_80387_constant_opcode (rtx x)
8585 switch (standard_80387_constant_p (x))
8609 /* Return the CONST_DOUBLE representing the 80387 constant that is
8610 loaded by the specified special instruction. The argument IDX
8611 matches the return value from standard_80387_constant_p. */
8614 standard_80387_constant_rtx (int idx)
8618 if (! ext_80387_constants_init)
8619 init_ext_80387_constants ();
8635 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
8639 /* Return 1 if X is all 0s and 2 if X is all 1s
8640 in a supported SSE vector mode.  */
8643 standard_sse_constant_p (rtx x)
8645 enum machine_mode mode = GET_MODE (x);
8647 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
8649 if (vector_all_ones_operand (x, mode))
8665 /* Return the opcode of the special instruction to be used to load
8669 standard_sse_constant_opcode (rtx insn, rtx x)
8671 switch (standard_sse_constant_p (x))
8674 switch (get_attr_mode (insn))
8677 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8679 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8680 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8682 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
8684 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8685 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8687 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
8689 return "vxorps\t%x0, %x0, %x0";
8691 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8692 return "vxorps\t%x0, %x0, %x0";
8694 return "vxorpd\t%x0, %x0, %x0";
8696 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8697 return "vxorps\t%x0, %x0, %x0";
8699 return "vpxor\t%x0, %x0, %x0";
8704 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
8711 /* Returns true if OP contains a symbol reference */
8714 symbolic_reference_mentioned_p (rtx op)
8719 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
8722 fmt = GET_RTX_FORMAT (GET_CODE (op));
8723 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
8729 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
8730 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
8734 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
8741 /* Return true if it is appropriate to emit `ret' instructions in the
8742 body of a function. Do this only if the epilogue is simple, needing a
8743 couple of insns. Prior to reloading, we can't tell how many registers
8744 must be saved, so return false then. Return false if there is no frame
8745 marker to de-allocate. */
8748 ix86_can_use_return_insn_p (void)
8750 struct ix86_frame frame;
8752 if (! reload_completed || frame_pointer_needed)
8755 /* Don't allow more than 32k pop, since that's all we can do
8756 with one instruction. */
8757 if (crtl->args.pops_args && crtl->args.size >= 32768)
8760 ix86_compute_frame_layout (&frame);
8761 return (frame.stack_pointer_offset == UNITS_PER_WORD
8762 && (frame.nregs + frame.nsseregs) == 0);
8765 /* Value should be nonzero if functions must have frame pointers.
8766 Zero means the frame pointer need not be set up (and parms may
8767 be accessed via the stack pointer) in functions that seem suitable. */
8770 ix86_frame_pointer_required (void)
8772 /* If we accessed previous frames, then the generated code expects
8773 to be able to access the saved ebp value in our frame. */
8774 if (cfun->machine->accesses_prev_frame)
8777 /* Several x86 OSes need a frame pointer for other reasons,
8778 usually pertaining to setjmp.  */
8779 if (SUBTARGET_FRAME_POINTER_REQUIRED)
8782 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
8783 turns off the frame pointer by default. Turn it back on now if
8784 we've not got a leaf function. */
8785 if (TARGET_OMIT_LEAF_FRAME_POINTER
8786 && (!current_function_is_leaf
8787 || ix86_current_function_calls_tls_descriptor))
8790 if (crtl->profile && !flag_fentry)
8796 /* Record that the current function accesses previous call frames. */
8799 ix86_setup_frame_addresses (void)
8801 cfun->machine->accesses_prev_frame = 1;
8804 #ifndef USE_HIDDEN_LINKONCE
8805 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
8806 # define USE_HIDDEN_LINKONCE 1
8808 # define USE_HIDDEN_LINKONCE 0
8812 static int pic_labels_used;
8814 /* Fills in the label name that should be used for a pc thunk for
8815 the given register. */
8818 get_pc_thunk_name (char name[32], unsigned int regno)
8820 gcc_assert (!TARGET_64BIT);
8822 if (USE_HIDDEN_LINKONCE)
8823 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
8825 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
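  /* For example, with USE_HIDDEN_LINKONCE the thunk for %ebx is named
     "__i686.get_pc_thunk.bx"; otherwise an internal label of the form
     "LPR<regno>" (exact spelling is assembler-dependent) is used.  */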
8829 /* This function generates code for -fpic that loads %ebx with
8830 the return address of the caller and then returns. */
8833 ix86_code_end (void)
8838 for (regno = AX_REG; regno <= SP_REG; regno++)
8843 if (!(pic_labels_used & (1 << regno)))
8846 get_pc_thunk_name (name, regno);
8848 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
8849 get_identifier (name),
8850 build_function_type (void_type_node, void_list_node));
8851 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
8852 NULL_TREE, void_type_node);
8853 TREE_PUBLIC (decl) = 1;
8854 TREE_STATIC (decl) = 1;
8859 switch_to_section (darwin_sections[text_coal_section]);
8860 fputs ("\t.weak_definition\t", asm_out_file);
8861 assemble_name (asm_out_file, name);
8862 fputs ("\n\t.private_extern\t", asm_out_file);
8863 assemble_name (asm_out_file, name);
8864 putc ('\n', asm_out_file);
8865 ASM_OUTPUT_LABEL (asm_out_file, name);
8866 DECL_WEAK (decl) = 1;
8870 if (USE_HIDDEN_LINKONCE)
8872 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
8874 targetm.asm_out.unique_section (decl, 0);
8875 switch_to_section (get_named_section (decl, NULL, 0));
8877 targetm.asm_out.globalize_label (asm_out_file, name);
8878 fputs ("\t.hidden\t", asm_out_file);
8879 assemble_name (asm_out_file, name);
8880 putc ('\n', asm_out_file);
8881 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
8885 switch_to_section (text_section);
8886 ASM_OUTPUT_LABEL (asm_out_file, name);
8889 DECL_INITIAL (decl) = make_node (BLOCK);
8890 current_function_decl = decl;
8891 init_function_start (decl);
8892 first_function_block_is_cold = false;
8893 /* Make sure unwind info is emitted for the thunk if needed. */
8894 final_start_function (emit_barrier (), asm_out_file, 1);
8896 /* Pad stack IP move with 4 instructions (two NOPs count
8897 as one instruction). */
8898 if (TARGET_PAD_SHORT_FUNCTION)
8903 fputs ("\tnop\n", asm_out_file);
8906 xops[0] = gen_rtx_REG (Pmode, regno);
8907 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8908 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
8909 fputs ("\tret\n", asm_out_file);
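      /* Illustrative example: the thunk body emitted for %ebx is just

	     mov	(%esp), %ebx
	     ret

	 i.e. it copies its own return address, the address of the insn
	 following the call, into the PIC register.  */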
8910 final_end_function ();
8911 init_insn_lengths ();
8912 free_after_compilation (cfun);
8914 current_function_decl = NULL;
8917 if (flag_split_stack)
8918 file_end_indicate_split_stack ();
8921 /* Emit code for the SET_GOT patterns. */
8924 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
8930 if (TARGET_VXWORKS_RTP && flag_pic)
8932 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
8933 xops[2] = gen_rtx_MEM (Pmode,
8934 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
8935 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
8937 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
8938 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
8939 an unadorned address. */
8940 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
8941 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
8942 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
8946 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
8948 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
8950 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
8953 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
8956 output_asm_insn ("call\t%a2", xops);
8957 #ifdef DWARF2_UNWIND_INFO
8958 /* The call to the next label acts as a push.  */
8959 if (dwarf2out_do_frame ())
8963 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8964 gen_rtx_PLUS (Pmode,
8967 RTX_FRAME_RELATED_P (insn) = 1;
8968 dwarf2out_frame_debug (insn, true);
8975 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8976 is what will be referenced by the Mach-O PIC subsystem. */
8978 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8981 targetm.asm_out.internal_label (asm_out_file, "L",
8982 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
8986 output_asm_insn ("pop%z0\t%0", xops);
8987 #ifdef DWARF2_UNWIND_INFO
8988 /* The pop is a pop and clobbers dest, but doesn't restore it
8989 for unwind info purposes. */
8990 if (dwarf2out_do_frame ())
8994 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
8995 dwarf2out_frame_debug (insn, true);
8996 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8997 gen_rtx_PLUS (Pmode,
9000 RTX_FRAME_RELATED_P (insn) = 1;
9001 dwarf2out_frame_debug (insn, true);
9010 get_pc_thunk_name (name, REGNO (dest));
9011 pic_labels_used |= 1 << REGNO (dest);
9013 #ifdef DWARF2_UNWIND_INFO
9014 /* Ensure all queued register saves are flushed before the
9016 if (dwarf2out_do_frame ())
9017 dwarf2out_flush_queued_reg_saves ();
9019 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
9020 xops[2] = gen_rtx_MEM (QImode, xops[2]);
9021 output_asm_insn ("call\t%X2", xops);
9022 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
9023 is what will be referenced by the Mach-O PIC subsystem. */
9026 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
9028 targetm.asm_out.internal_label (asm_out_file, "L",
9029 CODE_LABEL_NUMBER (label));
9036 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
9037 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
9039 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
9044 /* Generate a "push" pattern for input ARG.  */
9049 struct machine_function *m = cfun->machine;
9051 if (m->fs.cfa_reg == stack_pointer_rtx)
9052 m->fs.cfa_offset += UNITS_PER_WORD;
9053 m->fs.sp_offset += UNITS_PER_WORD;
9055 return gen_rtx_SET (VOIDmode,
9057 gen_rtx_PRE_DEC (Pmode,
9058 stack_pointer_rtx)),
9062 /* Generate a "pop" pattern for input ARG.  */
9067 return gen_rtx_SET (VOIDmode,
9070 gen_rtx_POST_INC (Pmode,
9071 stack_pointer_rtx)));
9074 /* Return >= 0 if there is an unused call-clobbered register available
9075 for the entire function. */
9078 ix86_select_alt_pic_regnum (void)
9080 if (current_function_is_leaf
9082 && !ix86_current_function_calls_tls_descriptor)
9085 /* Can't use the same register for both PIC and DRAP. */
9087 drap = REGNO (crtl->drap_reg);
9090 for (i = 2; i >= 0; --i)
9091 if (i != drap && !df_regs_ever_live_p (i))
9095 return INVALID_REGNUM;
9098 /* Return 1 if we need to save REGNO. */
9100 ix86_save_reg (unsigned int regno, int maybe_eh_return)
9102 if (pic_offset_table_rtx
9103 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
9104 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
9106 || crtl->calls_eh_return
9107 || crtl->uses_const_pool))
9109 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
9114 if (crtl->calls_eh_return && maybe_eh_return)
9119 unsigned test = EH_RETURN_DATA_REGNO (i);
9120 if (test == INVALID_REGNUM)
9127 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
9130 return (df_regs_ever_live_p (regno)
9131 && !call_used_regs[regno]
9132 && !fixed_regs[regno]
9133 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
9136 /* Return the number of saved general purpose registers.  */
9139 ix86_nsaved_regs (void)
9144 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9145 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9150 /* Return the number of saved SSE registers.  */
9153 ix86_nsaved_sseregs (void)
9158 if (!TARGET_64BIT_MS_ABI)
9160 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9161 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9166 /* Given FROM and TO register numbers, say whether this elimination is
9167 allowed. If stack alignment is needed, we can only replace argument
9168 pointer with hard frame pointer, or replace frame pointer with stack
9169 pointer. Otherwise, frame pointer elimination is automatically
9170 handled and all other eliminations are valid. */
9173 ix86_can_eliminate (const int from, const int to)
9175 if (stack_realign_fp)
9176 return ((from == ARG_POINTER_REGNUM
9177 && to == HARD_FRAME_POINTER_REGNUM)
9178 || (from == FRAME_POINTER_REGNUM
9179 && to == STACK_POINTER_REGNUM));
9181 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
9184 /* Return the offset between two registers, one to be eliminated, and the other
9185 its replacement, at the start of a routine. */
9188 ix86_initial_elimination_offset (int from, int to)
9190 struct ix86_frame frame;
9191 ix86_compute_frame_layout (&frame);
9193 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9194 return frame.hard_frame_pointer_offset;
9195 else if (from == FRAME_POINTER_REGNUM
9196 && to == HARD_FRAME_POINTER_REGNUM)
9197 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
9200 gcc_assert (to == STACK_POINTER_REGNUM);
9202 if (from == ARG_POINTER_REGNUM)
9203 return frame.stack_pointer_offset;
9205 gcc_assert (from == FRAME_POINTER_REGNUM);
9206 return frame.stack_pointer_offset - frame.frame_pointer_offset;
9210 /* In a dynamically-aligned function, we can't know the offset from
9211 stack pointer to frame pointer, so we must ensure that setjmp
9212 eliminates fp against the hard fp (%ebp) rather than trying to
9213 index from %esp up to the top of the frame across a gap that is
9214 of unknown (at compile-time) size. */
9216 ix86_builtin_setjmp_frame_value (void)
9218 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
9221 /* On the x86 -fsplit-stack and -fstack-protector both use the same
9222 field in the TCB, so they cannot be used together.  */
9225 ix86_supports_split_stack (bool report ATTRIBUTE_UNUSED,
9226 struct gcc_options *opts ATTRIBUTE_UNUSED)
9230 #ifndef TARGET_THREAD_SPLIT_STACK_OFFSET
9232 error ("%<-fsplit-stack%> currently only supported on GNU/Linux");
9235 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
9238 error ("%<-fsplit-stack%> requires "
9239 "assembler support for CFI directives");
9247 /* When using -fsplit-stack, the allocation routines set a field in
9248 the TCB to the bottom of the stack plus this much space, measured
9251 #define SPLIT_STACK_AVAILABLE 256
9253 /* Fill the ix86_frame structure with information about the frame of the function being compiled.  */
9256 ix86_compute_frame_layout (struct ix86_frame *frame)
9258 unsigned int stack_alignment_needed;
9259 HOST_WIDE_INT offset;
9260 unsigned int preferred_alignment;
9261 HOST_WIDE_INT size = get_frame_size ();
9262 HOST_WIDE_INT to_allocate;
9264 frame->nregs = ix86_nsaved_regs ();
9265 frame->nsseregs = ix86_nsaved_sseregs ();
9267 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
9268 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
9270 /* The 64-bit MS ABI seems to require stack alignment to always be 16, except
9271 in function prologues and leaf functions.  */
9272 if ((TARGET_64BIT_MS_ABI && preferred_alignment < 16)
9273 && (!current_function_is_leaf || cfun->calls_alloca != 0
9274 || ix86_current_function_calls_tls_descriptor))
9276 preferred_alignment = 16;
9277 stack_alignment_needed = 16;
9278 crtl->preferred_stack_boundary = 128;
9279 crtl->stack_alignment_needed = 128;
9282 gcc_assert (!size || stack_alignment_needed);
9283 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
9284 gcc_assert (preferred_alignment <= stack_alignment_needed);
9286 /* For SEH we have to limit the amount of code movement into the prologue.
9287 At present we do this via a BLOCKAGE, at which point there's very little
9288 scheduling that can be done, which means that there's very little point
9289 in doing anything except PUSHs. */
9291 cfun->machine->use_fast_prologue_epilogue = false;
9293 /* During reload iterations the number of registers saved can change.
9294 Recompute the value as needed.  Do not recompute when the number of
9295 registers didn't change, as reload makes multiple calls to the function
9296 and does not expect the decision to change within a single iteration. */
9297 else if (!optimize_function_for_size_p (cfun)
9298 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
9300 int count = frame->nregs;
9301 struct cgraph_node *node = cgraph_node (current_function_decl);
9303 cfun->machine->use_fast_prologue_epilogue_nregs = count;
9305 /* The fast prologue uses move instead of push to save registers. This
9306 is significantly longer, but also executes faster as modern hardware
9307 can execute the moves in parallel, but can't do that for push/pop.
9309 Be careful about choosing which prologue to emit: when the function takes
9310 many instructions to execute, we may use the slow version, just as when
9311 the function is known to be outside a hot spot (this is known only with
9312 profile feedback).  Weight the size of the function by the number of
9313 registers to save, as it is cheap to use one or two push instructions
9314 but very slow to use many of them.  */
9316 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
9317 if (node->frequency < NODE_FREQUENCY_NORMAL
9318 || (flag_branch_probabilities
9319 && node->frequency < NODE_FREQUENCY_HOT))
9320 cfun->machine->use_fast_prologue_epilogue = false;
9322 cfun->machine->use_fast_prologue_epilogue
9323 = !expensive_function_p (count);
9325 if (TARGET_PROLOGUE_USING_MOVE
9326 && cfun->machine->use_fast_prologue_epilogue)
9327 frame->save_regs_using_mov = true;
9329 frame->save_regs_using_mov = false;
9331 /* If static stack checking is enabled and done with probes, the registers
9332 need to be saved before allocating the frame. */
9333 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
9334 frame->save_regs_using_mov = false;
9336 /* Skip return address. */
9337 offset = UNITS_PER_WORD;
9339 /* Skip pushed static chain. */
9340 if (ix86_static_chain_on_stack)
9341 offset += UNITS_PER_WORD;
9343 /* Skip saved base pointer. */
9344 if (frame_pointer_needed)
9345 offset += UNITS_PER_WORD;
9346 frame->hfp_save_offset = offset;
9348 /* The traditional frame pointer location is at the top of the frame. */
9349 frame->hard_frame_pointer_offset = offset;
9351 /* Register save area */
9352 offset += frame->nregs * UNITS_PER_WORD;
9353 frame->reg_save_offset = offset;
9355 /* Align and set SSE register save area. */
9356 if (frame->nsseregs)
9358 /* The only ABI that has saved SSE registers (Win64) also has a
9359 16-byte aligned default stack, and thus we don't need to be
9360 within the re-aligned local stack frame to save them. */
9361 gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
9362 offset = (offset + 16 - 1) & -16;
9363 offset += frame->nsseregs * 16;
9365 frame->sse_reg_save_offset = offset;
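  /* Illustrative example of the rounding above: an offset of 44 becomes
     (44 + 16 - 1) & -16 == 48, so the SSE save area gets the 16-byte
     alignment that the aligned moves used to fill it require.  */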
9367 /* The re-aligned stack starts here. Values before this point are not
9368 directly comparable with values below this point. In order to make
9369 sure that no value happens to be the same before and after, force
9370 the alignment computation below to add a non-zero value. */
9371 if (stack_realign_fp)
9372 offset = (offset + stack_alignment_needed) & -stack_alignment_needed;
9375 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
9376 offset += frame->va_arg_size;
9378 /* Align start of frame for local function. */
9379 if (stack_realign_fp
9380 || offset != frame->sse_reg_save_offset
9382 || !current_function_is_leaf
9383 || cfun->calls_alloca
9384 || ix86_current_function_calls_tls_descriptor)
9385 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
9387 /* Frame pointer points here. */
9388 frame->frame_pointer_offset = offset;
9392 /* Add the outgoing arguments area.  It can be skipped if we eliminated
9393 all the function calls as dead code.
9394 Skipping is however impossible when the function calls alloca.  The
9395 alloca expander assumes that the last crtl->outgoing_args_size bytes
9396 of the stack frame are unused.  */
9397 if (ACCUMULATE_OUTGOING_ARGS
9398 && (!current_function_is_leaf || cfun->calls_alloca
9399 || ix86_current_function_calls_tls_descriptor))
9401 offset += crtl->outgoing_args_size;
9402 frame->outgoing_arguments_size = crtl->outgoing_args_size;
9405 frame->outgoing_arguments_size = 0;
9407 /* Align stack boundary. Only needed if we're calling another function
9409 if (!current_function_is_leaf || cfun->calls_alloca
9410 || ix86_current_function_calls_tls_descriptor)
9411 offset = (offset + preferred_alignment - 1) & -preferred_alignment;
9413 /* We've reached end of stack frame. */
9414 frame->stack_pointer_offset = offset;
9416 /* Size prologue needs to allocate. */
9417 to_allocate = offset - frame->sse_reg_save_offset;
9419 if ((!to_allocate && frame->nregs <= 1)
9420 || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
9421 frame->save_regs_using_mov = false;
9423 if (ix86_using_red_zone ()
9424 && current_function_sp_is_unchanging
9425 && current_function_is_leaf
9426 && !ix86_current_function_calls_tls_descriptor)
9428 frame->red_zone_size = to_allocate;
9429 if (frame->save_regs_using_mov)
9430 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
9431 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
9432 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
9435 frame->red_zone_size = 0;
9436 frame->stack_pointer_offset -= frame->red_zone_size;
9438 /* The SEH frame pointer location is near the bottom of the frame.
9439 This is enforced by the fact that the difference between the
9440 stack pointer and the frame pointer is limited to 240 bytes in
9441 the unwind data structure. */
9446 /* If we can leave the frame pointer where it is, do so. */
9447 diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
9448 if (diff > 240 || (diff & 15) != 0)
9450 /* Ideally we'd determine what portion of the local stack frame
9451 (within the constraint of the lowest 240) is most heavily used.
9452 But without that complication, simply bias the frame pointer
9453 by 128 bytes so as to maximize the amount of the local stack
9454 frame that is addressable with 8-bit offsets. */
9455 frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
9460 /* This is semi-inlined memory_address_length, but simplified
9461 since we know that we're always dealing with reg+offset, and
9462 to avoid having to create and discard all that rtl. */
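/* Illustrative encoding lengths, derived from the rules below: 16(%ebp)
   needs only a disp8, so the extra length is 1; 16(%esp) additionally
   needs a SIB byte, giving 2; 200(%ebp) falls outside [-128, 127] and
   needs a disp32, giving 4.  */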
9465 choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
9471 /* EBP and R13 cannot be encoded without an offset. */
9472 len = (regno == BP_REG || regno == R13_REG);
9474 else if (IN_RANGE (offset, -128, 127))
9477 /* ESP and R12 must be encoded with a SIB byte. */
9478 if (regno == SP_REG || regno == R12_REG)
9484 /* Return an RTX that points to CFA_OFFSET within the stack frame.
9485 The valid base registers are taken from CFUN->MACHINE->FS. */
9488 choose_baseaddr (HOST_WIDE_INT cfa_offset)
9490 const struct machine_function *m = cfun->machine;
9491 rtx base_reg = NULL;
9492 HOST_WIDE_INT base_offset = 0;
9494 if (m->use_fast_prologue_epilogue)
9496 /* Choose the base register most likely to allow the most scheduling
9497 opportunities.  Generally FP is valid throughout the function,
9498 while DRAP must be reloaded within the epilogue. But choose either
9499 over the SP due to increased encoding size. */
9503 base_reg = hard_frame_pointer_rtx;
9504 base_offset = m->fs.fp_offset - cfa_offset;
9506 else if (m->fs.drap_valid)
9508 base_reg = crtl->drap_reg;
9509 base_offset = 0 - cfa_offset;
9511 else if (m->fs.sp_valid)
9513 base_reg = stack_pointer_rtx;
9514 base_offset = m->fs.sp_offset - cfa_offset;
9519 HOST_WIDE_INT toffset;
9522 /* Choose the base register with the smallest address encoding.
9523 In case of a tie, choose FP > DRAP > SP.  */
9526 base_reg = stack_pointer_rtx;
9527 base_offset = m->fs.sp_offset - cfa_offset;
9528 len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
9530 if (m->fs.drap_valid)
9532 toffset = 0 - cfa_offset;
9533 tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
9536 base_reg = crtl->drap_reg;
9537 base_offset = toffset;
9543 toffset = m->fs.fp_offset - cfa_offset;
9544 tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
9547 base_reg = hard_frame_pointer_rtx;
9548 base_offset = toffset;
9553 gcc_assert (base_reg != NULL);
9555 return plus_constant (base_reg, base_offset);
9558 /* Emit code to save registers in the prologue. */
9561 ix86_emit_save_regs (void)
9566 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
9567 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9569 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
9570 RTX_FRAME_RELATED_P (insn) = 1;
9574 /* Emit a single register save at CFA - CFA_OFFSET. */
9577 ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
9578 HOST_WIDE_INT cfa_offset)
9580 struct machine_function *m = cfun->machine;
9581 rtx reg = gen_rtx_REG (mode, regno);
9582 rtx mem, addr, base, insn;
9584 addr = choose_baseaddr (cfa_offset);
9585 mem = gen_frame_mem (mode, addr);
9587 /* For SSE saves, we need to indicate the 128-bit alignment. */
9588 set_mem_align (mem, GET_MODE_ALIGNMENT (mode));
9590 insn = emit_move_insn (mem, reg);
9591 RTX_FRAME_RELATED_P (insn) = 1;
9594 if (GET_CODE (base) == PLUS)
9595 base = XEXP (base, 0);
9596 gcc_checking_assert (REG_P (base));
9598 /* When saving registers into a re-aligned local stack frame, avoid
9599 any tricky guessing by dwarf2out. */
9600 if (m->fs.realigned)
9602 gcc_checking_assert (stack_realign_drap);
9604 if (regno == REGNO (crtl->drap_reg))
9606 /* A bit of a hack. We force the DRAP register to be saved in
9607 the re-aligned stack frame, which provides us with a copy
9608 of the CFA that will last past the prologue. Install it. */
9609 gcc_checking_assert (cfun->machine->fs.fp_valid);
9610 addr = plus_constant (hard_frame_pointer_rtx,
9611 cfun->machine->fs.fp_offset - cfa_offset);
9612 mem = gen_rtx_MEM (mode, addr);
9613 add_reg_note (insn, REG_CFA_DEF_CFA, mem);
9617 /* The frame pointer is a stable reference within the
9618 aligned frame. Use it. */
9619 gcc_checking_assert (cfun->machine->fs.fp_valid);
9620 addr = plus_constant (hard_frame_pointer_rtx,
9621 cfun->machine->fs.fp_offset - cfa_offset);
9622 mem = gen_rtx_MEM (mode, addr);
9623 add_reg_note (insn, REG_CFA_EXPRESSION,
9624 gen_rtx_SET (VOIDmode, mem, reg));
9628 /* The memory may not be relative to the current CFA register,
9629 which means that we may need to generate a new pattern for
9630 use by the unwind info. */
9631 else if (base != m->fs.cfa_reg)
9633 addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
9634 mem = gen_rtx_MEM (mode, addr);
9635 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
9639 /* Emit code to save registers using MOV insns.
9640 First register is stored at CFA - CFA_OFFSET. */
9642 ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
9646 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9647 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9649 ix86_emit_save_reg_using_mov (Pmode, regno, cfa_offset);
9650 cfa_offset -= UNITS_PER_WORD;
9654 /* Emit code to save SSE registers using MOV insns.
9655 First register is stored at CFA - CFA_OFFSET. */
9657 ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
9661 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9662 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9664 ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
9669 static GTY(()) rtx queued_cfa_restores;
9671 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next stack
9672 manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
9673 Don't add the note if the previously saved value will be left untouched
9674 within the stack red zone until return, as unwinders can find the same
9675 value in the register and on the stack.  */
9678 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
9680 if (cfa_offset <= cfun->machine->fs.red_zone_offset)
9685 add_reg_note (insn, REG_CFA_RESTORE, reg);
9686 RTX_FRAME_RELATED_P (insn) = 1;
9690 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
9693 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
9696 ix86_add_queued_cfa_restore_notes (rtx insn)
9699 if (!queued_cfa_restores)
9701 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
9703 XEXP (last, 1) = REG_NOTES (insn);
9704 REG_NOTES (insn) = queued_cfa_restores;
9705 queued_cfa_restores = NULL_RTX;
9706 RTX_FRAME_RELATED_P (insn) = 1;
9709 /* Expand prologue or epilogue stack adjustment.
9710 The pattern exists to put a dependency on all ebp-based memory accesses.
9711 STYLE should be negative if instructions should be marked as frame related,
9712 zero if the %r11 register is live and cannot be freely used, and positive
9716 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
9717 int style, bool set_cfa)
9719 struct machine_function *m = cfun->machine;
9721 bool add_frame_related_expr = false;
9724 insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
9725 else if (x86_64_immediate_operand (offset, DImode))
9726 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
9730 /* r11 is used by indirect sibcall return as well, set before the
9731 epilogue and used after the epilogue. */
9733 tmp = gen_rtx_REG (DImode, R11_REG);
9736 gcc_assert (src != hard_frame_pointer_rtx
9737 && dest != hard_frame_pointer_rtx);
9738 tmp = hard_frame_pointer_rtx;
9740 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
9742 add_frame_related_expr = true;
9744 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
9747 insn = emit_insn (insn);
9749 ix86_add_queued_cfa_restore_notes (insn);
9755 gcc_assert (m->fs.cfa_reg == src);
9756 m->fs.cfa_offset += INTVAL (offset);
9757 m->fs.cfa_reg = dest;
9759 r = gen_rtx_PLUS (Pmode, src, offset);
9760 r = gen_rtx_SET (VOIDmode, dest, r);
9761 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9762 RTX_FRAME_RELATED_P (insn) = 1;
9766 RTX_FRAME_RELATED_P (insn) = 1;
9767 if (add_frame_related_expr)
9769 rtx r = gen_rtx_PLUS (Pmode, src, offset);
9770 r = gen_rtx_SET (VOIDmode, dest, r);
9771 add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
9775 if (dest == stack_pointer_rtx)
9777 HOST_WIDE_INT ooffset = m->fs.sp_offset;
9778 bool valid = m->fs.sp_valid;
9780 if (src == hard_frame_pointer_rtx)
9782 valid = m->fs.fp_valid;
9783 ooffset = m->fs.fp_offset;
9785 else if (src == crtl->drap_reg)
9787 valid = m->fs.drap_valid;
9792 /* Else there are two possibilities: SP itself, which we set
9793 up as the default above, or EH_RETURN_STACKADJ_RTX, which is
9794 taken care of by hand along the eh_return path.  */
9795 gcc_checking_assert (src == stack_pointer_rtx
9796 || offset == const0_rtx);
9799 m->fs.sp_offset = ooffset - INTVAL (offset);
9800 m->fs.sp_valid = valid;
9804 /* Find an available register to be used as the dynamic realign argument
9805 pointer register.  Such a register is written in the prologue and
9806 used at the beginning of the body, so it must not be
9807 1. a parameter passing register.
9809 We reuse the static-chain register if it is available.  Otherwise, we
9810 use DI for i386 and R13 for x86-64.  We chose R13 since it has
9813 Return: the regno of the chosen register.  */
9816 find_drap_reg (void)
9818 tree decl = cfun->decl;
9822 /* Use R13 for a nested function or a function that needs a static chain.
9823 Since a function with a tail call may use any caller-saved
9824 register in the epilogue, DRAP must not use a caller-saved
9825 register in that case.  */
9826 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9833 /* Use DI for a nested function or a function that needs a static chain.
9834 Since a function with a tail call may use any caller-saved
9835 register in the epilogue, DRAP must not use a caller-saved
9836 register in that case.  */
9837 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9840 /* Reuse static chain register if it isn't used for parameter
9842 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
9843 && !lookup_attribute ("fastcall",
9844 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
9845 && !ix86_is_type_thiscall (TREE_TYPE (decl)))
9852 /* Return minimum incoming stack alignment. */
9855 ix86_minimum_incoming_stack_boundary (bool sibcall)
9857 unsigned int incoming_stack_boundary;
9859 /* Prefer the one specified at command line. */
9860 if (ix86_user_incoming_stack_boundary)
9861 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
9862 /* In 32bit, use MIN_STACK_BOUNDARY for the incoming stack boundary
9863 if -mstackrealign is used, this isn't a sibcall check, and the
9864 estimated stack alignment is 128bit.  */
9867 && ix86_force_align_arg_pointer
9868 && crtl->stack_alignment_estimated == 128)
9869 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9871 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
9873 /* Incoming stack alignment can be changed on individual functions
9874 via force_align_arg_pointer attribute. We use the smallest
9875 incoming stack boundary. */
9876 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
9877 && lookup_attribute (ix86_force_align_arg_pointer_string,
9878 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
9879 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9881 /* The incoming stack frame has to be aligned at least at
9882 parm_stack_boundary. */
9883 if (incoming_stack_boundary < crtl->parm_stack_boundary)
9884 incoming_stack_boundary = crtl->parm_stack_boundary;
9886 /* The stack at the entry of main is aligned by the runtime.  We use
9887 the smallest incoming stack boundary.  */
9888 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
9889 && DECL_NAME (current_function_decl)
9890 && MAIN_NAME_P (DECL_NAME (current_function_decl))
9891 && DECL_FILE_SCOPE_P (current_function_decl))
9892 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
9894 return incoming_stack_boundary;
9897 /* Update incoming stack boundary and estimated stack alignment. */
9900 ix86_update_stack_boundary (void)
9902 ix86_incoming_stack_boundary
9903 = ix86_minimum_incoming_stack_boundary (false);
9905 /* x86_64 vararg needs 16byte stack alignment for register save
9909 && crtl->stack_alignment_estimated < 128)
9910 crtl->stack_alignment_estimated = 128;
9913 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
9914 needed or an rtx for DRAP otherwise. */
9917 ix86_get_drap_rtx (void)
9919 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
9920 crtl->need_drap = true;
9922 if (stack_realign_drap)
9924 /* Assign DRAP to vDRAP and return vDRAP.  */
9925 unsigned int regno = find_drap_reg ();
9930 arg_ptr = gen_rtx_REG (Pmode, regno);
9931 crtl->drap_reg = arg_ptr;
9934 drap_vreg = copy_to_reg (arg_ptr);
9938 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
9941 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
9942 RTX_FRAME_RELATED_P (insn) = 1;
9950 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
9953 ix86_internal_arg_pointer (void)
9955 return virtual_incoming_args_rtx;
9958 struct scratch_reg {
9963 /* Return a short-lived scratch register for use on function entry.
9964 In 32-bit mode, it is valid only after the registers are saved
9965 in the prologue. This register must be released by means of
9966 release_scratch_register_on_entry once it is dead. */
9969 get_scratch_register_on_entry (struct scratch_reg *sr)
9977 /* We always use R11 in 64-bit mode. */
9982 tree decl = current_function_decl, fntype = TREE_TYPE (decl);
9984 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
9985 bool static_chain_p = DECL_STATIC_CHAIN (decl);
9986 int regparm = ix86_function_regparm (fntype, decl);
9988 = crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;
9990 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
9991 for the static chain register. */
9992 if ((regparm < 1 || (fastcall_p && !static_chain_p))
9993 && drap_regno != AX_REG)
9995 else if (regparm < 2 && drap_regno != DX_REG)
9997 /* ecx is the static chain register. */
9998 else if (regparm < 3 && !fastcall_p && !static_chain_p
9999 && drap_regno != CX_REG)
10001 else if (ix86_save_reg (BX_REG, true))
10003 /* esi is the static chain register. */
10004 else if (!(regparm == 3 && static_chain_p)
10005 && ix86_save_reg (SI_REG, true))
10007 else if (ix86_save_reg (DI_REG, true))
10011 regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
10016 sr->reg = gen_rtx_REG (Pmode, regno);
10019 rtx insn = emit_insn (gen_push (sr->reg));
10020 RTX_FRAME_RELATED_P (insn) = 1;
10024 /* Release a scratch register obtained from the preceding function. */
10027 release_scratch_register_on_entry (struct scratch_reg *sr)
10031 rtx x, insn = emit_insn (gen_pop (sr->reg));
10033 /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
10034 RTX_FRAME_RELATED_P (insn) = 1;
10035 x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
10036 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10037 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
10041 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
10043 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
10046 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
10048 /* We skip the probe for the first interval + a small dope of 4 words and
10049 probe that many bytes past the specified size to maintain a protection
10050 area at the bottom of the stack.  */
10051 const int dope = 4 * UNITS_PER_WORD;
10052 rtx size_rtx = GEN_INT (size), last;
10054 /* See if we have a constant small number of probes to generate. If so,
10055 that's the easy case. The run-time loop is made up of 11 insns in the
10056 generic case while the compile-time loop is made up of 3+2*(n-1) insns
10057 for n # of intervals. */
10058 if (size <= 5 * PROBE_INTERVAL)
10060 HOST_WIDE_INT i, adjust;
10061 bool first_probe = true;
10063 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
10064 values of N from 1 until it exceeds SIZE. If only one probe is
10065 needed, this will not generate any code. Then adjust and probe
10066 to PROBE_INTERVAL + SIZE. */
10067 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10071 adjust = 2 * PROBE_INTERVAL + dope;
10072 first_probe = false;
10075 adjust = PROBE_INTERVAL;
10077 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10078 plus_constant (stack_pointer_rtx, -adjust)));
10079 emit_stack_probe (stack_pointer_rtx);
10083 adjust = size + PROBE_INTERVAL + dope;
10085 adjust = size + PROBE_INTERVAL - i;
10087 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10088 plus_constant (stack_pointer_rtx, -adjust)));
10089 emit_stack_probe (stack_pointer_rtx);
10091 /* Adjust back to account for the additional first interval. */
10092 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10093 plus_constant (stack_pointer_rtx,
10094 PROBE_INTERVAL + dope)));
10097 /* Otherwise, do the same as above, but in a loop. Note that we must be
10098 extra careful with variables wrapping around because we might be at
10099 the very top (or the very bottom) of the address space and we have
10100 to be able to handle this case properly; in particular, we use an
10101 equality test for the loop condition. */
10104 HOST_WIDE_INT rounded_size;
10105 struct scratch_reg sr;
10107 get_scratch_register_on_entry (&sr);
10110 /* Step 1: round SIZE to the previous multiple of the interval. */
10112 rounded_size = size & -PROBE_INTERVAL;
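      /* Illustrative example (assuming the default 4096-byte interval,
	 i.e. STACK_CHECK_PROBE_INTERVAL_EXP == 12): a SIZE of 10000
	 gives ROUNDED_SIZE == 8192; the remaining 1808 bytes are
	 handled by the final adjustment in step 4 below.  */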
10115 /* Step 2: compute initial and final value of the loop counter. */
10117 /* SP = SP_0 + PROBE_INTERVAL. */
10118 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10119 plus_constant (stack_pointer_rtx,
10120 - (PROBE_INTERVAL + dope))));
10122 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
10123 emit_move_insn (sr.reg, GEN_INT (-rounded_size));
10124 emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
10125 gen_rtx_PLUS (Pmode, sr.reg,
10126 stack_pointer_rtx)));
10129 /* Step 3: the loop
10131 while (SP != LAST_ADDR)
10133 SP = SP + PROBE_INTERVAL
10137 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
10138 values of N from 1 until it is equal to ROUNDED_SIZE. */
10140 emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));
10143 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
10144 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
10146 if (size != rounded_size)
10148 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10149 plus_constant (stack_pointer_rtx,
10150 rounded_size - size)));
10151 emit_stack_probe (stack_pointer_rtx);
10154 /* Adjust back to account for the additional first interval. */
10155 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10156 plus_constant (stack_pointer_rtx,
10157 PROBE_INTERVAL + dope)));
10159 release_scratch_register_on_entry (&sr);
10162 gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
10164 /* Even if the stack pointer isn't the CFA register, we need to correctly
10165 describe the adjustments made to it, in particular differentiate the
10166 frame-related ones from the frame-unrelated ones. */
10169 rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
10170 XVECEXP (expr, 0, 0)
10171 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10172 plus_constant (stack_pointer_rtx, -size));
10173 XVECEXP (expr, 0, 1)
10174 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10175 plus_constant (stack_pointer_rtx,
10176 PROBE_INTERVAL + dope + size));
10177 add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
10178 RTX_FRAME_RELATED_P (last) = 1;
10180 cfun->machine->fs.sp_offset += size;
10183 /* Make sure nothing is scheduled before we are done. */
10184 emit_insn (gen_blockage ());
10187 /* Adjust the stack pointer up to REG while probing it. */
10190 output_adjust_stack_and_probe (rtx reg)
10192 static int labelno = 0;
10193 char loop_lab[32], end_lab[32];
10196 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10197 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10199 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10201 /* Jump to END_LAB if SP == LAST_ADDR. */
10202 xops[0] = stack_pointer_rtx;
10204 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
10205 fputs ("\tje\t", asm_out_file);
10206 assemble_name_raw (asm_out_file, end_lab);
10207 fputc ('\n', asm_out_file);
10209 /* SP = SP + PROBE_INTERVAL. */
10210 xops[1] = GEN_INT (PROBE_INTERVAL);
10211 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
10214 xops[1] = const0_rtx;
10215 output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);
10217 fprintf (asm_out_file, "\tjmp\t");
10218 assemble_name_raw (asm_out_file, loop_lab);
10219 fputc ('\n', asm_out_file);
10221 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
10226 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
10227 inclusive. These are offsets from the current stack pointer. */
10230 ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
10232 /* See if we have a constant small number of probes to generate. If so,
10233 that's the easy case. The run-time loop is made up of 7 insns in the
10234 generic case while the compile-time loop is made up of n insns for n #
10236 if (size <= 7 * PROBE_INTERVAL)
10240 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
10241 it exceeds SIZE. If only one probe is needed, this will not
10242 generate any code. Then probe at FIRST + SIZE. */
10243 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10244 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
10246 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
10249 /* Otherwise, do the same as above, but in a loop. Note that we must be
10250 extra careful with variables wrapping around because we might be at
10251 the very top (or the very bottom) of the address space and we have
10252 to be able to handle this case properly; in particular, we use an
10253 equality test for the loop condition. */
10256 HOST_WIDE_INT rounded_size, last;
10257 struct scratch_reg sr;
10259 get_scratch_register_on_entry (&sr);
10262 /* Step 1: round SIZE to the previous multiple of the interval. */
10264 rounded_size = size & -PROBE_INTERVAL;
10267 /* Step 2: compute initial and final value of the loop counter. */
10269 /* TEST_OFFSET = FIRST. */
10270 emit_move_insn (sr.reg, GEN_INT (-first));
10272 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
10273 last = first + rounded_size;
10276 /* Step 3: the loop
10278 while (TEST_ADDR != LAST_ADDR)
10279   {
10280     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
10281     probe at TEST_ADDR
10282   }
10284 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
10285 until it is equal to ROUNDED_SIZE.  */
10287 emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));
10290 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
10291 that SIZE is equal to ROUNDED_SIZE. */
10293 if (size != rounded_size)
10294 emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
10297 rounded_size - size));
10299 release_scratch_register_on_entry (&sr);
10302 /* Make sure nothing is scheduled before we are done. */
10303 emit_insn (gen_blockage ());
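/* A worked example, assuming PROBE_INTERVAL == 4096 (0x1000): for
   first == 0x1000 and size == 0x2800 the unrolled path above probes at
   sp-0x2000 and sp-0x3000 and finally at sp-0x3800; for sizes above
   7 * PROBE_INTERVAL the loop form probes every interval from
   sp-(first+0x1000) down to sp-(first+rounded_size), with one residual
   probe when SIZE is not a multiple of the interval.  */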
10306 /* Probe a range of stack addresses from REG to END, inclusive. These are
10307 offsets from the current stack pointer. */
10310 output_probe_stack_range (rtx reg, rtx end)
10312 static int labelno = 0;
10313 char loop_lab[32], end_lab[32];
10316 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10317 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10319 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10321 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
10324 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
10325 fputs ("\tje\t", asm_out_file);
10326 assemble_name_raw (asm_out_file, end_lab);
10327 fputc ('\n', asm_out_file);
10329 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
10330 xops[1] = GEN_INT (PROBE_INTERVAL);
10331 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
10333 /* Probe at TEST_ADDR. */
10334 xops[0] = stack_pointer_rtx;
10336 xops[2] = const0_rtx;
10337 output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);
10339 fprintf (asm_out_file, "\tjmp\t");
10340 assemble_name_raw (asm_out_file, loop_lab);
10341 fputc ('\n', asm_out_file);
10343 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
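/* Roughly, with the scratch register (say %rax) holding the negated
   test offset, the loop printed above assembles to (a sketch assuming
   a 64-bit target, PROBE_INTERVAL == 4096 and LAST_ADDR == -16384):

   LPSRL0:
	cmpq	$-16384, %rax
	je	LPSRE0
	subq	$4096, %rax
	orq	$0, (%rsp,%rax)
	jmp	LPSRL0
   LPSRE0:

   Keeping the offset negative lets the single (%rsp,%rax) addressing
   mode reach below the stack pointer.  */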
10348 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
10349 to be generated in correct form. */
10351 ix86_finalize_stack_realign_flags (void)
10353 /* Check if stack realignment is really needed after reload, and
10354 store the result in cfun.  */
10355 unsigned int incoming_stack_boundary
10356 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
10357 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
10358 unsigned int stack_realign = (incoming_stack_boundary
10359 < (current_function_is_leaf
10360 ? crtl->max_used_stack_slot_alignment
10361 : crtl->stack_alignment_needed));
10363 if (crtl->stack_realign_finalized)
10365 /* After stack_realign_needed is finalized, we can no longer
10366 update it.  */
10367 gcc_assert (crtl->stack_realign_needed == stack_realign);
10371 crtl->stack_realign_needed = stack_realign;
10372 crtl->stack_realign_finalized = true;
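/* An illustrative case: with -m32, where the incoming stack boundary
   is 32 bits, a function containing a local such as

     __attribute__ ((aligned (16))) char buf[16];

   records a needed alignment of 128 bits, so stack_realign above comes
   out true and the prologue will realign the stack.  */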
10376 /* Expand the prologue into a bunch of separate insns. */
10379 ix86_expand_prologue (void)
10381 struct machine_function *m = cfun->machine;
10384 struct ix86_frame frame;
10385 HOST_WIDE_INT allocate;
10386 bool int_registers_saved;
10388 ix86_finalize_stack_realign_flags ();
10390 /* DRAP should not coexist with stack_realign_fp */
10391 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
10393 memset (&m->fs, 0, sizeof (m->fs));
10395 /* Initialize CFA state for before the prologue. */
10396 m->fs.cfa_reg = stack_pointer_rtx;
10397 m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;
10399 /* Track SP offset to the CFA. We continue tracking this after we've
10400 swapped the CFA register away from SP. In the case of re-alignment
10401 this is fudged; we're interested in offsets within the local frame.  */
10402 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10403 m->fs.sp_valid = true;
10405 ix86_compute_frame_layout (&frame);
10407 if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
10409 /* We should have already generated an error for any use of
10410 ms_hook on a nested function. */
10411 gcc_checking_assert (!ix86_static_chain_on_stack);
10413 /* Check if profiling is active and whether we shall use the
10414 profiling-before-prologue variant.  If so, sorry.  */
10415 if (crtl->profile && flag_fentry != 0)
10416 sorry ("ms_hook_prologue attribute isn%'t compatible "
10417 "with -mfentry for 32-bit");
10419 /* In ix86_asm_output_function_label we emitted:
10420 8b ff	movl.s %edi,%edi
10421 55	push   %ebp
10422 8b ec	movl.s %esp,%ebp
10424 This matches the hookable function prologue in Win32 API
10425 functions in Microsoft Windows XP Service Pack 2 and newer.
10426 Wine uses this to enable Windows apps to hook the Win32 API
10427 functions provided by Wine.
10429 What that means is that we've already set up the frame pointer. */
10431 if (frame_pointer_needed
10432 && !(crtl->drap_reg && crtl->stack_realign_needed))
10436 /* We've decided to use the frame pointer already set up.
10437 Describe this to the unwinder by pretending that both
10438 push and mov insns happen right here.
10440 Putting the unwind info here at the end of the ms_hook
10441 is done so that we can make absolutely certain we get
10442 the required byte sequence at the start of the function,
10443 rather than relying on an assembler that can produce
10444 the exact encoding required.
10446 However it does mean (in the unpatched case) that we have
10447 a 1 insn window where the asynchronous unwind info is
10448 incorrect. However, if we placed the unwind info at
10449 its correct location we would have incorrect unwind info
10450 in the patched case. Which is probably all moot since
10451 I don't expect Wine generates dwarf2 unwind info for the
10452 system libraries that use this feature. */
10454 insn = emit_insn (gen_blockage ());
10456 push = gen_push (hard_frame_pointer_rtx);
10457 mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
10458 stack_pointer_rtx);
10459 RTX_FRAME_RELATED_P (push) = 1;
10460 RTX_FRAME_RELATED_P (mov) = 1;
10462 RTX_FRAME_RELATED_P (insn) = 1;
10463 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10464 gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));
10466 /* Note that gen_push incremented m->fs.cfa_offset, even
10467 though we didn't emit the push insn here. */
10468 m->fs.cfa_reg = hard_frame_pointer_rtx;
10469 m->fs.fp_offset = m->fs.cfa_offset;
10470 m->fs.fp_valid = true;
10474 /* The frame pointer is not needed so pop %ebp again.
10475 This leaves us with a pristine state. */
10476 emit_insn (gen_pop (hard_frame_pointer_rtx));
10480 /* The first insn of a function that accepts its static chain on the
10481 stack is to push the register that would be filled in by a direct
10482 call. This insn will be skipped by the trampoline. */
10483 else if (ix86_static_chain_on_stack)
10485 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
10486 emit_insn (gen_blockage ());
10488 /* We don't want to interpret this push insn as a register save,
10489 only as a stack adjustment. The real copy of the register as
10490 a save will be done later, if needed. */
10491 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
10492 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
10493 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
10494 RTX_FRAME_RELATED_P (insn) = 1;
10497 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
10498 DRAP is needed and stack realignment is really needed after reload.  */
10499 if (stack_realign_drap)
10501 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10503 /* Only need to push parameter pointer reg if it is caller saved. */
10504 if (!call_used_regs[REGNO (crtl->drap_reg)])
10506 /* Push arg pointer reg */
10507 insn = emit_insn (gen_push (crtl->drap_reg));
10508 RTX_FRAME_RELATED_P (insn) = 1;
10511 /* Grab the argument pointer. */
10512 t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
10513 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10514 RTX_FRAME_RELATED_P (insn) = 1;
10515 m->fs.cfa_reg = crtl->drap_reg;
10516 m->fs.cfa_offset = 0;
10518 /* Align the stack. */
10519 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10521 GEN_INT (-align_bytes)));
10522 RTX_FRAME_RELATED_P (insn) = 1;
10524 /* Replicate the return address on the stack so that the return
10525 address can be reached via the (argp - 1) slot.  This is needed
10526 to implement the macro RETURN_ADDR_RTX and the intrinsic function
10527 expand_builtin_return_addr etc.  */
10528 t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
10529 t = gen_frame_mem (Pmode, t);
10530 insn = emit_insn (gen_push (t));
10531 RTX_FRAME_RELATED_P (insn) = 1;
10533 /* For the purposes of frame and register save area addressing,
10534 we've started over with a new frame. */
10535 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10536 m->fs.realigned = true;
10539 if (frame_pointer_needed && !m->fs.fp_valid)
10541 /* Note: AT&T enter does NOT have reversed args. Enter is probably
10542 slower on all targets. Also sdb doesn't like it. */
10543 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
10544 RTX_FRAME_RELATED_P (insn) = 1;
10546 if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
10548 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10549 RTX_FRAME_RELATED_P (insn) = 1;
10551 if (m->fs.cfa_reg == stack_pointer_rtx)
10552 m->fs.cfa_reg = hard_frame_pointer_rtx;
10553 m->fs.fp_offset = m->fs.sp_offset;
10554 m->fs.fp_valid = true;
10558 int_registers_saved = (frame.nregs == 0);
10560 if (!int_registers_saved)
10562 /* If saving registers via PUSH, do so now. */
10563 if (!frame.save_regs_using_mov)
10565 ix86_emit_save_regs ();
10566 int_registers_saved = true;
10567 gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
10570 /* When using red zone we may start register saving before allocating
10571 the stack frame saving one cycle of the prologue. However, avoid
10572 doing this if we have to probe the stack; at least on x86_64 the
10573 stack probe can turn into a call that clobbers a red zone location. */
10574 else if (ix86_using_red_zone ()
10575 && (! TARGET_STACK_PROBE
10576 || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
10578 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10579 int_registers_saved = true;
10583 if (stack_realign_fp)
10585 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10586 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
10588 /* The computation of the size of the re-aligned stack frame means
10589 that we must allocate the size of the register save area before
10590 performing the actual alignment. Otherwise we cannot guarantee
10591 that there's enough storage above the realignment point. */
10592 if (m->fs.sp_offset != frame.sse_reg_save_offset)
10593 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10594 GEN_INT (m->fs.sp_offset
10595 - frame.sse_reg_save_offset),
10598 /* Align the stack. */
10599 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10601 GEN_INT (-align_bytes)));
10603 /* For the purposes of register save area addressing, the stack
10604 pointer is no longer valid. As for the value of sp_offset,
10605 see ix86_compute_frame_layout, which we need to match in order
10606 to pass verification of stack_pointer_offset at the end. */
10607 m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
10608 m->fs.sp_valid = false;
10611 allocate = frame.stack_pointer_offset - m->fs.sp_offset;
10613 if (flag_stack_usage)
10615 /* We start to count from ARG_POINTER. */
10616 HOST_WIDE_INT stack_size = frame.stack_pointer_offset;
10618 /* If it was realigned, take into account the fake frame. */
10619 if (stack_realign_drap)
10621 if (ix86_static_chain_on_stack)
10622 stack_size += UNITS_PER_WORD;
10624 if (!call_used_regs[REGNO (crtl->drap_reg)])
10625 stack_size += UNITS_PER_WORD;
10627 /* This over-estimates by 1 minimal-stack-alignment-unit but
10628 mitigates that by counting in the new return address slot. */
10629 current_function_dynamic_stack_size
10630 += crtl->stack_alignment_needed / BITS_PER_UNIT;
10633 current_function_static_stack_size = stack_size;
10636 /* The stack has already been decremented by the instruction calling us
10637 so we need to probe unconditionally to preserve the protection area. */
10638 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
10640 /* We expect the registers to be saved when probes are used. */
10641 gcc_assert (int_registers_saved);
10643 if (STACK_CHECK_MOVING_SP)
10645 ix86_adjust_stack_and_probe (allocate);
10650 HOST_WIDE_INT size = allocate;
10652 if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
10653 size = 0x80000000 - STACK_CHECK_PROTECT - 1;
10655 if (TARGET_STACK_PROBE)
10656 ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
10658 ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
10664 else if (!ix86_target_stack_probe ()
10665 || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
10667 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10668 GEN_INT (-allocate), -1,
10669 m->fs.cfa_reg == stack_pointer_rtx);
10673 rtx eax = gen_rtx_REG (Pmode, AX_REG);
10675 rtx (*adjust_stack_insn)(rtx, rtx, rtx);
10677 bool eax_live = false;
10678 bool r10_live = false;
10681 r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
10682 if (!TARGET_64BIT_MS_ABI)
10683 eax_live = ix86_eax_live_at_start_p ();
10687 emit_insn (gen_push (eax));
10688 allocate -= UNITS_PER_WORD;
10692 r10 = gen_rtx_REG (Pmode, R10_REG);
10693 emit_insn (gen_push (r10));
10694 allocate -= UNITS_PER_WORD;
10697 emit_move_insn (eax, GEN_INT (allocate));
10698 emit_insn (ix86_gen_allocate_stack_worker (eax, eax));
10700 /* Use the fact that AX still contains ALLOCATE. */
10701 adjust_stack_insn = (TARGET_64BIT
10702 ? gen_pro_epilogue_adjust_stack_di_sub
10703 : gen_pro_epilogue_adjust_stack_si_sub);
10705 insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
10706 stack_pointer_rtx, eax));
10708 /* Note that SEH directives need to continue tracking the stack
10709 pointer even after the frame pointer has been set up. */
10710 if (m->fs.cfa_reg == stack_pointer_rtx || TARGET_SEH)
10712 if (m->fs.cfa_reg == stack_pointer_rtx)
10713 m->fs.cfa_offset += allocate;
10715 RTX_FRAME_RELATED_P (insn) = 1;
10716 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10717 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10718 plus_constant (stack_pointer_rtx,
10721 m->fs.sp_offset += allocate;
10723 if (r10_live && eax_live)
10725 t = choose_baseaddr (m->fs.sp_offset - allocate);
10726 emit_move_insn (r10, gen_frame_mem (Pmode, t));
10727 t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
10728 emit_move_insn (eax, gen_frame_mem (Pmode, t));
10730 else if (eax_live || r10_live)
10732 t = choose_baseaddr (m->fs.sp_offset - allocate);
10733 emit_move_insn ((eax_live ? eax : r10), gen_frame_mem (Pmode, t));
10736 gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);
10738 /* If we haven't already set up the frame pointer, do so now.  */
10739 if (frame_pointer_needed && !m->fs.fp_valid)
10741 insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
10742 GEN_INT (frame.stack_pointer_offset
10743 - frame.hard_frame_pointer_offset));
10744 insn = emit_insn (insn);
10745 RTX_FRAME_RELATED_P (insn) = 1;
10746 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
10748 if (m->fs.cfa_reg == stack_pointer_rtx)
10749 m->fs.cfa_reg = hard_frame_pointer_rtx;
10750 m->fs.fp_offset = frame.hard_frame_pointer_offset;
10751 m->fs.fp_valid = true;
10754 if (!int_registers_saved)
10755 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10756 if (frame.nsseregs)
10757 ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
10759 pic_reg_used = false;
10760 if (pic_offset_table_rtx
10761 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
10764 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
10766 if (alt_pic_reg_used != INVALID_REGNUM)
10767 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
10769 pic_reg_used = true;
10776 if (ix86_cmodel == CM_LARGE_PIC)
10778 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
10779 rtx label = gen_label_rtx ();
10780 emit_label (label);
10781 LABEL_PRESERVE_P (label) = 1;
10782 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
10783 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
10784 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
10785 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
10786 pic_offset_table_rtx, tmp_reg));
10789 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
10792 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
10795 /* In the pic_reg_used case, make sure that the got load isn't deleted
10796 when mcount needs it. Blockage to avoid call movement across mcount
10797 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
10798 note.  */
10799 if (crtl->profile && !flag_fentry && pic_reg_used)
10800 emit_insn (gen_prologue_use (pic_offset_table_rtx));
10802 if (crtl->drap_reg && !crtl->stack_realign_needed)
10804 /* vDRAP is set up, but after reload it turns out that stack
10805 realignment isn't necessary.  Here we emit prologue code to set
10806 up DRAP without the stack-realignment adjustment.  */
10807 t = choose_baseaddr (0);
10808 emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10811 /* Prevent instructions from being scheduled into the register save push
10812 sequence when access to the redzone area is done through the frame pointer.
10813 The offset between the frame pointer and the stack pointer is calculated
10814 relative to the value of the stack pointer at the end of the function
10815 prologue, and moving instructions that access the redzone area via the
10816 frame pointer inside the push sequence violates this assumption.  */
10817 if (frame_pointer_needed && frame.red_zone_size)
10818 emit_insn (gen_memory_blockage ());
10820 /* Emit cld instruction if stringops are used in the function. */
10821 if (TARGET_CLD && ix86_current_function_needs_cld)
10822 emit_insn (gen_cld ());
10824 /* SEH requires that the prologue end within 256 bytes of the start of
10825 the function. Prevent instruction schedules that would extend that. */
10827 emit_insn (gen_blockage ());
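/* For orientation, the common frame-pointer case of the code above
   yields a prologue of roughly this shape on a 64-bit target (a
   sketch; the saved registers and the allocation size NN depend on
   the function):

	pushq	%rbp
	movq	%rsp, %rbp
	pushq	%rbx
	subq	$NN, %rsp

   with frame-related notes on each insn so that the CFA bookkeeping
   in m->fs matches what the unwinder reconstructs.  */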
10830 /* Emit code to restore REG using a POP insn. */
10833 ix86_emit_restore_reg_using_pop (rtx reg)
10835 struct machine_function *m = cfun->machine;
10836 rtx insn = emit_insn (gen_pop (reg));
10838 ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
10839 m->fs.sp_offset -= UNITS_PER_WORD;
10841 if (m->fs.cfa_reg == crtl->drap_reg
10842 && REGNO (reg) == REGNO (crtl->drap_reg))
10844 /* Previously we'd represented the CFA as an expression
10845 like *(%ebp - 8). We've just popped that value from
10846 the stack, which means we need to reset the CFA to
10847 the drap register. This will remain until we restore
10848 the stack pointer. */
10849 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10850 RTX_FRAME_RELATED_P (insn) = 1;
10852 /* This means that the DRAP register is valid for addressing too. */
10853 m->fs.drap_valid = true;
10857 if (m->fs.cfa_reg == stack_pointer_rtx)
10859 rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
10860 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10861 add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
10862 RTX_FRAME_RELATED_P (insn) = 1;
10864 m->fs.cfa_offset -= UNITS_PER_WORD;
10867 /* When the frame pointer is the CFA, and we pop it, we are
10868 swapping back to the stack pointer as the CFA. This happens
10869 for stack frames that don't allocate other data, so we assume
10870 the stack pointer is now pointing at the return address, i.e.
10871 the function entry state, which makes the offset be 1 word. */
10872 if (reg == hard_frame_pointer_rtx)
10874 m->fs.fp_valid = false;
10875 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10877 m->fs.cfa_reg = stack_pointer_rtx;
10878 m->fs.cfa_offset -= UNITS_PER_WORD;
10880 add_reg_note (insn, REG_CFA_DEF_CFA,
10881 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10882 GEN_INT (m->fs.cfa_offset)));
10883 RTX_FRAME_RELATED_P (insn) = 1;
10888 /* Emit code to restore saved registers using POP insns. */
10891 ix86_emit_restore_regs_using_pop (void)
10893 unsigned int regno;
10895 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10896 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
10897 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno));
10900 /* Emit code and notes for the LEAVE instruction. */
10903 ix86_emit_leave (void)
10905 struct machine_function *m = cfun->machine;
10906 rtx insn = emit_insn (ix86_gen_leave ());
10908 ix86_add_queued_cfa_restore_notes (insn);
10910 gcc_assert (m->fs.fp_valid);
10911 m->fs.sp_valid = true;
10912 m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
10913 m->fs.fp_valid = false;
10915 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10917 m->fs.cfa_reg = stack_pointer_rtx;
10918 m->fs.cfa_offset = m->fs.sp_offset;
10920 add_reg_note (insn, REG_CFA_DEF_CFA,
10921 plus_constant (stack_pointer_rtx, m->fs.sp_offset));
10922 RTX_FRAME_RELATED_P (insn) = 1;
10923 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
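/* leave is equivalent to "movq %rbp, %rsp" followed by "popq %rbp"
   (mov/pop on 32-bit targets), which is why the bookkeeping above
   revalidates the stack pointer at fp_offset - UNITS_PER_WORD and
   invalidates the frame pointer.  */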
10928 /* Emit code to restore saved registers using MOV insns.
10929 First register is restored from CFA - CFA_OFFSET. */
10931 ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
10932 int maybe_eh_return)
10934 struct machine_function *m = cfun->machine;
10935 unsigned int regno;
10937 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10938 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10940 rtx reg = gen_rtx_REG (Pmode, regno);
10943 mem = choose_baseaddr (cfa_offset);
10944 mem = gen_frame_mem (Pmode, mem);
10945 insn = emit_move_insn (reg, mem);
10947 if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
10949 /* Previously we'd represented the CFA as an expression
10950 like *(%ebp - 8). We've just popped that value from
10951 the stack, which means we need to reset the CFA to
10952 the drap register. This will remain until we restore
10953 the stack pointer. */
10954 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10955 RTX_FRAME_RELATED_P (insn) = 1;
10957 /* This means that the DRAP register is valid for addressing. */
10958 m->fs.drap_valid = true;
10961 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10963 cfa_offset -= UNITS_PER_WORD;
10967 /* Emit code to restore saved registers using MOV insns.
10968 First register is restored from CFA - CFA_OFFSET. */
10970 ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
10971 int maybe_eh_return)
10973 unsigned int regno;
10975 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10976 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10978 rtx reg = gen_rtx_REG (V4SFmode, regno);
10981 mem = choose_baseaddr (cfa_offset);
10982 mem = gen_rtx_MEM (V4SFmode, mem);
10983 set_mem_align (mem, 128);
10984 emit_move_insn (reg, mem);
10986 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10992 /* Restore function stack, frame, and registers. */
10995 ix86_expand_epilogue (int style)
10997 struct machine_function *m = cfun->machine;
10998 struct machine_frame_state frame_state_save = m->fs;
10999 struct ix86_frame frame;
11000 bool restore_regs_via_mov;
11003 ix86_finalize_stack_realign_flags ();
11004 ix86_compute_frame_layout (&frame);
11006 m->fs.sp_valid = (!frame_pointer_needed
11007 || (current_function_sp_is_unchanging
11008 && !stack_realign_fp));
11009 gcc_assert (!m->fs.sp_valid
11010 || m->fs.sp_offset == frame.stack_pointer_offset);
11012 /* The FP must be valid if the frame pointer is present. */
11013 gcc_assert (frame_pointer_needed == m->fs.fp_valid);
11014 gcc_assert (!m->fs.fp_valid
11015 || m->fs.fp_offset == frame.hard_frame_pointer_offset);
11017 /* We must have *some* valid pointer to the stack frame. */
11018 gcc_assert (m->fs.sp_valid || m->fs.fp_valid);
11020 /* The DRAP is never valid at this point. */
11021 gcc_assert (!m->fs.drap_valid);
11023 /* See the comment about red zone and frame
11024 pointer usage in ix86_expand_prologue. */
11025 if (frame_pointer_needed && frame.red_zone_size)
11026 emit_insn (gen_memory_blockage ());
11028 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
11029 gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);
11031 /* Determine the CFA offset of the end of the red-zone. */
11032 m->fs.red_zone_offset = 0;
11033 if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
11035 /* The red-zone begins below the return address. */
11036 m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;
11038 /* When the register save area is in the aligned portion of
11039 the stack, determine the maximum runtime displacement that
11040 matches up with the aligned frame. */
11041 if (stack_realign_drap)
11042 m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
11046 /* Special care must be taken for the normal return case of a function
11047 using eh_return: the eax and edx registers are marked as saved, but
11048 not restored along this path. Adjust the save location to match. */
11049 if (crtl->calls_eh_return && style != 2)
11050 frame.reg_save_offset -= 2 * UNITS_PER_WORD;
11052 /* EH_RETURN requires the use of moves to function properly. */
11053 if (crtl->calls_eh_return)
11054 restore_regs_via_mov = true;
11055 /* SEH requires the use of pops to identify the epilogue. */
11056 else if (TARGET_SEH)
11057 restore_regs_via_mov = false;
11058 /* If we're only restoring one register and sp is not valid then
11059 use a move instruction to restore the register, since it's
11060 less work than reloading sp and popping the register.  */
11061 else if (!m->fs.sp_valid && frame.nregs <= 1)
11062 restore_regs_via_mov = true;
11063 else if (TARGET_EPILOGUE_USING_MOVE
11064 && cfun->machine->use_fast_prologue_epilogue
11065 && (frame.nregs > 1
11066 || m->fs.sp_offset != frame.reg_save_offset))
11067 restore_regs_via_mov = true;
11068 else if (frame_pointer_needed
11070 && m->fs.sp_offset != frame.reg_save_offset)
11071 restore_regs_via_mov = true;
11072 else if (frame_pointer_needed
11073 && TARGET_USE_LEAVE
11074 && cfun->machine->use_fast_prologue_epilogue
11075 && frame.nregs == 1)
11076 restore_regs_via_mov = true;
11078 restore_regs_via_mov = false;
11080 if (restore_regs_via_mov || frame.nsseregs)
11082 /* Ensure that the entire register save area is addressable via
11083 the stack pointer, if we will restore via sp. */
11085 && m->fs.sp_offset > 0x7fffffff
11086 && !(m->fs.fp_valid || m->fs.drap_valid)
11087 && (frame.nsseregs + frame.nregs) != 0)
11089 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11090 GEN_INT (m->fs.sp_offset
11091 - frame.sse_reg_save_offset),
11093 m->fs.cfa_reg == stack_pointer_rtx);
11097 /* If there are any SSE registers to restore, then we have to do it
11098 via moves, since there's obviously no pop for SSE regs. */
11099 if (frame.nsseregs)
11100 ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
11103 if (restore_regs_via_mov)
11108 ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);
11110 /* eh_return epilogues need %ecx added to the stack pointer. */
11113 rtx insn, sa = EH_RETURN_STACKADJ_RTX;
11115 /* Stack realignment doesn't work with eh_return.  */
11116 gcc_assert (!stack_realign_drap);
11117 /* Neither do regparm nested functions.  */
11118 gcc_assert (!ix86_static_chain_on_stack);
11120 if (frame_pointer_needed)
11122 t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
11123 t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
11124 emit_insn (gen_rtx_SET (VOIDmode, sa, t));
11126 t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
11127 insn = emit_move_insn (hard_frame_pointer_rtx, t);
11129 /* Note that we use SA as a temporary CFA, as the return
11130 address is at the proper place relative to it. We
11131 pretend this happens at the FP restore insn because
11132 prior to this insn the FP would be stored at the wrong
11133 offset relative to SA, and after this insn we have no
11134 other reasonable register to use for the CFA. We don't
11135 bother resetting the CFA to the SP for the duration of
11136 the return insn. */
11137 add_reg_note (insn, REG_CFA_DEF_CFA,
11138 plus_constant (sa, UNITS_PER_WORD));
11139 ix86_add_queued_cfa_restore_notes (insn);
11140 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
11141 RTX_FRAME_RELATED_P (insn) = 1;
11143 m->fs.cfa_reg = sa;
11144 m->fs.cfa_offset = UNITS_PER_WORD;
11145 m->fs.fp_valid = false;
11147 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
11148 const0_rtx, style, false);
11152 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
11153 t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
11154 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
11155 ix86_add_queued_cfa_restore_notes (insn);
11157 gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
11158 if (m->fs.cfa_offset != UNITS_PER_WORD)
11160 m->fs.cfa_offset = UNITS_PER_WORD;
11161 add_reg_note (insn, REG_CFA_DEF_CFA,
11162 plus_constant (stack_pointer_rtx,
11164 RTX_FRAME_RELATED_P (insn) = 1;
11167 m->fs.sp_offset = UNITS_PER_WORD;
11168 m->fs.sp_valid = true;
11173 /* SEH requires that the function end with (1) a stack adjustment
11174 if necessary, (2) a sequence of pops, and (3) a return or
11175 jump instruction. Prevent insns from the function body from
11176 being scheduled into this sequence. */
11179 /* Prevent a catch region from being adjacent to the standard
11180 epilogue sequence.  Unfortunately neither crtl->uses_eh_lsda
11181 nor several other flags that would be interesting to test are
11182 set up yet.  */
11183 if (flag_non_call_exceptions)
11184 emit_insn (gen_nops (const1_rtx));
11186 emit_insn (gen_blockage ());
11189 /* First step is to deallocate the stack frame so that we can
11190 pop the registers. */
11191 if (!m->fs.sp_valid)
11193 pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
11194 GEN_INT (m->fs.fp_offset
11195 - frame.reg_save_offset),
11198 else if (m->fs.sp_offset != frame.reg_save_offset)
11200 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11201 GEN_INT (m->fs.sp_offset
11202 - frame.reg_save_offset),
11204 m->fs.cfa_reg == stack_pointer_rtx);
11207 ix86_emit_restore_regs_using_pop ();
11210 /* If we used a frame pointer and haven't already got rid of it,
11211 then do so now.  */
11212 if (m->fs.fp_valid)
11214 /* If the stack pointer is valid and pointing at the frame
11215 pointer store address, then we only need a pop. */
11216 if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
11217 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
11218 /* Leave results in shorter dependency chains on CPUs that are
11219 able to grok it fast. */
11220 else if (TARGET_USE_LEAVE
11221 || optimize_function_for_size_p (cfun)
11222 || !cfun->machine->use_fast_prologue_epilogue)
11223 ix86_emit_leave ();
11226 pro_epilogue_adjust_stack (stack_pointer_rtx,
11227 hard_frame_pointer_rtx,
11228 const0_rtx, style, !using_drap);
11229 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
11235 int param_ptr_offset = UNITS_PER_WORD;
11238 gcc_assert (stack_realign_drap);
11240 if (ix86_static_chain_on_stack)
11241 param_ptr_offset += UNITS_PER_WORD;
11242 if (!call_used_regs[REGNO (crtl->drap_reg)])
11243 param_ptr_offset += UNITS_PER_WORD;
11245 insn = emit_insn (gen_rtx_SET
11246 (VOIDmode, stack_pointer_rtx,
11247 gen_rtx_PLUS (Pmode,
11249 GEN_INT (-param_ptr_offset))));
11250 m->fs.cfa_reg = stack_pointer_rtx;
11251 m->fs.cfa_offset = param_ptr_offset;
11252 m->fs.sp_offset = param_ptr_offset;
11253 m->fs.realigned = false;
11255 add_reg_note (insn, REG_CFA_DEF_CFA,
11256 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11257 GEN_INT (param_ptr_offset)));
11258 RTX_FRAME_RELATED_P (insn) = 1;
11260 if (!call_used_regs[REGNO (crtl->drap_reg)])
11261 ix86_emit_restore_reg_using_pop (crtl->drap_reg);
11264 /* At this point the stack pointer must be valid, and we must have
11265 restored all of the registers. We may not have deallocated the
11266 entire stack frame. We've delayed this until now because it may
11267 be possible to merge the local stack deallocation with the
11268 deallocation forced by ix86_static_chain_on_stack. */
11269 gcc_assert (m->fs.sp_valid);
11270 gcc_assert (!m->fs.fp_valid);
11271 gcc_assert (!m->fs.realigned);
11272 if (m->fs.sp_offset != UNITS_PER_WORD)
11274 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11275 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
11279 /* Sibcall epilogues don't want a return instruction. */
11282 m->fs = frame_state_save;
11286 /* Emit vzeroupper if needed. */
11287 if (TARGET_VZEROUPPER
11288 && !TREE_THIS_VOLATILE (cfun->decl)
11289 && !cfun->machine->caller_return_avx256_p)
11290 emit_insn (gen_avx_vzeroupper (GEN_INT (call_no_avx256)));
11292 if (crtl->args.pops_args && crtl->args.size)
11294 rtx popc = GEN_INT (crtl->args.pops_args);
11296 /* i386 can only pop 64K bytes. If asked to pop more, pop return
11297 address, do explicit add, and jump indirectly to the caller. */
11299 if (crtl->args.pops_args >= 65536)
11301 rtx ecx = gen_rtx_REG (SImode, CX_REG);
11304 /* There is no "pascal" calling convention in any 64bit ABI. */
11305 gcc_assert (!TARGET_64BIT);
11307 insn = emit_insn (gen_pop (ecx));
11308 m->fs.cfa_offset -= UNITS_PER_WORD;
11309 m->fs.sp_offset -= UNITS_PER_WORD;
11311 add_reg_note (insn, REG_CFA_ADJUST_CFA,
11312 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
11313 add_reg_note (insn, REG_CFA_REGISTER,
11314 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
11315 RTX_FRAME_RELATED_P (insn) = 1;
11317 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11319 emit_jump_insn (gen_return_indirect_internal (ecx));
11322 emit_jump_insn (gen_return_pop_internal (popc));
11325 emit_jump_insn (gen_return_internal ());
11327 /* Restore the state back to the state from the prologue,
11328 so that it's correct for the next epilogue. */
11329 m->fs = frame_state_save;
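/* For orientation, typical shapes produced by the code above on a
   64-bit target are (a sketch; NN and the register set vary):

	addq	$NN, %rsp
	popq	%rbx
	popq	%rbp
	ret

   or "leave; ret" when TARGET_USE_LEAVE is in effect, and on 32-bit a
   "ret $N" via gen_return_pop_internal for pops_args functions.  */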
11332 /* Reset from the function's potential modifications. */
11335 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
11336 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
11338 if (pic_offset_table_rtx)
11339 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
11341 /* Mach-O doesn't support labels at the end of objects, so if
11342 it looks like we might want one, insert a NOP. */
11344 rtx insn = get_last_insn ();
11347 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
11348 insn = PREV_INSN (insn);
11352 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
11353 fputs ("\tnop\n", file);
11359 /* Return a scratch register to use in the split stack prologue. The
11360 split stack prologue is used for -fsplit-stack. It is the first
11361 instructions in the function, even before the regular prologue.
11362 The scratch register can be any caller-saved register which is not
11363 used for parameters or for the static chain. */
11365 static unsigned int
11366 split_stack_prologue_scratch_regno (void)
11375 is_fastcall = (lookup_attribute ("fastcall",
11376 TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
11378 regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);
11382 if (DECL_STATIC_CHAIN (cfun->decl))
11384 sorry ("-fsplit-stack does not support fastcall with "
11385 "nested function");
11386 return INVALID_REGNUM;
11390 else if (regparm < 3)
11392 if (!DECL_STATIC_CHAIN (cfun->decl))
11398 sorry ("-fsplit-stack does not support 2 register "
11399 "parameters for a nested function");
11400 return INVALID_REGNUM;
11407 /* FIXME: We could make this work by pushing a register
11408 around the addition and comparison. */
11409 sorry ("-fsplit-stack does not support 3 register parameters");
11410 return INVALID_REGNUM;
11415 /* A SYMBOL_REF for the function which allocates new stack space for
11416 split stack.  */
11418 static GTY(()) rtx split_stack_fn;
11420 /* A SYMBOL_REF for the more-stack function to call when using the
11421 large code model.  */
11423 static GTY(()) rtx split_stack_fn_large;
11425 /* Handle -fsplit-stack. These are the first instructions in the
11426 function, even before the regular prologue. */
11429 ix86_expand_split_stack_prologue (void)
11431 struct ix86_frame frame;
11432 HOST_WIDE_INT allocate;
11433 unsigned HOST_WIDE_INT args_size;
11434 rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
11435 rtx scratch_reg = NULL_RTX;
11436 rtx varargs_label = NULL_RTX;
11439 gcc_assert (flag_split_stack && reload_completed);
11441 ix86_finalize_stack_realign_flags ();
11442 ix86_compute_frame_layout (&frame);
11443 allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;
11445 /* This is the label we will branch to if we have enough stack
11446 space. We expect the basic block reordering pass to reverse this
11447 branch if optimizing, so that we branch in the unlikely case. */
11448 label = gen_label_rtx ();
11450 /* We need to compare the stack pointer minus the frame size with
11451 the stack boundary in the TCB. The stack boundary always gives
11452 us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
11453 can compare directly. Otherwise we need to do an addition. */
11455 limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
11456 UNSPEC_STACK_CHECK);
11457 limit = gen_rtx_CONST (Pmode, limit);
11458 limit = gen_rtx_MEM (Pmode, limit);
11459 if (allocate < SPLIT_STACK_AVAILABLE)
11460 current = stack_pointer_rtx;
11463 unsigned int scratch_regno;
11466 /* We need a scratch register to hold the stack pointer minus
11467 the required frame size. Since this is the very start of the
11468 function, the scratch register can be any caller-saved
11469 register which is not used for parameters. */
11470 offset = GEN_INT (- allocate);
11471 scratch_regno = split_stack_prologue_scratch_regno ();
11472 if (scratch_regno == INVALID_REGNUM)
11474 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11475 if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
11477 /* We don't use ix86_gen_add3 in this case because it will
11478 want to split to lea, but when not optimizing the insn
11479 will not be split after this point. */
11480 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11481 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11486 emit_move_insn (scratch_reg, offset);
11487 emit_insn (gen_adddi3 (scratch_reg, scratch_reg,
11488 stack_pointer_rtx));
11490 current = scratch_reg;
11493 ix86_expand_branch (GEU, current, limit, label);
11494 jump_insn = get_last_insn ();
11495 JUMP_LABEL (jump_insn) = label;
11497 /* Mark the jump as very likely to be taken. */
11498 add_reg_note (jump_insn, REG_BR_PROB,
11499 GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));
11501 if (split_stack_fn == NULL_RTX)
11502 split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11503 fn = split_stack_fn;
11505 /* Get more stack space. We pass in the desired stack space and the
11506 size of the arguments to copy to the new stack. In 32-bit mode
11507 we push the parameters; __morestack will return on a new stack
11508 anyhow.  In 64-bit mode we pass the parameters in r10 and
11509 r11.  */
11510 allocate_rtx = GEN_INT (allocate);
11511 args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
11512 call_fusage = NULL_RTX;
11517 reg10 = gen_rtx_REG (Pmode, R10_REG);
11518 reg11 = gen_rtx_REG (Pmode, R11_REG);
11520 /* If this function uses a static chain, it will be in %r10.
11521 Preserve it across the call to __morestack. */
11522 if (DECL_STATIC_CHAIN (cfun->decl))
11526 rax = gen_rtx_REG (Pmode, AX_REG);
11527 emit_move_insn (rax, reg10);
11528 use_reg (&call_fusage, rax);
11531 if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
11533 HOST_WIDE_INT argval;
11535 /* When using the large model we need to load the address
11536 into a register, and we've run out of registers. So we
11537 switch to a different calling convention, and we call a
11538 different function: __morestack_large. We pass the
11539 argument size in the upper 32 bits of r10 and pass the
11540 frame size in the lower 32 bits. */
11541 gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
11542 gcc_assert ((args_size & 0xffffffff) == args_size);
11544 if (split_stack_fn_large == NULL_RTX)
11545 split_stack_fn_large =
11546 gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");
11548 if (ix86_cmodel == CM_LARGE_PIC)
11552 label = gen_label_rtx ();
11553 emit_label (label);
11554 LABEL_PRESERVE_P (label) = 1;
11555 emit_insn (gen_set_rip_rex64 (reg10, label));
11556 emit_insn (gen_set_got_offset_rex64 (reg11, label));
11557 emit_insn (gen_adddi3 (reg10, reg10, reg11));
11558 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
11560 x = gen_rtx_CONST (Pmode, x);
11561 emit_move_insn (reg11, x);
11562 x = gen_rtx_PLUS (Pmode, reg10, reg11);
11563 x = gen_const_mem (Pmode, x);
11564 emit_move_insn (reg11, x);
11567 emit_move_insn (reg11, split_stack_fn_large);
11571 argval = ((args_size << 16) << 16) + allocate;
11572 emit_move_insn (reg10, GEN_INT (argval));
11576 emit_move_insn (reg10, allocate_rtx);
11577 emit_move_insn (reg11, GEN_INT (args_size));
11578 use_reg (&call_fusage, reg11);
11581 use_reg (&call_fusage, reg10);
11585 emit_insn (gen_push (GEN_INT (args_size)));
11586 emit_insn (gen_push (allocate_rtx));
11588 call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
11589 GEN_INT (UNITS_PER_WORD), constm1_rtx,
11591 add_function_usage_to (call_insn, call_fusage);
11593 /* In order to make call/return prediction work right, we now need
11594 to execute a return instruction. See
11595 libgcc/config/i386/morestack.S for the details on how this works.
11597 For flow purposes gcc must not see this as a return
11598 instruction--we need control flow to continue at the subsequent
11599 label. Therefore, we use an unspec. */
11600 gcc_assert (crtl->args.pops_args < 65536);
11601 emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));
11603 /* If we are in 64-bit mode and this function uses a static chain,
11604 we saved %r10 in %rax before calling __morestack.  */
11605 if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
11606 emit_move_insn (gen_rtx_REG (Pmode, R10_REG),
11607 gen_rtx_REG (Pmode, AX_REG));
11609 /* If this function calls va_start, we need to store a pointer to
11610 the arguments on the old stack, because they may not have been
11611 all copied to the new stack. At this point the old stack can be
11612 found at the frame pointer value used by __morestack, because
11613 __morestack has set that up before calling back to us. Here we
11614 store that pointer in a scratch register, and in
11615 ix86_expand_prologue we store the scratch register in a stack
11617 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11619 unsigned int scratch_regno;
11623 scratch_regno = split_stack_prologue_scratch_regno ();
11624 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11625 frame_reg = gen_rtx_REG (Pmode, BP_REG);
11627 /* In 64-bit mode the frame pointer set up by __morestack points at:
11628 old frame pointer value
11629 return address within this function
11630 return address of caller of this function
11631 stack arguments
11632 So we add three words to get to the stack arguments.
11634 In 32-bit mode it points at:
11635 old frame pointer value
11636 return address within this function
11637 first argument to __morestack
11638 second argument to __morestack
11639 return address of caller of this function
11640 stack arguments
11641 So we add five words to get to the stack arguments.  */
11643 words = TARGET_64BIT ? 3 : 5;
11644 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11645 gen_rtx_PLUS (Pmode, frame_reg,
11646 GEN_INT (words * UNITS_PER_WORD))));
11648 varargs_label = gen_label_rtx ();
11649 emit_jump_insn (gen_jump (varargs_label));
11650 JUMP_LABEL (get_last_insn ()) = varargs_label;
11655 emit_label (label);
11656 LABEL_NUSES (label) = 1;
11658 /* If this function calls va_start, we now have to set the scratch
11659 register for the case where we do not call __morestack. In this
11660 case we need to set it based on the stack pointer. */
11661 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11663 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11664 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11665 GEN_INT (UNITS_PER_WORD))));
11667 emit_label (varargs_label);
11668 LABEL_NUSES (varargs_label) = 1;
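/* For reference, the split-stack check built above typically assembles
   to something like this (a sketch: the TCB slot offset is target- and
   libc-specific; 0x70 is the common x86-64 value):

	cmpq	%fs:0x70, %rsp
	jae	1f
	movq	$FRAME_SIZE, %r10
	movq	$ARGS_SIZE, %r11
	callq	__morestack
	retq
   1:

   where the ret after the call is the special split_stack_return that
   keeps the processor's call/return prediction stack balanced.  */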
11672 /* We may have to tell the dataflow pass that the split stack prologue
11673 is initializing a scratch register. */
11676 ix86_live_on_entry (bitmap regs)
11678 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11680 gcc_assert (flag_split_stack);
11681 bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
11685 /* Extract the parts of an RTL expression that is a valid memory address
11686 for an instruction. Return 0 if the structure of the address is
11687 grossly off. Return -1 if the address contains ASHIFT, so it is not
11688 strictly valid, but still used for computing the length of the lea instruction.  */
11691 ix86_decompose_address (rtx addr, struct ix86_address *out)
11693 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
11694 rtx base_reg, index_reg;
11695 HOST_WIDE_INT scale = 1;
11696 rtx scale_rtx = NULL_RTX;
11699 enum ix86_address_seg seg = SEG_DEFAULT;
11701 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
11703 else if (GET_CODE (addr) == PLUS)
11705 rtx addends[4], op;
11713 addends[n++] = XEXP (op, 1);
11716 while (GET_CODE (op) == PLUS);
11721 for (i = n; i >= 0; --i)
11724 switch (GET_CODE (op))
11729 index = XEXP (op, 0);
11730 scale_rtx = XEXP (op, 1);
11736 index = XEXP (op, 0);
11737 tmp = XEXP (op, 1);
11738 if (!CONST_INT_P (tmp))
11740 scale = INTVAL (tmp);
11741 if ((unsigned HOST_WIDE_INT) scale > 3)
11743 scale = 1 << scale;
11747 if (XINT (op, 1) == UNSPEC_TP
11748 && TARGET_TLS_DIRECT_SEG_REFS
11749 && seg == SEG_DEFAULT)
11750 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
11779 else if (GET_CODE (addr) == MULT)
11781 index = XEXP (addr, 0); /* index*scale */
11782 scale_rtx = XEXP (addr, 1);
11784 else if (GET_CODE (addr) == ASHIFT)
11786 /* We're called for lea too, which implements ashift on occasion. */
11787 index = XEXP (addr, 0);
11788 tmp = XEXP (addr, 1);
11789 if (!CONST_INT_P (tmp))
11791 scale = INTVAL (tmp);
11792 if ((unsigned HOST_WIDE_INT) scale > 3)
11794 scale = 1 << scale;
11798 disp = addr; /* displacement */
11800 /* Extract the integral value of scale. */
11803 if (!CONST_INT_P (scale_rtx))
11805 scale = INTVAL (scale_rtx);
11808 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
11809 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
11811 /* Avoid useless 0 displacement. */
11812 if (disp == const0_rtx && (base || index))
11815 /* Allow arg pointer and stack pointer as index if there is no scaling.  */
11816 if (base_reg && index_reg && scale == 1
11817 && (index_reg == arg_pointer_rtx
11818 || index_reg == frame_pointer_rtx
11819 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
11822 tmp = base, base = index, index = tmp;
11823 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
11826 /* Special case: %ebp cannot be encoded as a base without a displacement.
11827 Similarly %r13.  */
11830 && (base_reg == hard_frame_pointer_rtx
11831 || base_reg == frame_pointer_rtx
11832 || base_reg == arg_pointer_rtx
11833 || (REG_P (base_reg)
11834 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
11835 || REGNO (base_reg) == R13_REG))))
11838 /* Special case: on K6, [%esi] makes the instruction vector decoded.
11839 Avoid this by transforming to [%esi+0].
11840 Reload calls address legitimization without cfun defined, so we need
11841 to test cfun for being non-NULL. */
11842 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
11843 && base_reg && !index_reg && !disp
11844 && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
11847 /* Special case: encode reg+reg instead of reg*2. */
11848 if (!base && index && scale == 2)
11849 base = index, base_reg = index_reg, scale = 1;
11851 /* Special case: scaling cannot be encoded without base or displacement. */
11852 if (!base && !disp && index && scale != 1)
11856 out->index = index;
11858 out->scale = scale;
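/* A worked example of the decomposition above: the address
   (plus (plus (mult (reg B) (const_int 4)) (reg A)) (const_int 16))
   yields base == A, index == B, scale == 4 and disp == 16, i.e. the
   operand printed as 16(%eax,%ebx,4) in AT&T syntax (register names
   illustrative).  */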
11864 /* Return cost of the memory address x.
11865 For i386, it is better to use a complex address than let gcc copy
11866 the address into a reg and make a new pseudo. But not if the address
11867 requires two regs - that would mean more pseudos with longer
11868 lifetimes.  */
11870 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
11872 struct ix86_address parts;
11874 int ok = ix86_decompose_address (x, &parts);
11878 if (parts.base && GET_CODE (parts.base) == SUBREG)
11879 parts.base = SUBREG_REG (parts.base);
11880 if (parts.index && GET_CODE (parts.index) == SUBREG)
11881 parts.index = SUBREG_REG (parts.index);
11883 /* Attempt to minimize number of registers in the address. */
11885 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
11887 && (!REG_P (parts.index)
11888 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
11892 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
11894 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
11895 && parts.base != parts.index)
11898 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
11899 since its predecode logic can't detect the length of instructions
11900 and it degenerates to vector decoded.  Increase cost of such
11901 addresses here.  The penalty is minimally 2 cycles.  It may be worthwhile
11902 to split such addresses or even refuse such addresses at all.
11904 Following addressing modes are affected:
11905  [base+scale*index]
11906  [scale*index+disp]
11907  [base+index]
11909 The first and last case may be avoidable by explicitly coding the zero in
11910 memory address, but I don't have an AMD-K6 machine handy to check this
11911 theory.  */
11914 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
11915 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
11916 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
11922 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
11923 this is used to form addresses to local data when -fPIC is in
11924 effect.  */
11927 darwin_local_data_pic (rtx disp)
11929 return (GET_CODE (disp) == UNSPEC
11930 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
11933 /* Determine if a given RTX is a valid constant. We already know this
11934 satisfies CONSTANT_P. */
11937 legitimate_constant_p (rtx x)
11939 switch (GET_CODE (x))
11944 if (GET_CODE (x) == PLUS)
11946 if (!CONST_INT_P (XEXP (x, 1)))
11951 if (TARGET_MACHO && darwin_local_data_pic (x))
11954 /* Only some unspecs are valid as "constants". */
11955 if (GET_CODE (x) == UNSPEC)
11956 switch (XINT (x, 1))
11959 case UNSPEC_GOTOFF:
11960 case UNSPEC_PLTOFF:
11961 return TARGET_64BIT;
11963 case UNSPEC_NTPOFF:
11964 x = XVECEXP (x, 0, 0);
11965 return (GET_CODE (x) == SYMBOL_REF
11966 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
11967 case UNSPEC_DTPOFF:
11968 x = XVECEXP (x, 0, 0);
11969 return (GET_CODE (x) == SYMBOL_REF
11970 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
11975 /* We must have drilled down to a symbol. */
11976 if (GET_CODE (x) == LABEL_REF)
11978 if (GET_CODE (x) != SYMBOL_REF)
11983 /* TLS symbols are never valid. */
11984 if (SYMBOL_REF_TLS_MODEL (x))
11987 /* DLLIMPORT symbols are never valid. */
11988 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11989 && SYMBOL_REF_DLLIMPORT_P (x))
11993 /* mdynamic-no-pic */
11994 if (MACHO_DYNAMIC_NO_PIC_P)
11995 return machopic_symbol_defined_p (x);
12000 if (GET_MODE (x) == TImode
12001 && x != CONST0_RTX (TImode)
12007 if (!standard_sse_constant_p (x))
12014 /* Otherwise we handle everything else in the move patterns. */
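/* Examples of the distinctions made above: "sym" and "sym+8" are
   valid constants, as is a local-exec TLS reference wrapped in
   UNSPEC_NTPOFF; a bare SYMBOL_REF with a TLS model or a DLLIMPORT
   symbol is not, and such values must be handled by the move
   patterns instead.  */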
12018 /* Determine if it's legal to put X into the constant pool. This
12019 is not possible for the address of thread-local symbols, which
12020 is checked above. */
12023 ix86_cannot_force_const_mem (rtx x)
12025 /* We can always put integral constants and vectors in memory. */
12026 switch (GET_CODE (x))
12036 return !legitimate_constant_p (x);
12040 /* Nonzero if the constant value X is a legitimate general operand
12041 when generating PIC code. It is given that flag_pic is on and
12042 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
12045 legitimate_pic_operand_p (rtx x)
12049 switch (GET_CODE (x))
12052 inner = XEXP (x, 0);
12053 if (GET_CODE (inner) == PLUS
12054 && CONST_INT_P (XEXP (inner, 1)))
12055 inner = XEXP (inner, 0);
12057 /* Only some unspecs are valid as "constants". */
12058 if (GET_CODE (inner) == UNSPEC)
12059 switch (XINT (inner, 1))
12062 case UNSPEC_GOTOFF:
12063 case UNSPEC_PLTOFF:
12064 return TARGET_64BIT;
12066 x = XVECEXP (inner, 0, 0);
12067 return (GET_CODE (x) == SYMBOL_REF
12068 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
12069 case UNSPEC_MACHOPIC_OFFSET:
12070 return legitimate_pic_address_disp_p (x);
12078 return legitimate_pic_address_disp_p (x);
12085 /* Determine if a given CONST RTX is a valid memory displacement
12086 in PIC mode.  */
12089 legitimate_pic_address_disp_p (rtx disp)
12093 /* In 64bit mode we can allow direct addresses of symbols and labels
12094 when they are not dynamic symbols. */
12097 rtx op0 = disp, op1;
12099 switch (GET_CODE (disp))
12105 if (GET_CODE (XEXP (disp, 0)) != PLUS)
12107 op0 = XEXP (XEXP (disp, 0), 0);
12108 op1 = XEXP (XEXP (disp, 0), 1);
12109 if (!CONST_INT_P (op1)
12110 || INTVAL (op1) >= 16*1024*1024
12111 || INTVAL (op1) < -16*1024*1024)
12113 if (GET_CODE (op0) == LABEL_REF)
12115 if (GET_CODE (op0) != SYMBOL_REF)
12120 /* TLS references should always be enclosed in UNSPEC. */
12121 if (SYMBOL_REF_TLS_MODEL (op0))
12123 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
12124 && ix86_cmodel != CM_LARGE_PIC)
12132 if (GET_CODE (disp) != CONST)
12134 disp = XEXP (disp, 0);
12138 /* It is unsafe to allow PLUS expressions; this limits the allowed
12139 distance of GOT table references, which we should not need anyway.  */
12140 if (GET_CODE (disp) != UNSPEC
12141 || (XINT (disp, 1) != UNSPEC_GOTPCREL
12142 && XINT (disp, 1) != UNSPEC_GOTOFF
12143 && XINT (disp, 1) != UNSPEC_PCREL
12144 && XINT (disp, 1) != UNSPEC_PLTOFF))
12147 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
12148 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
12154 if (GET_CODE (disp) == PLUS)
12156 if (!CONST_INT_P (XEXP (disp, 1)))
12158 disp = XEXP (disp, 0);
12162 if (TARGET_MACHO && darwin_local_data_pic (disp))
12165 if (GET_CODE (disp) != UNSPEC)
12168 switch (XINT (disp, 1))
12173 /* We need to check for both symbols and labels because VxWorks loads
12174 text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
12175 details.  */
12176 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
12177 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
12178 case UNSPEC_GOTOFF:
12179 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
12180 While the ABI also specifies a 32bit relocation, we don't produce
12181 it in the small PIC model at all.  */
12182 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
12183 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
12185 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
12187 case UNSPEC_GOTTPOFF:
12188 case UNSPEC_GOTNTPOFF:
12189 case UNSPEC_INDNTPOFF:
12192 disp = XVECEXP (disp, 0, 0);
12193 return (GET_CODE (disp) == SYMBOL_REF
12194 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
12195 case UNSPEC_NTPOFF:
12196 disp = XVECEXP (disp, 0, 0);
12197 return (GET_CODE (disp) == SYMBOL_REF
12198 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
12199 case UNSPEC_DTPOFF:
12200 disp = XVECEXP (disp, 0, 0);
12201 return (GET_CODE (disp) == SYMBOL_REF
12202 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
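/* For example, the UNSPEC_GOTOFF case above corresponds to 32-bit PIC
   operands that print as foo@GOTOFF(%ebx), while UNSPEC_GOTPCREL
   covers 64-bit GOT loads such as "movq foo@GOTPCREL(%rip), %rax"
   (register choice illustrative).  */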
12208 /* Recognizes RTL expressions that are valid memory addresses for an
12209 instruction. The MODE argument is the machine mode for the MEM
12210 expression that wants to use this address.
12212 It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
12213 convert common non-canonical forms to canonical form so that they will
12214 be recognized.  */
12217 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
12218 rtx addr, bool strict)
12220 struct ix86_address parts;
12221 rtx base, index, disp;
12222 HOST_WIDE_INT scale;
12224 if (ix86_decompose_address (addr, &parts) <= 0)
12225 /* Decomposition failed. */
12229 index = parts.index;
12231 scale = parts.scale;
12233 /* Validate base register.
12235 Don't allow SUBREG's that span more than a word here. It can lead to spill
12236 failures when the base is one word out of a two word structure, which is
12237 represented internally as a DImode int. */
12245 else if (GET_CODE (base) == SUBREG
12246 && REG_P (SUBREG_REG (base))
12247 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
12249 reg = SUBREG_REG (base);
12251 /* Base is not a register. */
12254 if (GET_MODE (base) != Pmode)
12255 /* Base is not in Pmode. */
12258 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
12259 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
12260 /* Base is not valid. */
12264 /* Validate index register.
12266 Don't allow SUBREG's that span more than a word here -- same as above. */
12274 else if (GET_CODE (index) == SUBREG
12275 && REG_P (SUBREG_REG (index))
12276 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
12278 reg = SUBREG_REG (index);
12280 /* Index is not a register. */
12283 if (GET_MODE (index) != Pmode)
12284 /* Index is not in Pmode. */
12287 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
12288 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
12289 /* Index is not valid. */
12293 /* Validate scale factor. */
12297 /* Scale without index. */
12300 if (scale != 2 && scale != 4 && scale != 8)
12301 /* Scale is not a valid multiplier. */
12305 /* Validate displacement. */
12308 if (GET_CODE (disp) == CONST
12309 && GET_CODE (XEXP (disp, 0)) == UNSPEC
12310 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
12311 switch (XINT (XEXP (disp, 0), 1))
12313 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
12314 used. While the ABI also specifies 32bit relocations, we don't produce
12315 them at all and use IP-relative addressing instead. */
12317 case UNSPEC_GOTOFF:
12318 gcc_assert (flag_pic);
12320 goto is_legitimate_pic;
12322 /* 64bit address unspec. */
12325 case UNSPEC_GOTPCREL:
12327 gcc_assert (flag_pic);
12328 goto is_legitimate_pic;
12330 case UNSPEC_GOTTPOFF:
12331 case UNSPEC_GOTNTPOFF:
12332 case UNSPEC_INDNTPOFF:
12333 case UNSPEC_NTPOFF:
12334 case UNSPEC_DTPOFF:
12337 case UNSPEC_STACK_CHECK:
12338 gcc_assert (flag_split_stack);
12342 /* Invalid address unspec. */
12346 else if (SYMBOLIC_CONST (disp)
12350 && MACHOPIC_INDIRECT
12351 && !machopic_operand_p (disp)
12357 if (TARGET_64BIT && (index || base))
12359 /* foo@dtpoff(%rX) is ok. */
12360 if (GET_CODE (disp) != CONST
12361 || GET_CODE (XEXP (disp, 0)) != PLUS
12362 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
12363 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
12364 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
12365 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
12366 /* Non-constant pic memory reference. */
12369 else if ((!TARGET_MACHO || flag_pic)
12370 && ! legitimate_pic_address_disp_p (disp))
12371 /* Displacement is an invalid pic construct. */
12374 else if (MACHO_DYNAMIC_NO_PIC_P && !legitimate_constant_p (disp))
12375 /* Displacement must be referenced via non_lazy_pointer. */
12379 /* This code used to verify that a symbolic pic displacement
12380 includes the pic_offset_table_rtx register.
12382 While this is a good idea, unfortunately these constructs may
12383 be created by the "adds using lea" optimization for incorrect
12392 code. Such code is nonsensical, but results in addressing the
12393 GOT table with a pic_offset_table_rtx base. We can't
12394 just refuse it easily, since it gets matched by the
12395 "addsi3" pattern, which later gets split to lea in the
12396 case the output register differs from the input. While this
12397 could be handled by a separate addsi pattern for this case
12398 that never results in lea, disabling this test seems to be
12399 the easier and correct fix for the crash. */
12401 else if (GET_CODE (disp) != LABEL_REF
12402 && !CONST_INT_P (disp)
12403 && (GET_CODE (disp) != CONST
12404 || !legitimate_constant_p (disp))
12405 && (GET_CODE (disp) != SYMBOL_REF
12406 || !legitimate_constant_p (disp)))
12407 /* Displacement is not constant. */
12409 else if (TARGET_64BIT
12410 && !x86_64_immediate_operand (disp, VOIDmode))
12411 /* Displacement is out of range. */
12415 /* Everything looks valid. */
12419 /* Determine if a given RTX is a valid constant address. */
12422 constant_address_p (rtx x)
12424 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
12427 /* Return a unique alias set for the GOT. */
12429 static alias_set_type
12430 ix86_GOT_alias_set (void)
12432 static alias_set_type set = -1;
12434 set = new_alias_set ();
12438 /* Return a legitimate reference for ORIG (an address) using the
12439 register REG. If REG is 0, a new pseudo is generated.
12441 There are two types of references that must be handled:
12443 1. Global data references must load the address from the GOT, via
12444 the PIC reg. An insn is emitted to do this load, and the reg is
12447 2. Static data references, constant pool addresses, and code labels
12448 compute the address as an offset from the GOT, whose base is in
12449 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
12450 differentiate them from global data objects. The returned
12451 address is the PIC reg + an unspec constant.
12453 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
12454 reg also appears in the address. */
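/* A sketch of the usual 32-bit PIC sequences (illustrative, not a
specification of the emitted code): a global access becomes a load
from the GOT, e.g.
movl foo@GOT(%ebx), %eax
with the address wrapped in UNSPEC_GOT, while a local access becomes
a GOT-relative offset, e.g.
leal bar@GOTOFF(%ebx), %eax
using UNSPEC_GOTOFF, matching the two cases described above. */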
12457 legitimize_pic_address (rtx orig, rtx reg)
12460 rtx new_rtx = orig;
12464 if (TARGET_MACHO && !TARGET_64BIT)
12467 reg = gen_reg_rtx (Pmode);
12468 /* Use the generic Mach-O PIC machinery. */
12469 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
12473 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
12475 else if (TARGET_64BIT
12476 && ix86_cmodel != CM_SMALL_PIC
12477 && gotoff_operand (addr, Pmode))
12480 /* This symbol may be referenced via a displacement from the PIC
12481 base address (@GOTOFF). */
12483 if (reload_in_progress)
12484 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12485 if (GET_CODE (addr) == CONST)
12486 addr = XEXP (addr, 0);
12487 if (GET_CODE (addr) == PLUS)
12489 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12491 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12494 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12495 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12497 tmpreg = gen_reg_rtx (Pmode);
12500 emit_move_insn (tmpreg, new_rtx);
12504 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
12505 tmpreg, 1, OPTAB_DIRECT);
12508 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
12510 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
12512 /* This symbol may be referenced via a displacement from the PIC
12513 base address (@GOTOFF). */
12515 if (reload_in_progress)
12516 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12517 if (GET_CODE (addr) == CONST)
12518 addr = XEXP (addr, 0);
12519 if (GET_CODE (addr) == PLUS)
12521 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12523 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12526 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12527 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12528 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12532 emit_move_insn (reg, new_rtx);
12536 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
12537 /* We can't use @GOTOFF for text labels on VxWorks;
12538 see gotoff_operand. */
12539 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
12541 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12543 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
12544 return legitimize_dllimport_symbol (addr, true);
12545 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
12546 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
12547 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
12549 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
12550 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
12554 /* For x64 PE-COFF there is no GOT table, so we use the address directly. */
12556 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12558 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
12559 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12562 reg = gen_reg_rtx (Pmode);
12563 emit_move_insn (reg, new_rtx);
12566 else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
12568 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
12569 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12570 new_rtx = gen_const_mem (Pmode, new_rtx);
12571 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12574 reg = gen_reg_rtx (Pmode);
12575 /* Use gen_movsi directly; otherwise the address is loaded
12576 into a register for CSE. We don't want to CSE these addresses;
12577 instead we CSE addresses from the GOT table, so skip this. */
12578 emit_insn (gen_movsi (reg, new_rtx));
12583 /* This symbol must be referenced via a load from the
12584 Global Offset Table (@GOT). */
12586 if (reload_in_progress)
12587 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12588 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
12589 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12591 new_rtx = force_reg (Pmode, new_rtx);
12592 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12593 new_rtx = gen_const_mem (Pmode, new_rtx);
12594 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12597 reg = gen_reg_rtx (Pmode);
12598 emit_move_insn (reg, new_rtx);
12604 if (CONST_INT_P (addr)
12605 && !x86_64_immediate_operand (addr, VOIDmode))
12609 emit_move_insn (reg, addr);
12613 new_rtx = force_reg (Pmode, addr);
12615 else if (GET_CODE (addr) == CONST)
12617 addr = XEXP (addr, 0);
12619 /* We must match stuff we generate before. Assume the only
12620 unspecs that can get here are ours. Not that we could do
12621 anything with them anyway.... */
12622 if (GET_CODE (addr) == UNSPEC
12623 || (GET_CODE (addr) == PLUS
12624 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
12626 gcc_assert (GET_CODE (addr) == PLUS);
12628 if (GET_CODE (addr) == PLUS)
12630 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
12632 /* Check first to see if this is a constant offset from a @GOTOFF
12633 symbol reference. */
12634 if (gotoff_operand (op0, Pmode)
12635 && CONST_INT_P (op1))
12639 if (reload_in_progress)
12640 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12641 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
12643 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
12644 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12645 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12649 emit_move_insn (reg, new_rtx);
12655 if (INTVAL (op1) < -16*1024*1024
12656 || INTVAL (op1) >= 16*1024*1024)
12658 if (!x86_64_immediate_operand (op1, Pmode))
12659 op1 = force_reg (Pmode, op1);
12660 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
12666 base = legitimize_pic_address (XEXP (addr, 0), reg);
12667 new_rtx = legitimize_pic_address (XEXP (addr, 1),
12668 base == reg ? NULL_RTX : reg);
12670 if (CONST_INT_P (new_rtx))
12671 new_rtx = plus_constant (base, INTVAL (new_rtx));
12674 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
12676 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
12677 new_rtx = XEXP (new_rtx, 1);
12679 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
12687 /* Load the thread pointer. If TO_REG is true, force it into a register. */
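/* On GNU systems the thread pointer is held in a segment register, so
the UNSPEC_TP reference built below typically ends up printed as
"%gs:0" (32-bit) or "%fs:0" (64-bit). (Illustrative.) */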
12690 get_thread_pointer (int to_reg)
12694 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
12698 reg = gen_reg_rtx (Pmode);
12699 insn = gen_rtx_SET (VOIDmode, reg, tp);
12700 insn = emit_insn (insn);
12705 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
12706 false if we expect this to be used for a memory address and true if
12707 we expect to load the address into a register. */
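/* As a rough illustration (the classic IA-32 sequences from the ELF TLS
ABI; actual output varies with target and options):
global dynamic: leal x@tlsgd(,%ebx,1), %eax; call ___tls_get_addr
local dynamic: leal x@tlsldm(%ebx), %eax; call ___tls_get_addr;
leal x@dtpoff(%eax), %edx
initial exec: movl %gs:0, %eax; addl x@gottpoff(%ebx), %eax
local exec: movl %gs:0, %eax; leal x@ntpoff(%eax), %eax */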
12710 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
12712 rtx dest, base, off, pic, tp;
12717 case TLS_MODEL_GLOBAL_DYNAMIC:
12718 dest = gen_reg_rtx (Pmode);
12719 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12721 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12723 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
12726 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
12727 insns = get_insns ();
12730 RTL_CONST_CALL_P (insns) = 1;
12731 emit_libcall_block (insns, dest, rax, x);
12733 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12734 emit_insn (gen_tls_global_dynamic_64 (dest, x));
12736 emit_insn (gen_tls_global_dynamic_32 (dest, x));
12738 if (TARGET_GNU2_TLS)
12740 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
12742 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
12746 case TLS_MODEL_LOCAL_DYNAMIC:
12747 base = gen_reg_rtx (Pmode);
12748 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12750 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12752 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
12755 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
12756 insns = get_insns ();
12759 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
12760 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
12761 RTL_CONST_CALL_P (insns) = 1;
12762 emit_libcall_block (insns, base, rax, note);
12764 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12765 emit_insn (gen_tls_local_dynamic_base_64 (base));
12767 emit_insn (gen_tls_local_dynamic_base_32 (base));
12769 if (TARGET_GNU2_TLS)
12771 rtx x = ix86_tls_module_base ();
12773 set_unique_reg_note (get_last_insn (), REG_EQUIV,
12774 gen_rtx_MINUS (Pmode, x, tp));
12777 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
12778 off = gen_rtx_CONST (Pmode, off);
12780 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
12782 if (TARGET_GNU2_TLS)
12784 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
12786 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
12791 case TLS_MODEL_INITIAL_EXEC:
12794 if (TARGET_SUN_TLS)
12796 /* The Sun linker took the AMD64 TLS spec literally
12797 and can only handle %rax as destination of the
12798 initial-exec code sequence. */
12800 dest = gen_reg_rtx (Pmode);
12801 emit_insn (gen_tls_initial_exec_64_sun (dest, x));
12806 type = UNSPEC_GOTNTPOFF;
12810 if (reload_in_progress)
12811 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12812 pic = pic_offset_table_rtx;
12813 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
12815 else if (!TARGET_ANY_GNU_TLS)
12817 pic = gen_reg_rtx (Pmode);
12818 emit_insn (gen_set_got (pic));
12819 type = UNSPEC_GOTTPOFF;
12824 type = UNSPEC_INDNTPOFF;
12827 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
12828 off = gen_rtx_CONST (Pmode, off);
12830 off = gen_rtx_PLUS (Pmode, pic, off);
12831 off = gen_const_mem (Pmode, off);
12832 set_mem_alias_set (off, ix86_GOT_alias_set ());
12834 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12836 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12837 off = force_reg (Pmode, off);
12838 return gen_rtx_PLUS (Pmode, base, off);
12842 base = get_thread_pointer (true);
12843 dest = gen_reg_rtx (Pmode);
12844 emit_insn (gen_subsi3 (dest, base, off));
12848 case TLS_MODEL_LOCAL_EXEC:
12849 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
12850 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12851 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
12852 off = gen_rtx_CONST (Pmode, off);
12854 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12856 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12857 return gen_rtx_PLUS (Pmode, base, off);
12861 base = get_thread_pointer (true);
12862 dest = gen_reg_rtx (Pmode);
12863 emit_insn (gen_subsi3 (dest, base, off));
12868 gcc_unreachable ();
12874 /* Create or return the unique __imp_DECL dllimport symbol corresponding to symbol DECL. */
12877 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
12878 htab_t dllimport_map;
12881 get_dllimport_decl (tree decl)
12883 struct tree_map *h, in;
12886 const char *prefix;
12887 size_t namelen, prefixlen;
12892 if (!dllimport_map)
12893 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
12895 in.hash = htab_hash_pointer (decl);
12896 in.base.from = decl;
12897 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
12898 h = (struct tree_map *) *loc;
12902 *loc = h = ggc_alloc_tree_map ();
12904 h->base.from = decl;
12905 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
12906 VAR_DECL, NULL, ptr_type_node);
12907 DECL_ARTIFICIAL (to) = 1;
12908 DECL_IGNORED_P (to) = 1;
12909 DECL_EXTERNAL (to) = 1;
12910 TREE_READONLY (to) = 1;
12912 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
12913 name = targetm.strip_name_encoding (name);
12914 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
12915 ? "*__imp_" : "*__imp__";
12916 namelen = strlen (name);
12917 prefixlen = strlen (prefix);
12918 imp_name = (char *) alloca (namelen + prefixlen + 1);
12919 memcpy (imp_name, prefix, prefixlen);
12920 memcpy (imp_name + prefixlen, name, namelen + 1);
12922 name = ggc_alloc_string (imp_name, namelen + prefixlen);
12923 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
12924 SET_SYMBOL_REF_DECL (rtl, to);
12925 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
12927 rtl = gen_const_mem (Pmode, rtl);
12928 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
12930 SET_DECL_RTL (to, rtl);
12931 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
12936 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
12937 true if we require the result be a register. */
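/* Illustration: a reference to a dllimported "foo" is replaced by a
load from the import-table slot set up by get_dllimport_decl above,
i.e. "__imp__foo" (or "__imp_foo", depending on the user label
prefix). */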
12940 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
12945 gcc_assert (SYMBOL_REF_DECL (symbol));
12946 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
12948 x = DECL_RTL (imp_decl);
12950 x = force_reg (Pmode, x);
12954 /* Try machine-dependent ways of modifying an illegitimate address
12955 to be legitimate. If we find one, return the new, valid address.
12956 This macro is used in only one place: `memory_address' in explow.c.
12958 OLDX is the address as it was before break_out_memory_refs was called.
12959 In some cases it is useful to look at this to decide what needs to be done.
12961 It is always safe for this macro to do nothing. It exists to recognize
12962 opportunities to optimize the output.
12964 For the 80386, we handle X+REG by loading X into a register R and
12965 using R+REG. R will go in a general reg and indexing will be used.
12966 However, if REG is a broken-out memory address or multiplication,
12967 nothing needs to be done because REG can certainly go in a general reg.
12969 When -fpic is used, special handling is needed for symbolic references.
12970 See comments by legitimize_pic_address in i386.c for details. */
12973 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
12974 enum machine_mode mode)
12979 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
12981 return legitimize_tls_address (x, (enum tls_model) log, false);
12982 if (GET_CODE (x) == CONST
12983 && GET_CODE (XEXP (x, 0)) == PLUS
12984 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12985 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
12987 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
12988 (enum tls_model) log, false);
12989 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12992 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12994 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
12995 return legitimize_dllimport_symbol (x, true);
12996 if (GET_CODE (x) == CONST
12997 && GET_CODE (XEXP (x, 0)) == PLUS
12998 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12999 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
13001 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
13002 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
13006 if (flag_pic && SYMBOLIC_CONST (x))
13007 return legitimize_pic_address (x, 0);
13010 if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
13011 return machopic_indirect_data_reference (x, 0);
13014 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
13015 if (GET_CODE (x) == ASHIFT
13016 && CONST_INT_P (XEXP (x, 1))
13017 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
13020 log = INTVAL (XEXP (x, 1));
13021 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
13022 GEN_INT (1 << log));
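/* E.g. (ashift (reg) (const_int 2)) becomes (mult (reg) (const_int 4));
the MULT form is what the scaled-index addressing patterns match. */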
13025 if (GET_CODE (x) == PLUS)
13027 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
13029 if (GET_CODE (XEXP (x, 0)) == ASHIFT
13030 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
13031 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
13034 log = INTVAL (XEXP (XEXP (x, 0), 1));
13035 XEXP (x, 0) = gen_rtx_MULT (Pmode,
13036 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
13037 GEN_INT (1 << log));
13040 if (GET_CODE (XEXP (x, 1)) == ASHIFT
13041 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
13042 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
13045 log = INTVAL (XEXP (XEXP (x, 1), 1));
13046 XEXP (x, 1) = gen_rtx_MULT (Pmode,
13047 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
13048 GEN_INT (1 << log));
13051 /* Put multiply first if it isn't already. */
13052 if (GET_CODE (XEXP (x, 1)) == MULT)
13054 rtx tmp = XEXP (x, 0);
13055 XEXP (x, 0) = XEXP (x, 1);
13060 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
13061 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
13062 created by virtual register instantiation, register elimination, and
13063 similar optimizations. */
13064 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
13067 x = gen_rtx_PLUS (Pmode,
13068 gen_rtx_PLUS (Pmode, XEXP (x, 0),
13069 XEXP (XEXP (x, 1), 0)),
13070 XEXP (XEXP (x, 1), 1));
13074 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
13075 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
13076 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
13077 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
13078 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
13079 && CONSTANT_P (XEXP (x, 1)))
13082 rtx other = NULL_RTX;
13084 if (CONST_INT_P (XEXP (x, 1)))
13086 constant = XEXP (x, 1);
13087 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
13089 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
13091 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
13092 other = XEXP (x, 1);
13100 x = gen_rtx_PLUS (Pmode,
13101 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
13102 XEXP (XEXP (XEXP (x, 0), 1), 0)),
13103 plus_constant (other, INTVAL (constant)));
13107 if (changed && ix86_legitimate_address_p (mode, x, false))
13110 if (GET_CODE (XEXP (x, 0)) == MULT)
13113 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
13116 if (GET_CODE (XEXP (x, 1)) == MULT)
13119 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
13123 && REG_P (XEXP (x, 1))
13124 && REG_P (XEXP (x, 0)))
13127 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
13130 x = legitimize_pic_address (x, 0);
13133 if (changed && ix86_legitimate_address_p (mode, x, false))
13136 if (REG_P (XEXP (x, 0)))
13138 rtx temp = gen_reg_rtx (Pmode);
13139 rtx val = force_operand (XEXP (x, 1), temp);
13141 emit_move_insn (temp, val);
13143 XEXP (x, 1) = temp;
13147 else if (REG_P (XEXP (x, 1)))
13149 rtx temp = gen_reg_rtx (Pmode);
13150 rtx val = force_operand (XEXP (x, 0), temp);
13152 emit_move_insn (temp, val);
13154 XEXP (x, 0) = temp;
13162 /* Print an integer constant expression in assembler syntax. Addition
13163 and subtraction are the only arithmetic that may appear in these
13164 expressions. FILE is the stdio stream to write to, X is the rtx, and
13165 CODE is the operand print code from the output string. */
13168 output_pic_addr_const (FILE *file, rtx x, int code)
13172 switch (GET_CODE (x))
13175 gcc_assert (flag_pic);
13180 if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
13181 output_addr_const (file, x);
13184 const char *name = XSTR (x, 0);
13186 /* Mark the decl as referenced so that cgraph will
13187 output the function. */
13188 if (SYMBOL_REF_DECL (x))
13189 mark_decl_referenced (SYMBOL_REF_DECL (x));
13192 if (MACHOPIC_INDIRECT
13193 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
13194 name = machopic_indirection_name (x, /*stub_p=*/true);
13196 assemble_name (file, name);
13198 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
13199 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
13200 fputs ("@PLT", file);
13207 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
13208 assemble_name (asm_out_file, buf);
13212 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
13216 /* This used to output parentheses around the expression,
13217 but that does not work on the 386 (either ATT or BSD assembler). */
13218 output_pic_addr_const (file, XEXP (x, 0), code);
13222 if (GET_MODE (x) == VOIDmode)
13224 /* We can use %d if the number is <32 bits and positive. */
13225 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
13226 fprintf (file, "0x%lx%08lx",
13227 (unsigned long) CONST_DOUBLE_HIGH (x),
13228 (unsigned long) CONST_DOUBLE_LOW (x));
13230 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
13233 /* We can't handle floating point constants;
13234 TARGET_PRINT_OPERAND must handle them. */
13235 output_operand_lossage ("floating constant misused");
13239 /* Some assemblers need integer constants to appear first. */
13240 if (CONST_INT_P (XEXP (x, 0)))
13242 output_pic_addr_const (file, XEXP (x, 0), code);
13244 output_pic_addr_const (file, XEXP (x, 1), code);
13248 gcc_assert (CONST_INT_P (XEXP (x, 1)));
13249 output_pic_addr_const (file, XEXP (x, 1), code);
13251 output_pic_addr_const (file, XEXP (x, 0), code);
13257 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
13258 output_pic_addr_const (file, XEXP (x, 0), code);
13260 output_pic_addr_const (file, XEXP (x, 1), code);
13262 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
13266 if (XINT (x, 1) == UNSPEC_STACK_CHECK)
13268 bool f = i386_asm_output_addr_const_extra (file, x);
13273 gcc_assert (XVECLEN (x, 0) == 1);
13274 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
13275 switch (XINT (x, 1))
13278 fputs ("@GOT", file);
13280 case UNSPEC_GOTOFF:
13281 fputs ("@GOTOFF", file);
13283 case UNSPEC_PLTOFF:
13284 fputs ("@PLTOFF", file);
13287 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13288 "(%rip)" : "[rip]", file);
13290 case UNSPEC_GOTPCREL:
13291 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13292 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
13294 case UNSPEC_GOTTPOFF:
13295 /* FIXME: This might be @TPOFF in Sun ld too. */
13296 fputs ("@gottpoff", file);
13299 fputs ("@tpoff", file);
13301 case UNSPEC_NTPOFF:
13303 fputs ("@tpoff", file);
13305 fputs ("@ntpoff", file);
13307 case UNSPEC_DTPOFF:
13308 fputs ("@dtpoff", file);
13310 case UNSPEC_GOTNTPOFF:
13312 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13313 "@gottpoff(%rip)": "@gottpoff[rip]", file);
13315 fputs ("@gotntpoff", file);
13317 case UNSPEC_INDNTPOFF:
13318 fputs ("@indntpoff", file);
13321 case UNSPEC_MACHOPIC_OFFSET:
13323 machopic_output_function_base_name (file);
13327 output_operand_lossage ("invalid UNSPEC as operand");
13333 output_operand_lossage ("invalid expression as operand");
13337 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
13338 We need to emit DTP-relative relocations. */
13340 static void ATTRIBUTE_UNUSED
13341 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
13343 fputs (ASM_LONG, file);
13344 output_addr_const (file, x);
13345 fputs ("@dtpoff", file);
13351 fputs (", 0", file);
13354 gcc_unreachable ();
13358 /* Return true if X is a representation of the PIC register. This copes
13359 with calls from ix86_find_base_term, where the register might have
13360 been replaced by a cselib value. */
13363 ix86_pic_register_p (rtx x)
13365 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
13366 return (pic_offset_table_rtx
13367 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
13369 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
13372 /* Helper function for ix86_delegitimize_address.
13373 Attempt to delegitimize TLS local-exec accesses. */
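/* E.g. an access such as "movl %gs:x@ntpoff(,%eax,4), %edx" arrives
here as a SEG_GS address whose displacement wraps x in UNSPEC_NTPOFF;
we rebuild the equivalent address around the bare SYMBOL_REF. (A
sketch of the intent; the exact RTL depends on the access.) */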
13376 ix86_delegitimize_tls_address (rtx orig_x)
13378 rtx x = orig_x, unspec;
13379 struct ix86_address addr;
13381 if (!TARGET_TLS_DIRECT_SEG_REFS)
13385 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
13387 if (ix86_decompose_address (x, &addr) == 0
13388 || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
13389 || addr.disp == NULL_RTX
13390 || GET_CODE (addr.disp) != CONST)
13392 unspec = XEXP (addr.disp, 0);
13393 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
13394 unspec = XEXP (unspec, 0);
13395 if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
13397 x = XVECEXP (unspec, 0, 0);
13398 gcc_assert (GET_CODE (x) == SYMBOL_REF);
13399 if (unspec != XEXP (addr.disp, 0))
13400 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
13403 rtx idx = addr.index;
13404 if (addr.scale != 1)
13405 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
13406 x = gen_rtx_PLUS (Pmode, idx, x);
13409 x = gen_rtx_PLUS (Pmode, addr.base, x);
13410 if (MEM_P (orig_x))
13411 x = replace_equiv_address_nv (orig_x, x);
13415 /* In the name of slightly smaller debug output, and to cater to
13416 general assembler lossage, recognize PIC+GOTOFF and turn it back
13417 into a direct symbol reference.
13419 On Darwin, this is necessary to avoid a crash, because Darwin
13420 has a different PIC label for each routine but the DWARF debugging
13421 information is not associated with any particular routine, so it's
13422 necessary to remove references to the PIC label from RTL stored by
13423 the DWARF output code. */
13426 ix86_delegitimize_address (rtx x)
13428 rtx orig_x = delegitimize_mem_from_attrs (x);
13429 /* addend is NULL or some rtx if x is something+GOTOFF where
13430 something doesn't include the PIC register. */
13431 rtx addend = NULL_RTX;
13432 /* reg_addend is NULL or a multiple of some register. */
13433 rtx reg_addend = NULL_RTX;
13434 /* const_addend is NULL or a const_int. */
13435 rtx const_addend = NULL_RTX;
13436 /* This is the result, or NULL. */
13437 rtx result = NULL_RTX;
13446 if (GET_CODE (x) != CONST
13447 || GET_CODE (XEXP (x, 0)) != UNSPEC
13448 || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
13449 && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
13450 || !MEM_P (orig_x))
13451 return ix86_delegitimize_tls_address (orig_x);
13452 x = XVECEXP (XEXP (x, 0), 0, 0);
13453 if (GET_MODE (orig_x) != Pmode)
13455 x = simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
13462 if (GET_CODE (x) != PLUS
13463 || GET_CODE (XEXP (x, 1)) != CONST)
13464 return ix86_delegitimize_tls_address (orig_x);
13466 if (ix86_pic_register_p (XEXP (x, 0)))
13467 /* %ebx + GOT/GOTOFF */
13469 else if (GET_CODE (XEXP (x, 0)) == PLUS)
13471 /* %ebx + %reg * scale + GOT/GOTOFF */
13472 reg_addend = XEXP (x, 0);
13473 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
13474 reg_addend = XEXP (reg_addend, 1);
13475 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
13476 reg_addend = XEXP (reg_addend, 0);
13479 reg_addend = NULL_RTX;
13480 addend = XEXP (x, 0);
13484 addend = XEXP (x, 0);
13486 x = XEXP (XEXP (x, 1), 0);
13487 if (GET_CODE (x) == PLUS
13488 && CONST_INT_P (XEXP (x, 1)))
13490 const_addend = XEXP (x, 1);
13494 if (GET_CODE (x) == UNSPEC
13495 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
13496 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
13497 result = XVECEXP (x, 0, 0);
13499 if (TARGET_MACHO && darwin_local_data_pic (x)
13500 && !MEM_P (orig_x))
13501 result = XVECEXP (x, 0, 0);
13504 return ix86_delegitimize_tls_address (orig_x);
13507 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
13509 result = gen_rtx_PLUS (Pmode, reg_addend, result);
13512 /* If the rest of original X doesn't involve the PIC register, add
13513 addend and subtract pic_offset_table_rtx. This can happen e.g.
13515 leal (%ebx, %ecx, 4), %ecx
13517 movl foo@GOTOFF(%ecx), %edx
13518 in which case we return (%ecx - %ebx) + foo. */
13519 if (pic_offset_table_rtx)
13520 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
13521 pic_offset_table_rtx),
13526 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
13528 result = simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
13529 if (result == NULL_RTX)
13535 /* If X is a machine specific address (i.e. a symbol or label being
13536 referenced as a displacement from the GOT implemented using an
13537 UNSPEC), then return the base term. Otherwise return X. */
13540 ix86_find_base_term (rtx x)
13546 if (GET_CODE (x) != CONST)
13548 term = XEXP (x, 0);
13549 if (GET_CODE (term) == PLUS
13550 && (CONST_INT_P (XEXP (term, 1))
13551 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
13552 term = XEXP (term, 0);
13553 if (GET_CODE (term) != UNSPEC
13554 || (XINT (term, 1) != UNSPEC_GOTPCREL
13555 && XINT (term, 1) != UNSPEC_PCREL))
13558 return XVECEXP (term, 0, 0);
13561 return ix86_delegitimize_address (x);
13565 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
13566 int fp, FILE *file)
13568 const char *suffix;
13570 if (mode == CCFPmode || mode == CCFPUmode)
13572 code = ix86_fp_compare_code_to_integer (code);
13576 code = reverse_condition (code);
13627 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
13631 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
13632 Those same assemblers have the same but opposite lossage on cmov. */
13633 if (mode == CCmode)
13634 suffix = fp ? "nbe" : "a";
13635 else if (mode == CCCmode)
13638 gcc_unreachable ();
13654 gcc_unreachable ();
13658 gcc_assert (mode == CCmode || mode == CCCmode);
13675 gcc_unreachable ();
13679 /* ??? As above. */
13680 gcc_assert (mode == CCmode || mode == CCCmode);
13681 suffix = fp ? "nb" : "ae";
13684 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
13688 /* ??? As above. */
13689 if (mode == CCmode)
13691 else if (mode == CCCmode)
13692 suffix = fp ? "nb" : "ae";
13694 gcc_unreachable ();
13697 suffix = fp ? "u" : "p";
13700 suffix = fp ? "nu" : "np";
13703 gcc_unreachable ();
13705 fputs (suffix, file);
13708 /* Print the name of register X to FILE based on its machine mode and number.
13709 If CODE is 'w', pretend the mode is HImode.
13710 If CODE is 'b', pretend the mode is QImode.
13711 If CODE is 'k', pretend the mode is SImode.
13712 If CODE is 'q', pretend the mode is DImode.
13713 If CODE is 'x', pretend the mode is V4SFmode.
13714 If CODE is 't', pretend the mode is V8SFmode.
13715 If CODE is 'h', pretend the reg is the 'high' byte register.
13716 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack operand.
13717 If CODE is 'd', duplicate the operand for AVX instruction.
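/* For example, given (reg:SI ax) this prints "eax" by default, "al"
for code 'b', "ax" for 'w', "ah" for 'h', and "rax" for 'q' (the
latter only on 64-bit targets). (Illustrative.) */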
13721 print_reg (rtx x, int code, FILE *file)
13724 bool duplicated = code == 'd' && TARGET_AVX;
13726 gcc_assert (x == pc_rtx
13727 || (REGNO (x) != ARG_POINTER_REGNUM
13728 && REGNO (x) != FRAME_POINTER_REGNUM
13729 && REGNO (x) != FLAGS_REG
13730 && REGNO (x) != FPSR_REG
13731 && REGNO (x) != FPCR_REG));
13733 if (ASSEMBLER_DIALECT == ASM_ATT)
13738 gcc_assert (TARGET_64BIT);
13739 fputs ("rip", file);
13743 if (code == 'w' || MMX_REG_P (x))
13745 else if (code == 'b')
13747 else if (code == 'k')
13749 else if (code == 'q')
13751 else if (code == 'y')
13753 else if (code == 'h')
13755 else if (code == 'x')
13757 else if (code == 't')
13760 code = GET_MODE_SIZE (GET_MODE (x));
13762 /* Irritatingly, the AMD extended registers use a different naming
13763 convention from the normal registers. */
13764 if (REX_INT_REG_P (x))
13766 gcc_assert (TARGET_64BIT);
13770 error ("extended registers have no high halves");
13773 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
13776 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
13779 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
13782 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
13785 error ("unsupported operand size for extended register");
13795 if (STACK_TOP_P (x))
13804 if (! ANY_FP_REG_P (x))
13805 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
13810 reg = hi_reg_name[REGNO (x)];
13813 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
13815 reg = qi_reg_name[REGNO (x)];
13818 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
13820 reg = qi_high_reg_name[REGNO (x)];
13825 gcc_assert (!duplicated);
13827 fputs (hi_reg_name[REGNO (x)] + 1, file);
13832 gcc_unreachable ();
13838 if (ASSEMBLER_DIALECT == ASM_ATT)
13839 fprintf (file, ", %%%s", reg);
13841 fprintf (file, ", %s", reg);
13845 /* Locate some local-dynamic symbol still in use by this function
13846 so that we can print its name in some tls_local_dynamic_base pattern. */
13850 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
13854 if (GET_CODE (x) == SYMBOL_REF
13855 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
13857 cfun->machine->some_ld_name = XSTR (x, 0);
13864 static const char *
13865 get_some_local_dynamic_name (void)
13869 if (cfun->machine->some_ld_name)
13870 return cfun->machine->some_ld_name;
13872 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
13873 if (NONDEBUG_INSN_P (insn)
13874 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
13875 return cfun->machine->some_ld_name;
13880 /* Meaning of CODE:
13881 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
13882 C -- print opcode suffix for set/cmov insn.
13883 c -- like C, but print reversed condition
13884 F,f -- likewise, but for floating-point.
13885 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
13887 R -- print the prefix for register names.
13888 z -- print the opcode suffix for the size of the current operand.
13889 Z -- likewise, with special suffixes for x87 instructions.
13890 * -- print a star (in certain assembler syntax)
13891 A -- print an absolute memory reference.
13892 w -- print the operand as if it's a "word" (HImode) even if it isn't.
13893 s -- print a shift double count, followed by the assembler's argument delimiter.
13895 b -- print the QImode name of the register for the indicated operand.
13896 %b0 would print %al if operands[0] is reg 0.
13897 w -- likewise, print the HImode name of the register.
13898 k -- likewise, print the SImode name of the register.
13899 q -- likewise, print the DImode name of the register.
13900 x -- likewise, print the V4SFmode name of the register.
13901 t -- likewise, print the V8SFmode name of the register.
13902 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
13903 y -- print "st(0)" instead of "st" as a register.
13904 d -- print duplicated register operand for AVX instruction.
13905 D -- print condition for SSE cmp instruction.
13906 P -- if PIC, print an @PLT suffix.
13907 X -- don't print any sort of PIC '@' suffix for a symbol.
13908 & -- print some in-use local-dynamic symbol name.
13909 H -- print a memory address offset by 8; used for sse high-parts
13910 Y -- print condition for XOP pcom* instruction.
13911 + -- print a branch hint as 'cs' or 'ds' prefix
13912 ; -- print a semicolon (after prefixes due to bug in older gas).
13913 @ -- print a segment register of thread base pointer load
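/* For instance, in a (hypothetical) template such as
"add%z0\t{%1, %0|%0, %1}" the 'z' code expands to "b", "w", "l" or
"q" from the mode of operands[0], and the {att|intel} braces select
the assembler dialect. (A sketch of typical use.) */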
13917 ix86_print_operand (FILE *file, rtx x, int code)
13924 if (ASSEMBLER_DIALECT == ASM_ATT)
13930 const char *name = get_some_local_dynamic_name ();
13932 output_operand_lossage ("'%%&' used without any "
13933 "local dynamic TLS references");
13935 assemble_name (file, name);
13940 switch (ASSEMBLER_DIALECT)
13947 /* Intel syntax. For absolute addresses, registers should not
13948 be surrounded by braces. */
13952 ix86_print_operand (file, x, 0);
13959 gcc_unreachable ();
13962 ix86_print_operand (file, x, 0);
13967 if (ASSEMBLER_DIALECT == ASM_ATT)
13972 if (ASSEMBLER_DIALECT == ASM_ATT)
13977 if (ASSEMBLER_DIALECT == ASM_ATT)
13982 if (ASSEMBLER_DIALECT == ASM_ATT)
13987 if (ASSEMBLER_DIALECT == ASM_ATT)
13992 if (ASSEMBLER_DIALECT == ASM_ATT)
13997 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13999 /* Opcodes don't get size suffixes if using Intel opcodes. */
14000 if (ASSEMBLER_DIALECT == ASM_INTEL)
14003 switch (GET_MODE_SIZE (GET_MODE (x)))
14022 output_operand_lossage
14023 ("invalid operand size for operand code '%c'", code);
14028 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14030 (0, "non-integer operand used with operand code '%c'", code);
14034 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
14035 if (ASSEMBLER_DIALECT == ASM_INTEL)
14038 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
14040 switch (GET_MODE_SIZE (GET_MODE (x)))
14043 #ifdef HAVE_AS_IX86_FILDS
14053 #ifdef HAVE_AS_IX86_FILDQ
14056 fputs ("ll", file);
14064 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14066 /* 387 opcodes don't get size suffixes
14067 if the operands are registers. */
14068 if (STACK_REG_P (x))
14071 switch (GET_MODE_SIZE (GET_MODE (x)))
14092 output_operand_lossage
14093 ("invalid operand type used with operand code '%c'", code);
14097 output_operand_lossage
14098 ("invalid operand size for operand code '%c'", code);
14115 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
14117 ix86_print_operand (file, x, 0);
14118 fputs (", ", file);
14123 /* A little bit of brain damage here: the SSE compare instructions
14124 use completely different names for the comparisons than the
14125 fp conditional moves do. */
14128 switch (GET_CODE (x))
14131 fputs ("eq", file);
14134 fputs ("eq_us", file);
14137 fputs ("lt", file);
14140 fputs ("nge", file);
14143 fputs ("le", file);
14146 fputs ("ngt", file);
14149 fputs ("unord", file);
14152 fputs ("neq", file);
14155 fputs ("neq_oq", file);
14158 fputs ("ge", file);
14161 fputs ("nlt", file);
14164 fputs ("gt", file);
14167 fputs ("nle", file);
14170 fputs ("ord", file);
14173 output_operand_lossage ("operand is not a condition code, "
14174 "invalid operand code 'D'");
14180 switch (GET_CODE (x))
14184 fputs ("eq", file);
14188 fputs ("lt", file);
14192 fputs ("le", file);
14195 fputs ("unord", file);
14199 fputs ("neq", file);
14203 fputs ("nlt", file);
14207 fputs ("nle", file);
14210 fputs ("ord", file);
14213 output_operand_lossage ("operand is not a condition code, "
14214 "invalid operand code 'D'");
14220 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14221 if (ASSEMBLER_DIALECT == ASM_ATT)
14223 switch (GET_MODE (x))
14225 case HImode: putc ('w', file); break;
14227 case SFmode: putc ('l', file); break;
14229 case DFmode: putc ('q', file); break;
14230 default: gcc_unreachable ();
14237 if (!COMPARISON_P (x))
14239 output_operand_lossage ("operand is neither a constant nor a "
14240 "condition code, invalid operand code "
14244 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
14247 if (!COMPARISON_P (x))
14249 output_operand_lossage ("operand is neither a constant nor a "
14250 "condition code, invalid operand code "
14254 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14255 if (ASSEMBLER_DIALECT == ASM_ATT)
14258 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
14261 /* Like above, but reverse condition */
14263 /* Check to see if argument to %c is really a constant
14264 and not a condition code which needs to be reversed. */
14265 if (!COMPARISON_P (x))
14267 output_operand_lossage ("operand is neither a constant nor a "
14268 "condition code, invalid operand "
14272 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
14275 if (!COMPARISON_P (x))
14277 output_operand_lossage ("operand is neither a constant nor a "
14278 "condition code, invalid operand "
14282 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14283 if (ASSEMBLER_DIALECT == ASM_ATT)
14286 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
14290 /* It doesn't actually matter what mode we use here, as we're
14291 only going to use this for printing. */
14292 x = adjust_address_nv (x, DImode, 8);
14300 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
14303 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
14306 int pred_val = INTVAL (XEXP (x, 0));
14308 if (pred_val < REG_BR_PROB_BASE * 45 / 100
14309 || pred_val > REG_BR_PROB_BASE * 55 / 100)
14311 int taken = pred_val > REG_BR_PROB_BASE / 2;
14312 int cputaken = final_forward_branch_p (current_output_insn) == 0;
14314 /* Emit hints only when the default branch prediction
14315 heuristics would fail. */
14316 if (taken != cputaken)
14318 /* We use 3e (DS) prefix for taken branches and
14319 2e (CS) prefix for not taken branches. */
14321 fputs ("ds ; ", file);
14323 fputs ("cs ; ", file);
14331 switch (GET_CODE (x))
14334 fputs ("neq", file);
14337 fputs ("eq", file);
14341 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
14345 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
14349 fputs ("le", file);
14353 fputs ("lt", file);
14356 fputs ("unord", file);
14359 fputs ("ord", file);
14362 fputs ("ueq", file);
14365 fputs ("nlt", file);
14368 fputs ("nle", file);
14371 fputs ("ule", file);
14374 fputs ("ult", file);
14377 fputs ("une", file);
14380 output_operand_lossage ("operand is not a condition code, "
14381 "invalid operand code 'Y'");
14387 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
14393 if (ASSEMBLER_DIALECT == ASM_ATT)
14396 /* The kernel uses a different segment register for performance
14397 reasons; a system call would not have to trash the userspace
14398 segment register, which would be expensive. */
14399 if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
14400 fputs ("fs", file);
14402 fputs ("gs", file);
14406 output_operand_lossage ("invalid operand code '%c'", code);
14411 print_reg (x, code, file);
14413 else if (MEM_P (x))
14415 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
14416 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
14417 && GET_MODE (x) != BLKmode)
14420 switch (GET_MODE_SIZE (GET_MODE (x)))
14422 case 1: size = "BYTE"; break;
14423 case 2: size = "WORD"; break;
14424 case 4: size = "DWORD"; break;
14425 case 8: size = "QWORD"; break;
14426 case 12: size = "TBYTE"; break;
14428 if (GET_MODE (x) == XFmode)
14433 case 32: size = "YMMWORD"; break;
14435 gcc_unreachable ();
14438 /* Check for explicit size override (codes 'b', 'w' and 'k') */
14441 else if (code == 'w')
14443 else if (code == 'k')
14446 fputs (size, file);
14447 fputs (" PTR ", file);
14451 /* Avoid (%rip) for call operands. */
14452 if (CONSTANT_ADDRESS_P (x) && code == 'P'
14453 && !CONST_INT_P (x))
14454 output_addr_const (file, x);
14455 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
14456 output_operand_lossage ("invalid constraints for operand");
14458 output_address (x);
14461 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
14466 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14467 REAL_VALUE_TO_TARGET_SINGLE (r, l);
14469 if (ASSEMBLER_DIALECT == ASM_ATT)
14471 /* Sign extend 32bit SFmode immediate to 8 bytes. */
14473 fprintf (file, "0x%08llx", (unsigned long long) (int) l);
14475 fprintf (file, "0x%08x", (unsigned int) l);
14478 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
14483 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14484 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
14486 if (ASSEMBLER_DIALECT == ASM_ATT)
14488 fprintf (file, "0x%lx%08lx", l[1] & 0xffffffff, l[0] & 0xffffffff);
14491 /* These float cases don't actually occur as immediate operands. */
14492 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
14496 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14497 fputs (dstr, file);
14502 /* We have patterns that allow zero sets of memory, for instance.
14503 In 64-bit mode, we should probably support all 8-byte vectors,
14504 since we can in fact encode that into an immediate. */
14505 if (GET_CODE (x) == CONST_VECTOR)
14507 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
14513 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
14515 if (ASSEMBLER_DIALECT == ASM_ATT)
14518 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
14519 || GET_CODE (x) == LABEL_REF)
14521 if (ASSEMBLER_DIALECT == ASM_ATT)
14524 fputs ("OFFSET FLAT:", file);
14527 if (CONST_INT_P (x))
14528 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
14529 else if (flag_pic || MACHOPIC_INDIRECT)
14530 output_pic_addr_const (file, x, code);
14532 output_addr_const (file, x);
14537 ix86_print_operand_punct_valid_p (unsigned char code)
14539 return (code == '@' || code == '*' || code == '+'
14540 || code == '&' || code == ';');
14543 /* Print a memory operand whose address is ADDR. */
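/* For example, an address with base %eax, displacement 8, index %ebx
and scale 4 prints as "8(%eax,%ebx,4)" in AT&T syntax and as
"[eax+8+ebx*4]" in Intel syntax. (Illustrative.) */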
14546 ix86_print_operand_address (FILE *file, rtx addr)
14548 struct ix86_address parts;
14549 rtx base, index, disp;
14551 int ok = ix86_decompose_address (addr, &parts);
14556 index = parts.index;
14558 scale = parts.scale;
14566 if (ASSEMBLER_DIALECT == ASM_ATT)
14568 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
14571 gcc_unreachable ();
14574 /* Use one byte shorter RIP relative addressing for 64bit mode. */
14575 if (TARGET_64BIT && !base && !index)
14579 if (GET_CODE (disp) == CONST
14580 && GET_CODE (XEXP (disp, 0)) == PLUS
14581 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14582 symbol = XEXP (XEXP (disp, 0), 0);
14584 if (GET_CODE (symbol) == LABEL_REF
14585 || (GET_CODE (symbol) == SYMBOL_REF
14586 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
14589 if (!base && !index)
14591 /* A displacement-only address requires special attention. */
14593 if (CONST_INT_P (disp))
14595 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
14596 fputs ("ds:", file);
14597 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
14600 output_pic_addr_const (file, disp, 0);
14602 output_addr_const (file, disp);
14606 if (ASSEMBLER_DIALECT == ASM_ATT)
14611 output_pic_addr_const (file, disp, 0);
14612 else if (GET_CODE (disp) == LABEL_REF)
14613 output_asm_label (disp);
14615 output_addr_const (file, disp);
14620 print_reg (base, 0, file);
14624 print_reg (index, 0, file);
14626 fprintf (file, ",%d", scale);
14632 rtx offset = NULL_RTX;
14636 /* Pull out the offset of a symbol; print any symbol itself. */
14637 if (GET_CODE (disp) == CONST
14638 && GET_CODE (XEXP (disp, 0)) == PLUS
14639 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14641 offset = XEXP (XEXP (disp, 0), 1);
14642 disp = gen_rtx_CONST (VOIDmode,
14643 XEXP (XEXP (disp, 0), 0));
14647 output_pic_addr_const (file, disp, 0);
14648 else if (GET_CODE (disp) == LABEL_REF)
14649 output_asm_label (disp);
14650 else if (CONST_INT_P (disp))
14653 output_addr_const (file, disp);
14659 print_reg (base, 0, file);
14662 if (INTVAL (offset) >= 0)
14664 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14668 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14675 print_reg (index, 0, file);
14677 fprintf (file, "*%d", scale);
14684 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
14687 i386_asm_output_addr_const_extra (FILE *file, rtx x)
14691 if (GET_CODE (x) != UNSPEC)
14694 op = XVECEXP (x, 0, 0);
14695 switch (XINT (x, 1))
14697 case UNSPEC_GOTTPOFF:
14698 output_addr_const (file, op);
14699 /* FIXME: This might be @TPOFF in Sun ld. */
14700 fputs ("@gottpoff", file);
14703 output_addr_const (file, op);
14704 fputs ("@tpoff", file);
14706 case UNSPEC_NTPOFF:
14707 output_addr_const (file, op);
14709 fputs ("@tpoff", file);
14711 fputs ("@ntpoff", file);
14713 case UNSPEC_DTPOFF:
14714 output_addr_const (file, op);
14715 fputs ("@dtpoff", file);
14717 case UNSPEC_GOTNTPOFF:
14718 output_addr_const (file, op);
14720 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
14721 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
14723 fputs ("@gotntpoff", file);
14725 case UNSPEC_INDNTPOFF:
14726 output_addr_const (file, op);
14727 fputs ("@indntpoff", file);
14730 case UNSPEC_MACHOPIC_OFFSET:
14731 output_addr_const (file, op);
14733 machopic_output_function_base_name (file);
14737 case UNSPEC_STACK_CHECK:
14741 gcc_assert (flag_split_stack);
14743 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
14744 offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
14746 gcc_unreachable ();
14749 fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
14760 /* Split one or more double-mode RTL references into pairs of half-mode
14761 references. The RTL can be REG, offsettable MEM, integer constant, or
14762 CONST_DOUBLE. "operands" is a pointer to an array of double-mode RTLs to
14763 split and "num" is its length. lo_half and hi_half are output arrays
14764 that parallel "operands". */
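/* E.g. on a 32-bit target a DImode pseudo is split into two SImode
subregs at byte offsets 0 and 4, and a DImode constant is split the
same way via simplify_gen_subreg. (Illustration of the code below.) */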
14767 split_double_mode (enum machine_mode mode, rtx operands[],
14768 int num, rtx lo_half[], rtx hi_half[])
14770 enum machine_mode half_mode;
14776 half_mode = DImode;
14779 half_mode = SImode;
14782 gcc_unreachable ();
14785 byte = GET_MODE_SIZE (half_mode);
14789 rtx op = operands[num];
14791 /* simplify_subreg refuses to split volatile memory addresses,
14792 but we still have to handle them. */
14795 lo_half[num] = adjust_address (op, half_mode, 0);
14796 hi_half[num] = adjust_address (op, half_mode, byte);
14800 lo_half[num] = simplify_gen_subreg (half_mode, op,
14801 GET_MODE (op) == VOIDmode
14802 ? mode : GET_MODE (op), 0);
14803 hi_half[num] = simplify_gen_subreg (half_mode, op,
14804 GET_MODE (op) == VOIDmode
14805 ? mode : GET_MODE (op), byte);
14810 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
14811 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
14812 is the expression of the binary operation. The output may either be
14813 emitted here, or returned to the caller, like all output_* functions.
14815 There is no guarantee that the operands are the same mode, as they
14816 might be within FLOAT or FLOAT_EXTEND expressions. */
14818 #ifndef SYSV386_COMPAT
14819 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
14820 wants to fix the assemblers because that causes incompatibility
14821 with gcc. No-one wants to fix gcc because that causes
14822 incompatibility with assemblers... You can use the option of
14823 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
14824 #define SYSV386_COMPAT 1
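/* Concretely (an illustration of the lossage): when the destination is
not %st(0), those assemblers perform what is written as "fsub" as
"fsubr" and vice versa, likewise for "fdiv"/"fdivr"; the templates
below therefore pick the mnemonic according to SYSV386_COMPAT so the
hardware performs the intended operation. */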
14828 output_387_binary_op (rtx insn, rtx *operands)
14830 static char buf[40];
14833 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
14835 #ifdef ENABLE_CHECKING
14836 /* Even if we do not want to check the inputs, this documents the input
14837 constraints, which helps in understanding the following code. */
14838 if (STACK_REG_P (operands[0])
14839 && ((REG_P (operands[1])
14840 && REGNO (operands[0]) == REGNO (operands[1])
14841 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
14842 || (REG_P (operands[2])
14843 && REGNO (operands[0]) == REGNO (operands[2])
14844 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
14845 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
14848 gcc_assert (is_sse);
14851 switch (GET_CODE (operands[3]))
14854 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14855 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14863 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14864 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14872 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14873 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14881 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14882 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14890 gcc_unreachable ();
14897 strcpy (buf, ssep);
14898 if (GET_MODE (operands[0]) == SFmode)
14899 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
14901 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
14905 strcpy (buf, ssep + 1);
14906 if (GET_MODE (operands[0]) == SFmode)
14907 strcat (buf, "ss\t{%2, %0|%0, %2}");
14909 strcat (buf, "sd\t{%2, %0|%0, %2}");
14915 switch (GET_CODE (operands[3]))
14919 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
14921 rtx temp = operands[2];
14922 operands[2] = operands[1];
14923 operands[1] = temp;
14926 /* We know operands[0] == operands[1]. */
14928 if (MEM_P (operands[2]))
14934 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14936 if (STACK_TOP_P (operands[0]))
14937 /* How is it that we are storing to a dead operand[2]?
14938 Well, presumably operands[1] is dead too. We can't
14939 store the result to st(0) as st(0) gets popped on this
14940 instruction. Instead store to operands[2] (which I
14941 think has to be st(1)). st(1) will be popped later.
14942 gcc <= 2.8.1 didn't have this check and generated
14943 assembly code that the Unixware assembler rejected. */
14944 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14946 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14950 if (STACK_TOP_P (operands[0]))
14951 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14953 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14958 if (MEM_P (operands[1]))
14964 if (MEM_P (operands[2]))
14970 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14973 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
14974 derived assemblers, confusingly reverse the direction of
14975 the operation for fsub{r} and fdiv{r} when the
14976 destination register is not st(0). The Intel assembler
14977 doesn't have this brain damage. Read !SYSV386_COMPAT to
14978 figure out what the hardware really does. */
14979 if (STACK_TOP_P (operands[0]))
14980 p = "{p\t%0, %2|rp\t%2, %0}";
14982 p = "{rp\t%2, %0|p\t%0, %2}";
14984 if (STACK_TOP_P (operands[0]))
14985 /* As above for fmul/fadd, we can't store to st(0). */
14986 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14988 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14993 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
14996 if (STACK_TOP_P (operands[0]))
14997 p = "{rp\t%0, %1|p\t%1, %0}";
14999 p = "{p\t%1, %0|rp\t%0, %1}";
15001 if (STACK_TOP_P (operands[0]))
15002 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
15004 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
15009 if (STACK_TOP_P (operands[0]))
15011 if (STACK_TOP_P (operands[1]))
15012 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
15014 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
15017 else if (STACK_TOP_P (operands[1]))
15020 p = "{\t%1, %0|r\t%0, %1}";
15022 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
15028 p = "{r\t%2, %0|\t%0, %2}";
15030 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
15036 gcc_unreachable ();
15043 /* Return the mode needed for the entity in the optimize_mode_switching pass. */
15046 ix86_mode_needed (int entity, rtx insn)
15048 enum attr_i387_cw mode;
15050 /* The mode UNINITIALIZED is used to store the control word after a
15051 function call or ASM pattern. The mode ANY specifies that the function
15052 has no requirements on the control word and makes no changes in the
15053 bits we are interested in. */
15056 || (NONJUMP_INSN_P (insn)
15057 && (asm_noperands (PATTERN (insn)) >= 0
15058 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
15059 return I387_CW_UNINITIALIZED;
15061 if (recog_memoized (insn) < 0)
15062 return I387_CW_ANY;
15064 mode = get_attr_i387_cw (insn);
15069 if (mode == I387_CW_TRUNC)
15074 if (mode == I387_CW_FLOOR)
15079 if (mode == I387_CW_CEIL)
15084 if (mode == I387_CW_MASK_PM)
15089 gcc_unreachable ();
15092 return I387_CW_ANY;
15095 /* Output code to initialize the control word copies used by trunc?f?i and
15096 rounding patterns. The current control word is stored, and a copy with
15097 the rounding/masking bits required by MODE is placed in its stack slot. */
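/* A reference sketch of the x87 control word layout this relies on
   (the constants below match the Intel manuals, but treat this as a
   reader's aid, not normative): bits 10-11 form the rounding-control
   field -- 00 nearest, 01 down, 10 up, 11 truncate -- hence the
   0x0c00 masks, and bit 5 (0x0020) masks the precision exception.
   For I387_CW_TRUNC the emitted sequence is then roughly:

       fnstcw  stored_cw          ; save the current control word
       movzwl  stored_cw, %eax
       orw     $0x0c00, %ax       ; force round-to-zero
       movw    %ax, new_cw        ; the copy loaded by fldcw later

   (illustrative only; whether the 16-bit ior or the insv variant is
   used depends on the target checks below).  */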
15100 emit_i387_cw_initialization (int mode)
15102 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
15105 enum ix86_stack_slot slot;
15107 rtx reg = gen_reg_rtx (HImode);
15109 emit_insn (gen_x86_fnstcw_1 (stored_mode));
15110 emit_move_insn (reg, copy_rtx (stored_mode));
15112 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
15113 || optimize_function_for_size_p (cfun))
15117 case I387_CW_TRUNC:
15118 /* round toward zero (truncate) */
15119 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
15120 slot = SLOT_CW_TRUNC;
15123 case I387_CW_FLOOR:
15124 /* round down toward -oo */
15125 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15126 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
15127 slot = SLOT_CW_FLOOR;
15131 /* round up toward +oo */
15132 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15133 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
15134 slot = SLOT_CW_CEIL;
15137 case I387_CW_MASK_PM:
15138 /* mask precision exception for nearbyint() */
15139 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15140 slot = SLOT_CW_MASK_PM;
15144 gcc_unreachable ();
15151 case I387_CW_TRUNC:
15152 /* round toward zero (truncate) */
15153 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
15154 slot = SLOT_CW_TRUNC;
15157 case I387_CW_FLOOR:
15158 /* round down toward -oo */
15159 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
15160 slot = SLOT_CW_FLOOR;
15164 /* round up toward +oo */
15165 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
15166 slot = SLOT_CW_CEIL;
15169 case I387_CW_MASK_PM:
15170 /* mask precision exception for nearbyint() */
15171 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15172 slot = SLOT_CW_MASK_PM;
15176 gcc_unreachable ();
15180 gcc_assert (slot < MAX_386_STACK_LOCALS);
15182 new_mode = assign_386_stack_local (HImode, slot);
15183 emit_move_insn (new_mode, reg);
15186 /* Output code for INSN to convert a float to a signed int. OPERANDS
15187 are the insn operands. The output may be [HSD]Imode and the input
15188 operand may be [SDX]Fmode. */
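/* For instance (an illustrative sketch, not the verbatim output), a
   DImode truncation where the value stays live assembles to roughly:

       fld     %st(0)             ; duplicate, since fistp pops
       fldcw   new_cw             ; switch to round-to-zero
       fistpll (%eax)             ; store the 64-bit integer, pop
       fldcw   stored_cw          ; restore the original control word

   matching the fld/fldcw/fistp/fldcw templates emitted below.  */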
15191 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
15193 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15194 int dimode_p = GET_MODE (operands[0]) == DImode;
15195 int round_mode = get_attr_i387_cw (insn);
15197 /* Jump through a hoop or two for DImode, since the hardware has no
15198 non-popping instruction. We used to do this a different way, but
15199 that was somewhat fragile and broke with post-reload splitters. */
15200 if ((dimode_p || fisttp) && !stack_top_dies)
15201 output_asm_insn ("fld\t%y1", operands);
15203 gcc_assert (STACK_TOP_P (operands[1]));
15204 gcc_assert (MEM_P (operands[0]));
15205 gcc_assert (GET_MODE (operands[1]) != TFmode);
15208 output_asm_insn ("fisttp%Z0\t%0", operands);
15211 if (round_mode != I387_CW_ANY)
15212 output_asm_insn ("fldcw\t%3", operands);
15213 if (stack_top_dies || dimode_p)
15214 output_asm_insn ("fistp%Z0\t%0", operands);
15216 output_asm_insn ("fist%Z0\t%0", operands);
15217 if (round_mode != I387_CW_ANY)
15218 output_asm_insn ("fldcw\t%2", operands);
15224 /* Output code for x87 ffreep insn. The OPNO argument, which may only
15225 have the values zero or one, indicates the ffreep insn's operand
15226 from the OPERANDS array. */
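/* ffreep %st(N) encodes as the two bytes 0xdf 0xc0+N.  When the
   assembler lacks the mnemonic (no HAVE_AS_IX86_FFREEP), the code
   below emits that encoding as a 16-bit word: on this little-endian
   target ASM_SHORT "0xc0df", for example, lays down 0xdf 0xc0,
   i.e. ffreep %st(0).  (A sketch of the encoding; the snprintf
   below shows the exact string produced.)  */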
15228 static const char *
15229 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
15231 if (TARGET_USE_FFREEP)
15232 #ifdef HAVE_AS_IX86_FFREEP
15233 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
15236 static char retval[32];
15237 int regno = REGNO (operands[opno]);
15239 gcc_assert (FP_REGNO_P (regno));
15241 regno -= FIRST_STACK_REG;
15243 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
15248 return opno ? "fstp\t%y1" : "fstp\t%y0";
15252 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
15253 should be used. UNORDERED_P is true when fucom should be used. */
15256 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
15258 int stack_top_dies;
15259 rtx cmp_op0, cmp_op1;
15260 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
15264 cmp_op0 = operands[0];
15265 cmp_op1 = operands[1];
15269 cmp_op0 = operands[1];
15270 cmp_op1 = operands[2];
15275 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
15276 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
15277 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
15278 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
15280 if (GET_MODE (operands[0]) == SFmode)
15282 return &ucomiss[TARGET_AVX ? 0 : 1];
15284 return &comiss[TARGET_AVX ? 0 : 1];
15287 return &ucomisd[TARGET_AVX ? 0 : 1];
15289 return &comisd[TARGET_AVX ? 0 : 1];
15292 gcc_assert (STACK_TOP_P (cmp_op0));
15294 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15296 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
15298 if (stack_top_dies)
15300 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
15301 return output_387_ffreep (operands, 1);
15304 return "ftst\n\tfnstsw\t%0";
15307 if (STACK_REG_P (cmp_op1)
15309 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
15310 && REGNO (cmp_op1) != FIRST_STACK_REG)
15312 /* If the top of the 387 stack dies, and the other operand
15313 is also a stack register that dies, then this must be a
15314 `fcompp' float compare. */
15318 /* There is no double popping fcomi variant. Fortunately,
15319 eflags is immune from the fstp's cc clobbering. */
15321 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
15323 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
15324 return output_387_ffreep (operands, 0);
15329 return "fucompp\n\tfnstsw\t%0";
15331 return "fcompp\n\tfnstsw\t%0";
15336 /* Encoded here as eflags_p << 3 | intmode << 2 | unordered_p << 1 | stack_top_dies. */
15338 static const char * const alt[16] =
15340 "fcom%Z2\t%y2\n\tfnstsw\t%0",
15341 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
15342 "fucom%Z2\t%y2\n\tfnstsw\t%0",
15343 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
15345 "ficom%Z2\t%y2\n\tfnstsw\t%0",
15346 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
15350 "fcomi\t{%y1, %0|%0, %y1}",
15351 "fcomip\t{%y1, %0|%0, %y1}",
15352 "fucomi\t{%y1, %0|%0, %y1}",
15353 "fucomip\t{%y1, %0|%0, %y1}",
15364 mask = eflags_p << 3;
15365 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
15366 mask |= unordered_p << 1;
15367 mask |= stack_top_dies;
15369 gcc_assert (mask < 16);
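  /* Worked example (illustrative): an ordered fcomi compare
     (eflags_p = 1) on a non-integer operand where the stack top dies
     gives mask = (1 << 3) | 1 = 9, selecting "fcomip" above.  */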
15378 ix86_output_addr_vec_elt (FILE *file, int value)
15380 const char *directive = ASM_LONG;
15384 directive = ASM_QUAD;
15386 gcc_assert (!TARGET_64BIT);
15389 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
15393 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
15395 const char *directive = ASM_LONG;
15398 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
15399 directive = ASM_QUAD;
15401 gcc_assert (!TARGET_64BIT);
15403 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
15404 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
15405 fprintf (file, "%s%s%d-%s%d\n",
15406 directive, LPREFIX, value, LPREFIX, rel);
15407 else if (HAVE_AS_GOTOFF_IN_DATA)
15408 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
15410 else if (TARGET_MACHO)
15412 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
15413 machopic_output_function_base_name (file);
15418 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
15419 GOT_SYMBOL_NAME, LPREFIX, value);
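/* The branches above emit entries such as (illustrative; exact
   directives vary by target):

       .long .L3-.L2                          ; label difference
       .long .L3@GOTOFF                       ; @GOTOFF allowed in data
       .long _GLOBAL_OFFSET_TABLE_+[.-.L3]    ; PIC fallback

   with .quad instead of .long for 64-bit DImode case vectors.  */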
15422 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
15426 ix86_expand_clear (rtx dest)
15430 /* We play register width games, which are only valid after reload. */
15431 gcc_assert (reload_completed);
15433 /* Avoid HImode and its attendant prefix byte. */
15434 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
15435 dest = gen_rtx_REG (SImode, REGNO (dest));
15436 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
15438 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
15439 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
15441 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15442 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
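/* The reason for the PARALLEL above, as a sketch: "xor reg, reg"
   clobbers the flags while "mov $0, reg" does not, so the xor form
   must declare the clobber:

       (parallel [(set (reg:SI ax) (const_int 0))
                  (clobber (reg:CC flags))])

   which later assembles to the 2-byte "xorl %eax, %eax" rather than
   the 5-byte "movl $0, %eax".  */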
15448 /* X is an unchanging MEM. If it is a constant pool reference, return
15449 the constant pool rtx, else NULL. */
15452 maybe_get_pool_constant (rtx x)
15454 x = ix86_delegitimize_address (XEXP (x, 0));
15456 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
15457 return get_pool_constant (x);
15463 ix86_expand_move (enum machine_mode mode, rtx operands[])
15466 enum tls_model model;
15471 if (GET_CODE (op1) == SYMBOL_REF)
15473 model = SYMBOL_REF_TLS_MODEL (op1);
15476 op1 = legitimize_tls_address (op1, model, true);
15477 op1 = force_operand (op1, op0);
15481 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15482 && SYMBOL_REF_DLLIMPORT_P (op1))
15483 op1 = legitimize_dllimport_symbol (op1, false);
15485 else if (GET_CODE (op1) == CONST
15486 && GET_CODE (XEXP (op1, 0)) == PLUS
15487 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
15489 rtx addend = XEXP (XEXP (op1, 0), 1);
15490 rtx symbol = XEXP (XEXP (op1, 0), 0);
15493 model = SYMBOL_REF_TLS_MODEL (symbol);
15495 tmp = legitimize_tls_address (symbol, model, true);
15496 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15497 && SYMBOL_REF_DLLIMPORT_P (symbol))
15498 tmp = legitimize_dllimport_symbol (symbol, true);
15502 tmp = force_operand (tmp, NULL);
15503 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
15504 op0, 1, OPTAB_DIRECT);
15510 if ((flag_pic || MACHOPIC_INDIRECT)
15511 && mode == Pmode && symbolic_operand (op1, Pmode))
15513 if (TARGET_MACHO && !TARGET_64BIT)
15516 /* dynamic-no-pic */
15517 if (MACHOPIC_INDIRECT)
15519 rtx temp = ((reload_in_progress
15520 || ((op0 && REG_P (op0))
15522 ? op0 : gen_reg_rtx (Pmode));
15523 op1 = machopic_indirect_data_reference (op1, temp);
15525 op1 = machopic_legitimize_pic_address (op1, mode,
15526 temp == op1 ? 0 : temp);
15528 if (op0 != op1 && GET_CODE (op0) != MEM)
15530 rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
15534 if (GET_CODE (op0) == MEM)
15535 op1 = force_reg (Pmode, op1);
15539 if (GET_CODE (temp) != REG)
15540 temp = gen_reg_rtx (Pmode);
15541 temp = legitimize_pic_address (op1, temp);
15546 /* dynamic-no-pic */
15552 op1 = force_reg (Pmode, op1);
15553 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
15555 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
15556 op1 = legitimize_pic_address (op1, reg);
15565 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
15566 || !push_operand (op0, mode))
15568 op1 = force_reg (mode, op1);
15570 if (push_operand (op0, mode)
15571 && ! general_no_elim_operand (op1, mode))
15572 op1 = copy_to_mode_reg (mode, op1);
15574 /* Force large constants in 64bit compilation into a register
15575 so that they get CSEed. */
15576 if (can_create_pseudo_p ()
15577 && (mode == DImode) && TARGET_64BIT
15578 && immediate_operand (op1, mode)
15579 && !x86_64_zext_immediate_operand (op1, VOIDmode)
15580 && !register_operand (op0, mode)
15582 op1 = copy_to_mode_reg (mode, op1);
15584 if (can_create_pseudo_p ()
15585 && FLOAT_MODE_P (mode)
15586 && GET_CODE (op1) == CONST_DOUBLE)
15588 /* If we are loading a floating point constant to a register,
15589 force the value to memory now, since we'll get better code
15590 out of the back end. */
15592 op1 = validize_mem (force_const_mem (mode, op1));
15593 if (!register_operand (op0, mode))
15595 rtx temp = gen_reg_rtx (mode);
15596 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
15597 emit_move_insn (op0, temp);
15603 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15607 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
15609 rtx op0 = operands[0], op1 = operands[1];
15610 unsigned int align = GET_MODE_ALIGNMENT (mode);
15612 /* Force constants other than zero into memory. We do not know how
15613 the instructions used to build constants modify the upper 64 bits
15614 of the register; once we have that information we may be able
15615 to handle some of them more efficiently. */
15616 if (can_create_pseudo_p ()
15617 && register_operand (op0, mode)
15618 && (CONSTANT_P (op1)
15619 || (GET_CODE (op1) == SUBREG
15620 && CONSTANT_P (SUBREG_REG (op1))))
15621 && !standard_sse_constant_p (op1))
15622 op1 = validize_mem (force_const_mem (mode, op1));
15624 /* We need to check memory alignment for SSE modes since an attribute
15625 can make operands unaligned. */
15626 if (can_create_pseudo_p ()
15627 && SSE_REG_MODE_P (mode)
15628 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
15629 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
15633 /* ix86_expand_vector_move_misalign() does not like constants ... */
15634 if (CONSTANT_P (op1)
15635 || (GET_CODE (op1) == SUBREG
15636 && CONSTANT_P (SUBREG_REG (op1))))
15637 op1 = validize_mem (force_const_mem (mode, op1));
15639 /* ... nor both arguments in memory. */
15640 if (!register_operand (op0, mode)
15641 && !register_operand (op1, mode))
15642 op1 = force_reg (mode, op1);
15644 tmp[0] = op0; tmp[1] = op1;
15645 ix86_expand_vector_move_misalign (mode, tmp);
15649 /* Make operand1 a register if it isn't already. */
15650 if (can_create_pseudo_p ()
15651 && !register_operand (op0, mode)
15652 && !register_operand (op1, mode))
15654 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
15658 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15661 /* Split 32-byte AVX unaligned load and store if needed. */
15664 ix86_avx256_split_vector_move_misalign (rtx op0, rtx op1)
15667 rtx (*extract) (rtx, rtx, rtx);
15668 rtx (*move_unaligned) (rtx, rtx);
15669 enum machine_mode mode;
15671 switch (GET_MODE (op0))
15674 gcc_unreachable ();
15676 extract = gen_avx_vextractf128v32qi;
15677 move_unaligned = gen_avx_movdqu256;
15681 extract = gen_avx_vextractf128v8sf;
15682 move_unaligned = gen_avx_movups256;
15686 extract = gen_avx_vextractf128v4df;
15687 move_unaligned = gen_avx_movupd256;
15692 if (MEM_P (op1) && TARGET_AVX256_SPLIT_UNALIGNED_LOAD)
15694 rtx r = gen_reg_rtx (mode);
15695 m = adjust_address (op1, mode, 0);
15696 emit_move_insn (r, m);
15697 m = adjust_address (op1, mode, 16);
15698 r = gen_rtx_VEC_CONCAT (GET_MODE (op0), r, m);
15699 emit_move_insn (op0, r);
15701 else if (MEM_P (op0) && TARGET_AVX256_SPLIT_UNALIGNED_STORE)
15703 m = adjust_address (op0, mode, 0);
15704 emit_insn (extract (m, op1, const0_rtx));
15705 m = adjust_address (op0, mode, 16);
15706 emit_insn (extract (m, op1, const1_rtx));
15709 emit_insn (move_unaligned (op0, op1));
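/* For example (illustrative), with unaligned-store splitting enabled
   a 32-byte store that would otherwise be

       vmovups %ymm0, (%rax)

   is emitted as two 16-byte halves instead:

       vextractf128 $0x0, %ymm0, (%rax)
       vextractf128 $0x1, %ymm0, 16(%rax)

   mirroring the extract/adjust_address pairs above; loads are split
   symmetrically via VEC_CONCAT.  */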
15712 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
15713 straight to ix86_expand_vector_move. */
15714 /* Code generation for scalar reg-reg moves of single and double precision data:
15715 if (x86_sse_partial_reg_dependency == true || x86_sse_split_regs == true)
15719 if (x86_sse_partial_reg_dependency == true)
15724 Code generation for scalar loads of double precision data:
15725 if (x86_sse_split_regs == true)
15726 movlpd mem, reg (gas syntax)
15730 Code generation for unaligned packed loads of single precision data
15731 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
15732 if (x86_sse_unaligned_move_optimal)
15735 if (x86_sse_partial_reg_dependency == true)
15747 Code generation for unaligned packed loads of double precision data
15748 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
15749 if (x86_sse_unaligned_move_optimal)
15752 if (x86_sse_split_regs == true)
15765 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
15774 switch (GET_MODE_CLASS (mode))
15776 case MODE_VECTOR_INT:
15778 switch (GET_MODE_SIZE (mode))
15781 /* If we're optimizing for size, movups is the smallest. */
15782 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15784 op0 = gen_lowpart (V4SFmode, op0);
15785 op1 = gen_lowpart (V4SFmode, op1);
15786 emit_insn (gen_avx_movups (op0, op1));
15789 op0 = gen_lowpart (V16QImode, op0);
15790 op1 = gen_lowpart (V16QImode, op1);
15791 emit_insn (gen_avx_movdqu (op0, op1));
15794 op0 = gen_lowpart (V32QImode, op0);
15795 op1 = gen_lowpart (V32QImode, op1);
15796 ix86_avx256_split_vector_move_misalign (op0, op1);
15799 gcc_unreachable ();
15802 case MODE_VECTOR_FLOAT:
15803 op0 = gen_lowpart (mode, op0);
15804 op1 = gen_lowpart (mode, op1);
15809 emit_insn (gen_avx_movups (op0, op1));
15812 ix86_avx256_split_vector_move_misalign (op0, op1);
15815 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15817 op0 = gen_lowpart (V4SFmode, op0);
15818 op1 = gen_lowpart (V4SFmode, op1);
15819 emit_insn (gen_avx_movups (op0, op1));
15822 emit_insn (gen_avx_movupd (op0, op1));
15825 ix86_avx256_split_vector_move_misalign (op0, op1);
15828 gcc_unreachable ();
15833 gcc_unreachable ();
15841 /* If we're optimizing for size, movups is the smallest. */
15842 if (optimize_insn_for_size_p ()
15843 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15845 op0 = gen_lowpart (V4SFmode, op0);
15846 op1 = gen_lowpart (V4SFmode, op1);
15847 emit_insn (gen_sse_movups (op0, op1));
15851 /* ??? If we have typed data, then it would appear that using
15852 movdqu is the only way to get unaligned data loaded with
15854 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15856 op0 = gen_lowpart (V16QImode, op0);
15857 op1 = gen_lowpart (V16QImode, op1);
15858 emit_insn (gen_sse2_movdqu (op0, op1));
15862 if (TARGET_SSE2 && mode == V2DFmode)
15866 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15868 op0 = gen_lowpart (V2DFmode, op0);
15869 op1 = gen_lowpart (V2DFmode, op1);
15870 emit_insn (gen_sse2_movupd (op0, op1));
15874 /* When SSE registers are split into halves, we can avoid
15875 writing to the top half twice. */
15876 if (TARGET_SSE_SPLIT_REGS)
15878 emit_clobber (op0);
15883 /* ??? Not sure about the best option for the Intel chips.
15884 The following would seem to satisfy; the register is
15885 entirely cleared, breaking the dependency chain. We
15886 then store to the upper half, with a dependency depth
15887 of one. A rumor has it that Intel recommends two movsd
15888 followed by an unpacklpd, but this is unconfirmed. And
15889 given that the dependency depth of the unpacklpd would
15890 still be one, I'm not sure why this would be better. */
15891 zero = CONST0_RTX (V2DFmode);
15894 m = adjust_address (op1, DFmode, 0);
15895 emit_insn (gen_sse2_loadlpd (op0, zero, m));
15896 m = adjust_address (op1, DFmode, 8);
15897 emit_insn (gen_sse2_loadhpd (op0, op0, m));
15901 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15903 op0 = gen_lowpart (V4SFmode, op0);
15904 op1 = gen_lowpart (V4SFmode, op1);
15905 emit_insn (gen_sse_movups (op0, op1));
15909 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
15910 emit_move_insn (op0, CONST0_RTX (mode));
15912 emit_clobber (op0);
15914 if (mode != V4SFmode)
15915 op0 = gen_lowpart (V4SFmode, op0);
15916 m = adjust_address (op1, V2SFmode, 0);
15917 emit_insn (gen_sse_loadlps (op0, op0, m));
15918 m = adjust_address (op1, V2SFmode, 8);
15919 emit_insn (gen_sse_loadhps (op0, op0, m));
15922 else if (MEM_P (op0))
15924 /* If we're optimizing for size, movups is the smallest. */
15925 if (optimize_insn_for_size_p ()
15926 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15928 op0 = gen_lowpart (V4SFmode, op0);
15929 op1 = gen_lowpart (V4SFmode, op1);
15930 emit_insn (gen_sse_movups (op0, op1));
15934 /* ??? Similar to above, only less clear because of quote
15935 typeless stores unquote. */
15936 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
15937 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15939 op0 = gen_lowpart (V16QImode, op0);
15940 op1 = gen_lowpart (V16QImode, op1);
15941 emit_insn (gen_sse2_movdqu (op0, op1));
15945 if (TARGET_SSE2 && mode == V2DFmode)
15947 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15949 op0 = gen_lowpart (V2DFmode, op0);
15950 op1 = gen_lowpart (V2DFmode, op1);
15951 emit_insn (gen_sse2_movupd (op0, op1));
15955 m = adjust_address (op0, DFmode, 0);
15956 emit_insn (gen_sse2_storelpd (m, op1));
15957 m = adjust_address (op0, DFmode, 8);
15958 emit_insn (gen_sse2_storehpd (m, op1));
15963 if (mode != V4SFmode)
15964 op1 = gen_lowpart (V4SFmode, op1);
15966 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15968 op0 = gen_lowpart (V4SFmode, op0);
15969 emit_insn (gen_sse_movups (op0, op1));
15973 m = adjust_address (op0, V2SFmode, 0);
15974 emit_insn (gen_sse_storelps (m, op1));
15975 m = adjust_address (op0, V2SFmode, 8);
15976 emit_insn (gen_sse_storehps (m, op1));
15981 gcc_unreachable ();
15984 /* Expand a push in MODE. This is some mode for which we do not support
15985 proper push instructions, at least from the registers that we expect
15986 the value to live in. */
15989 ix86_expand_push (enum machine_mode mode, rtx x)
15993 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
15994 GEN_INT (-GET_MODE_SIZE (mode)),
15995 stack_pointer_rtx, 1, OPTAB_DIRECT);
15996 if (tmp != stack_pointer_rtx)
15997 emit_move_insn (stack_pointer_rtx, tmp);
15999 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
16001 /* When we push an operand onto stack, it has to be aligned at least
16002 at the function argument boundary. However since we don't have
16003 the argument type, we can't determine the actual argument
16005 emit_move_insn (tmp, x);
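/* A sketch of the expansion: for a 16-byte mode this is simply

       sub  $16, %esp
       mov  <value>, (%esp)

   an explicit stack-pointer decrement followed by an ordinary store,
   rather than a real push instruction.  */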
16008 /* Helper function of ix86_fixup_binary_operands to canonicalize
16009 operand order. Returns true if the operands should be swapped. */
16012 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
16015 rtx dst = operands[0];
16016 rtx src1 = operands[1];
16017 rtx src2 = operands[2];
16019 /* If the operation is not commutative, we can't do anything. */
16020 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
16023 /* Highest priority is that src1 should match dst. */
16024 if (rtx_equal_p (dst, src1))
16026 if (rtx_equal_p (dst, src2))
16029 /* Next highest priority is that immediate constants come second. */
16030 if (immediate_operand (src2, mode))
16032 if (immediate_operand (src1, mode))
16035 /* Lowest priority is that memory references should come second. */
16045 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
16046 destination to use for the operation. If different from the true
16047 destination in operands[0], a copy operation will be required. */
16050 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
16053 rtx dst = operands[0];
16054 rtx src1 = operands[1];
16055 rtx src2 = operands[2];
16057 /* Canonicalize operand order. */
16058 if (ix86_swap_binary_operands_p (code, mode, operands))
16062 /* It is invalid to swap operands of different modes. */
16063 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
16070 /* Both source operands cannot be in memory. */
16071 if (MEM_P (src1) && MEM_P (src2))
16073 /* Optimization: Only read from memory once. */
16074 if (rtx_equal_p (src1, src2))
16076 src2 = force_reg (mode, src2);
16080 src2 = force_reg (mode, src2);
16083 /* If the destination is memory, and we do not have matching source
16084 operands, do things in registers. */
16085 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16086 dst = gen_reg_rtx (mode);
16088 /* Source 1 cannot be a constant. */
16089 if (CONSTANT_P (src1))
16090 src1 = force_reg (mode, src1);
16092 /* Source 1 cannot be a non-matching memory. */
16093 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16094 src1 = force_reg (mode, src1);
16096 operands[1] = src1;
16097 operands[2] = src2;
16101 /* Similarly, but assume that the destination has already been
16102 set up properly. */
16105 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
16106 enum machine_mode mode, rtx operands[])
16108 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
16109 gcc_assert (dst == operands[0]);
16112 /* Attempt to expand a binary operator. Make the expansion closer to the
16113 actual machine than just general_operand, which would allow 3 separate
16114 memory references (one output, two input) in a single insn. */
16117 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
16120 rtx src1, src2, dst, op, clob;
16122 dst = ix86_fixup_binary_operands (code, mode, operands);
16123 src1 = operands[1];
16124 src2 = operands[2];
16126 /* Emit the instruction. */
16128 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
16129 if (reload_in_progress)
16131 /* Reload doesn't know about the flags register, and doesn't know that
16132 it doesn't want to clobber it. We can only do this with PLUS. */
16133 gcc_assert (code == PLUS);
16136 else if (reload_completed
16138 && !rtx_equal_p (dst, src1))
16140 /* This is going to be an LEA; avoid splitting it later. */
16145 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16146 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16149 /* Fix up the destination if needed. */
16150 if (dst != operands[0])
16151 emit_move_insn (operands[0], dst);
16154 /* Return TRUE or FALSE depending on whether the binary operator meets the
16155 appropriate constraints. */
16158 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
16161 rtx dst = operands[0];
16162 rtx src1 = operands[1];
16163 rtx src2 = operands[2];
16165 /* Both source operands cannot be in memory. */
16166 if (MEM_P (src1) && MEM_P (src2))
16169 /* Canonicalize operand order for commutative operators. */
16170 if (ix86_swap_binary_operands_p (code, mode, operands))
16177 /* If the destination is memory, we must have a matching source operand. */
16178 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16181 /* Source 1 cannot be a constant. */
16182 if (CONSTANT_P (src1))
16185 /* Source 1 cannot be a non-matching memory. */
16186 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16188 /* Support "andhi/andsi/anddi" as a zero-extending move. */
16189 return (code == AND
16192 || (TARGET_64BIT && mode == DImode))
16193 && CONST_INT_P (src2)
16194 && (INTVAL (src2) == 0xff
16195 || INTVAL (src2) == 0xffff));
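/* E.g. (an illustrative equivalence): "andl $0xff, %eax" leaves the
   same value in %eax as "movzbl %al, %eax" would, so when source and
   destination match, the AND pattern doubles as a zero-extending
   move.  */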
16201 /* Attempt to expand a unary operator. Make the expansion closer to the
16202 actual machine than just general_operand, which would allow 2 separate
16203 memory references (one output, one input) in a single insn. */
16206 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
16209 int matching_memory;
16210 rtx src, dst, op, clob;
16215 /* If the destination is memory, and we do not have matching source
16216 operands, do things in registers. */
16217 matching_memory = 0;
16220 if (rtx_equal_p (dst, src))
16221 matching_memory = 1;
16223 dst = gen_reg_rtx (mode);
16226 /* When source operand is memory, destination must match. */
16227 if (MEM_P (src) && !matching_memory)
16228 src = force_reg (mode, src);
16230 /* Emit the instruction. */
16232 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
16233 if (reload_in_progress || code == NOT)
16235 /* Reload doesn't know about the flags register, and doesn't know that
16236 it doesn't want to clobber it. */
16237 gcc_assert (code == NOT);
16242 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16243 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16246 /* Fix up the destination if needed. */
16247 if (dst != operands[0])
16248 emit_move_insn (operands[0], dst);
16251 /* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
16252 divisor are within the range [0-255]. */
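/* A sketch of the emitted control flow (registers and labels are
   illustrative):

       orl     %edx, %ecx         ; scratch = dividend | divisor
       testl   $-0x100, %ecx      ; any bits above the low 8 set?
       je      .Lqimode
       <full-width divmod>
       jmp     .Lend
   .Lqimode:
       divb                       ; quotient in %al, remainder in %ah
   .Lend:

   The 8-bit divide is far cheaper on typical chips, which is the
   point of the runtime test.  */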
16255 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
16258 rtx end_label, qimode_label;
16259 rtx insn, div, mod;
16260 rtx scratch, tmp0, tmp1, tmp2;
16261 rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
16262 rtx (*gen_zero_extend) (rtx, rtx);
16263 rtx (*gen_test_ccno_1) (rtx, rtx);
16268 gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
16269 gen_test_ccno_1 = gen_testsi_ccno_1;
16270 gen_zero_extend = gen_zero_extendqisi2;
16273 gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
16274 gen_test_ccno_1 = gen_testdi_ccno_1;
16275 gen_zero_extend = gen_zero_extendqidi2;
16278 gcc_unreachable ();
16281 end_label = gen_label_rtx ();
16282 qimode_label = gen_label_rtx ();
16284 scratch = gen_reg_rtx (mode);
16286 /* Use 8bit unsigned divmod if dividend and divisor are within
16287 the range [0-255]. */
16288 emit_move_insn (scratch, operands[2]);
16289 scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
16290 scratch, 1, OPTAB_DIRECT);
16291 emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
16292 tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
16293 tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
16294 tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
16295 gen_rtx_LABEL_REF (VOIDmode, qimode_label),
16297 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
16298 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16299 JUMP_LABEL (insn) = qimode_label;
16301 /* Generate original signed/unsigned divmod. */
16302 div = gen_divmod4_1 (operands[0], operands[1],
16303 operands[2], operands[3]);
16306 /* Branch to the end. */
16307 emit_jump_insn (gen_jump (end_label));
16310 /* Generate 8bit unsigned divide. */
16311 emit_label (qimode_label);
16312 /* Don't use operands[0] for result of 8bit divide since not all
16313 registers support QImode ZERO_EXTRACT. */
16314 tmp0 = simplify_gen_subreg (HImode, scratch, mode, 0);
16315 tmp1 = simplify_gen_subreg (HImode, operands[2], mode, 0);
16316 tmp2 = simplify_gen_subreg (QImode, operands[3], mode, 0);
16317 emit_insn (gen_udivmodhiqi3 (tmp0, tmp1, tmp2));
16321 div = gen_rtx_DIV (SImode, operands[2], operands[3]);
16322 mod = gen_rtx_MOD (SImode, operands[2], operands[3]);
16326 div = gen_rtx_UDIV (SImode, operands[2], operands[3]);
16327 mod = gen_rtx_UMOD (SImode, operands[2], operands[3]);
16330 /* Extract remainder from AH. */
16331 tmp1 = gen_rtx_ZERO_EXTRACT (mode, tmp0, GEN_INT (8), GEN_INT (8));
16332 if (REG_P (operands[1]))
16333 insn = emit_move_insn (operands[1], tmp1);
16336 /* Need a new scratch register since the old one has result
16338 scratch = gen_reg_rtx (mode);
16339 emit_move_insn (scratch, tmp1);
16340 insn = emit_move_insn (operands[1], scratch);
16342 set_unique_reg_note (insn, REG_EQUAL, mod);
16344 /* Zero extend quotient from AL. */
16345 tmp1 = gen_lowpart (QImode, tmp0);
16346 insn = emit_insn (gen_zero_extend (operands[0], tmp1));
16347 set_unique_reg_note (insn, REG_EQUAL, div);
16349 emit_label (end_label);
16352 #define LEA_SEARCH_THRESHOLD 12
16354 /* Search backward for non-agu definition of register number REGNO1
16355 or register number REGNO2 in INSN's basic block until
16356 1. Pass LEA_SEARCH_THRESHOLD instructions, or
16357 2. Reach BB boundary, or
16358 3. Reach agu definition.
16359 Returns the distance between the non-agu definition point and INSN.
16360 If there is no definition point, return -1. */
16363 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
16366 basic_block bb = BLOCK_FOR_INSN (insn);
16369 enum attr_type insn_type;
16371 if (insn != BB_HEAD (bb))
16373 rtx prev = PREV_INSN (insn);
16374 while (prev && distance < LEA_SEARCH_THRESHOLD)
16376 if (NONDEBUG_INSN_P (prev))
16379 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16380 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16381 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16382 && (regno1 == DF_REF_REGNO (*def_rec)
16383 || regno2 == DF_REF_REGNO (*def_rec)))
16385 insn_type = get_attr_type (prev);
16386 if (insn_type != TYPE_LEA)
16390 if (prev == BB_HEAD (bb))
16392 prev = PREV_INSN (prev);
16396 if (distance < LEA_SEARCH_THRESHOLD)
16400 bool simple_loop = false;
16402 FOR_EACH_EDGE (e, ei, bb->preds)
16405 simple_loop = true;
16411 rtx prev = BB_END (bb);
16414 && distance < LEA_SEARCH_THRESHOLD)
16416 if (NONDEBUG_INSN_P (prev))
16419 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16420 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16421 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16422 && (regno1 == DF_REF_REGNO (*def_rec)
16423 || regno2 == DF_REF_REGNO (*def_rec)))
16425 insn_type = get_attr_type (prev);
16426 if (insn_type != TYPE_LEA)
16430 prev = PREV_INSN (prev);
16438 /* get_attr_type may modify recog data. We want to make sure
16439 that recog data is valid for instruction INSN, on which
16440 distance_non_agu_define is called. INSN is unchanged here. */
16441 extract_insn_cached (insn);
16445 /* Return the distance between INSN and the next insn that uses
16446 register number REGNO0 in a memory address. Return -1 if no such
16447 use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set. */
16450 distance_agu_use (unsigned int regno0, rtx insn)
16452 basic_block bb = BLOCK_FOR_INSN (insn);
16457 if (insn != BB_END (bb))
16459 rtx next = NEXT_INSN (insn);
16460 while (next && distance < LEA_SEARCH_THRESHOLD)
16462 if (NONDEBUG_INSN_P (next))
16466 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16467 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16468 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16469 && regno0 == DF_REF_REGNO (*use_rec))
16471 /* Return DISTANCE if OP0 is used in memory
16472 address in NEXT. */
16476 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16477 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16478 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16479 && regno0 == DF_REF_REGNO (*def_rec))
16481 /* Return -1 if OP0 is set in NEXT. */
16485 if (next == BB_END (bb))
16487 next = NEXT_INSN (next);
16491 if (distance < LEA_SEARCH_THRESHOLD)
16495 bool simple_loop = false;
16497 FOR_EACH_EDGE (e, ei, bb->succs)
16500 simple_loop = true;
16506 rtx next = BB_HEAD (bb);
16509 && distance < LEA_SEARCH_THRESHOLD)
16511 if (NONDEBUG_INSN_P (next))
16515 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16516 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16517 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16518 && regno0 == DF_REF_REGNO (*use_rec))
16520 /* Return DISTANCE if OP0 is used in memory
16521 address in NEXT. */
16525 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16526 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16527 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16528 && regno0 == DF_REF_REGNO (*def_rec))
16530 /* Return -1 if OP0 is set in NEXT. */
16535 next = NEXT_INSN (next);
16543 /* Define this macro to tune LEA priority vs ADD; it takes effect when
16544 there is a dilemma of choosing LEA or ADD.
16545 Negative value: ADD is preferred over LEA.
16547 Positive value: LEA is preferred over ADD. */
16548 #define IX86_LEA_PRIORITY 2
16550 /* Return true if it is ok to optimize an ADD operation to LEA
16551 operation to avoid flag register consumption. For most processors,
16552 ADD is faster than LEA. For processors like ATOM, if the
16553 destination register of LEA holds an actual address which will be
16554 used soon, LEA is better; otherwise ADD is better. */
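/* Illustrative contrast: "addl %ebx, %eax" and
   "leal (%eax,%ebx), %eax" compute the same sum, but ADD clobbers
   the flags and runs in the ALU, while LEA leaves the flags alone
   and, on ATOM, executes in the AGU.  When the result feeds a memory
   address soon afterwards, producing it in the AGU avoids a
   forwarding stall -- hence the distance heuristics below.  */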
16557 ix86_lea_for_add_ok (rtx insn, rtx operands[])
16559 unsigned int regno0 = true_regnum (operands[0]);
16560 unsigned int regno1 = true_regnum (operands[1]);
16561 unsigned int regno2 = true_regnum (operands[2]);
16563 /* If a = b + c with a != b and a != c, we must use the lea form. */
16564 if (regno0 != regno1 && regno0 != regno2)
16567 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16571 int dist_define, dist_use;
16573 /* Return false if REGNO0 isn't used in memory address. */
16574 dist_use = distance_agu_use (regno0, insn);
16578 dist_define = distance_non_agu_define (regno1, regno2, insn);
16579 if (dist_define <= 0)
16582 /* If this insn has both backward non-agu dependence and forward
16583 agu dependence, the one with the shorter distance takes effect. */
16584 if ((dist_define + IX86_LEA_PRIORITY) < dist_use)
16591 /* Return true if destination reg of SET_BODY is shift count of
16595 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
16601 /* Retrieve destination of SET_BODY. */
16602 switch (GET_CODE (set_body))
16605 set_dest = SET_DEST (set_body);
16606 if (!set_dest || !REG_P (set_dest))
16610 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
16611 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
16619 /* Retrieve shift count of USE_BODY. */
16620 switch (GET_CODE (use_body))
16623 shift_rtx = XEXP (use_body, 1);
16626 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
16627 if (ix86_dep_by_shift_count_body (set_body,
16628 XVECEXP (use_body, 0, i)))
16636 && (GET_CODE (shift_rtx) == ASHIFT
16637 || GET_CODE (shift_rtx) == LSHIFTRT
16638 || GET_CODE (shift_rtx) == ASHIFTRT
16639 || GET_CODE (shift_rtx) == ROTATE
16640 || GET_CODE (shift_rtx) == ROTATERT))
16642 rtx shift_count = XEXP (shift_rtx, 1);
16644 /* Return true if shift count is dest of SET_BODY. */
16645 if (REG_P (shift_count)
16646 && true_regnum (set_dest) == true_regnum (shift_count))
16653 /* Return true if destination reg of SET_INSN is shift count of
16657 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
16659 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
16660 PATTERN (use_insn));
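/* For example (illustrative):

       movl %esi, %ecx            ; SET_INSN defines %ecx
       sall %cl, %eax             ; USE_INSN shifts by %cl

   Here the first insn's destination is the second insn's shift
   count, so the function returns true.  */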
16663 /* Return TRUE or FALSE depending on whether the unary operator meets the
16664 appropriate constraints. */
16667 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
16668 enum machine_mode mode ATTRIBUTE_UNUSED,
16669 rtx operands[2] ATTRIBUTE_UNUSED)
16671 /* If one of the operands is memory, source and destination must match. */
16672 if ((MEM_P (operands[0])
16673 || MEM_P (operands[1]))
16674 && ! rtx_equal_p (operands[0], operands[1]))
16679 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
16680 are ok, keeping in mind the possible movddup alternative. */
16683 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
16685 if (MEM_P (operands[0]))
16686 return rtx_equal_p (operands[0], operands[1 + high]);
16687 if (MEM_P (operands[1]) && MEM_P (operands[2]))
16688 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
16692 /* Post-reload splitter for converting an SF or DFmode value in an
16693 SSE register into an unsigned SImode value. */
16696 ix86_split_convert_uns_si_sse (rtx operands[])
16698 enum machine_mode vecmode;
16699 rtx value, large, zero_or_two31, input, two31, x;
16701 large = operands[1];
16702 zero_or_two31 = operands[2];
16703 input = operands[3];
16704 two31 = operands[4];
16705 vecmode = GET_MODE (large);
16706 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
16708 /* Load up the value into the low element. We must ensure that the other
16709 elements are valid floats -- zero is the easiest such value. */
16712 if (vecmode == V4SFmode)
16713 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
16715 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
16719 input = gen_rtx_REG (vecmode, REGNO (input));
16720 emit_move_insn (value, CONST0_RTX (vecmode));
16721 if (vecmode == V4SFmode)
16722 emit_insn (gen_sse_movss (value, value, input));
16724 emit_insn (gen_sse2_movsd (value, value, input));
16727 emit_move_insn (large, two31);
16728 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
16730 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
16731 emit_insn (gen_rtx_SET (VOIDmode, large, x));
16733 x = gen_rtx_AND (vecmode, zero_or_two31, large);
16734 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
16736 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
16737 emit_insn (gen_rtx_SET (VOIDmode, value, x));
16739 large = gen_rtx_REG (V4SImode, REGNO (large));
16740 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
16742 x = gen_rtx_REG (V4SImode, REGNO (value));
16743 if (vecmode == V4SFmode)
16744 emit_insn (gen_sse2_cvttps2dq (x, value));
16746 emit_insn (gen_sse2_cvttpd2dq (x, value));
16749 emit_insn (gen_xorv4si3 (value, value, large));
16752 /* Convert an unsigned DImode value into a DFmode, using only SSE.
16753 Expects the 64-bit DImode to be supplied in a pair of integral
16754 registers. Requires SSE2; will use SSE3 if available. For x86_32,
16755 -mfpmath=sse, !optimize_size only. */
16758 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
16760 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
16761 rtx int_xmm, fp_xmm;
16762 rtx biases, exponents;
16765 int_xmm = gen_reg_rtx (V4SImode);
16766 if (TARGET_INTER_UNIT_MOVES)
16767 emit_insn (gen_movdi_to_sse (int_xmm, input));
16768 else if (TARGET_SSE_SPLIT_REGS)
16770 emit_clobber (int_xmm);
16771 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
16775 x = gen_reg_rtx (V2DImode);
16776 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
16777 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
16780 x = gen_rtx_CONST_VECTOR (V4SImode,
16781 gen_rtvec (4, GEN_INT (0x43300000UL),
16782 GEN_INT (0x45300000UL),
16783 const0_rtx, const0_rtx));
16784 exponents = validize_mem (force_const_mem (V4SImode, x));
16786 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
16787 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
16789 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
16790 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
16791 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
16792 (0x1.0p84 + double(fp_value_hi_xmm)).
16793 Note these exponents differ by 32. */
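/* A sketch of the arithmetic, for x = hi*2^32 + lo:

       (2^52 + lo)      - 2^52  ==  (double) lo          (exact)
       (2^84 + hi*2^32) - 2^84  ==  (double) hi * 2^32   (exact)

   Both right-hand sides are exactly representable, so after the bias
   subtraction below only the final addition rounds, yielding the
   correctly rounded double value of x.  */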
16795 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
16797 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
16798 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
16799 real_ldexp (&bias_lo_rvt, &dconst1, 52);
16800 real_ldexp (&bias_hi_rvt, &dconst1, 84);
16801 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
16802 x = const_double_from_real_value (bias_hi_rvt, DFmode);
16803 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
16804 biases = validize_mem (force_const_mem (V2DFmode, biases));
16805 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
16807 /* Add the upper and lower DFmode values together. */
16809 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
16812 x = copy_to_mode_reg (V2DFmode, fp_xmm);
16813 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
16814 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
16817 ix86_expand_vector_extract (false, target, fp_xmm, 0);
16820 /* Not used, but eases macroization of patterns. */
16822 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
16823 rtx input ATTRIBUTE_UNUSED)
16825 gcc_unreachable ();
16828 /* Convert an unsigned SImode value into a DFmode. Only currently used
16829 for SSE, but applicable anywhere. */
16832 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
16834 REAL_VALUE_TYPE TWO31r;
16837 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
16838 NULL, 1, OPTAB_DIRECT);
16840 fp = gen_reg_rtx (DFmode);
16841 emit_insn (gen_floatsidf2 (fp, x));
16843 real_ldexp (&TWO31r, &dconst1, 31);
16844 x = const_double_from_real_value (TWO31r, DFmode);
16846 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
16848 emit_move_insn (target, x);
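/* The identity used above, as a sketch: for u in [0, 2^32),

       (double) ((int32_t) (u - 2^31)) + 2^31.0  ==  (double) u

   The subtraction wraps u into signed range so the signed SImode
   conversion applies, and adding 2^31.0 back is exact in DFmode.  */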
16851 /* Convert a signed DImode value into a DFmode. Only used for SSE in
16852 32-bit mode; otherwise we have a direct convert instruction. */
16855 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
16857 REAL_VALUE_TYPE TWO32r;
16858 rtx fp_lo, fp_hi, x;
16860 fp_lo = gen_reg_rtx (DFmode);
16861 fp_hi = gen_reg_rtx (DFmode);
16863 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
16865 real_ldexp (&TWO32r, &dconst1, 32);
16866 x = const_double_from_real_value (TWO32r, DFmode);
16867 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
16869 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
16871 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
16874 emit_move_insn (target, x);
16877 /* Convert an unsigned SImode value into a SFmode, using only SSE.
16878 For x86_32, -mfpmath=sse, !optimize_size only. */
16880 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
16882 REAL_VALUE_TYPE ONE16r;
16883 rtx fp_hi, fp_lo, int_hi, int_lo, x;
16885 real_ldexp (&ONE16r, &dconst1, 16);
16886 x = const_double_from_real_value (ONE16r, SFmode);
16887 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
16888 NULL, 0, OPTAB_DIRECT);
16889 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
16890 NULL, 0, OPTAB_DIRECT);
16891 fp_hi = gen_reg_rtx (SFmode);
16892 fp_lo = gen_reg_rtx (SFmode);
16893 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
16894 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
16895 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
16897 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
16899 if (!rtx_equal_p (target, fp_hi))
16900 emit_move_insn (target, fp_hi);
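/* A sketch of the decomposition above: with hi = u >> 16 and
   lo = u & 0xffff,

       (float) u == (float) hi * 65536.0f + (float) lo

   Each 16-bit half converts exactly to SFmode and the scaling by
   2^16 is exact, so only the final addition rounds.  */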
16903 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
16904 then replicate the value for all elements of the vector
16908 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
16915 v = gen_rtvec (4, value, value, value, value);
16916 return gen_rtx_CONST_VECTOR (V4SImode, v);
16920 v = gen_rtvec (2, value, value);
16921 return gen_rtx_CONST_VECTOR (V2DImode, v);
16925 v = gen_rtvec (8, value, value, value, value,
16926 value, value, value, value);
16928 v = gen_rtvec (8, value, CONST0_RTX (SFmode),
16929 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16930 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16931 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16932 return gen_rtx_CONST_VECTOR (V8SFmode, v);
16936 v = gen_rtvec (4, value, value, value, value);
16938 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
16939 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16940 return gen_rtx_CONST_VECTOR (V4SFmode, v);
16944 v = gen_rtvec (4, value, value, value, value);
16946 v = gen_rtvec (4, value, CONST0_RTX (DFmode),
16947 CONST0_RTX (DFmode), CONST0_RTX (DFmode));
16948 return gen_rtx_CONST_VECTOR (V4DFmode, v);
16952 v = gen_rtvec (2, value, value);
16954 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
16955 return gen_rtx_CONST_VECTOR (V2DFmode, v);
16958 gcc_unreachable ();
16962 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
16963 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
16964 for an SSE register. If VECT is true, then replicate the mask for
16965 all elements of the vector register. If INVERT is true, then create
16966 a mask excluding the sign bit. */
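/* E.g. (illustrative): for DFmode the mask is the bit pattern
   0x8000000000000000 (just the sign bit); with INVERT it is
   0x7fffffffffffffff; with VECT that pattern is replicated into
   every element of the vector constant.  */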
16969 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
16971 enum machine_mode vec_mode, imode;
16972 HOST_WIDE_INT hi, lo;
16977 /* Find the sign bit, sign extended to 2*HWI. */
16984 mode = GET_MODE_INNER (mode);
16986 lo = 0x80000000, hi = lo < 0;
16993 mode = GET_MODE_INNER (mode);
16995 if (HOST_BITS_PER_WIDE_INT >= 64)
16996 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
16998 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
17003 vec_mode = VOIDmode;
17004 if (HOST_BITS_PER_WIDE_INT >= 64)
17007 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
17014 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
17018 lo = ~lo, hi = ~hi;
17024 mask = immed_double_const (lo, hi, imode);
17026 vec = gen_rtvec (2, v, mask);
17027 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
17028 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
17035 gcc_unreachable ();
17039 lo = ~lo, hi = ~hi;
17041 /* Force this value into the low part of a fp vector constant. */
17042 mask = immed_double_const (lo, hi, imode);
17043 mask = gen_lowpart (mode, mask);
17045 if (vec_mode == VOIDmode)
17046 return force_reg (mode, mask);
17048 v = ix86_build_const_vector (vec_mode, vect, mask);
17049 return force_reg (vec_mode, v);
17052 /* Generate code for floating point ABS or NEG. */
17055 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
17058 rtx mask, set, dst, src;
17059 bool use_sse = false;
17060 bool vector_mode = VECTOR_MODE_P (mode);
17061 enum machine_mode vmode = mode;
17065 else if (mode == TFmode)
17067 else if (TARGET_SSE_MATH)
17069 use_sse = SSE_FLOAT_MODE_P (mode);
17070 if (mode == SFmode)
17072 else if (mode == DFmode)
17076 /* NEG and ABS performed with SSE use bitwise mask operations.
17077 Create the appropriate mask now. */
17079 mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
17086 set = gen_rtx_fmt_e (code, mode, src);
17087 set = gen_rtx_SET (VOIDmode, dst, set);
17094 use = gen_rtx_USE (VOIDmode, mask);
17096 par = gen_rtvec (2, set, use);
17099 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
17100 par = gen_rtvec (3, set, use, clob);
17102 emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
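/* The underlying bit trick, as a sketch with the DFmode masks:

       neg (x) = x XOR 0x8000000000000000   ; flip the sign bit
       abs (x) = x AND 0x7fffffffffffffff   ; clear the sign bit

   which is why ABS requests the inverted mask from
   ix86_build_signbit_mask above and NEG the plain one.  */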
17108 /* Expand a copysign operation. Special case operand 0 being a constant. */
17111 ix86_expand_copysign (rtx operands[])
17113 enum machine_mode mode, vmode;
17114 rtx dest, op0, op1, mask, nmask;
17116 dest = operands[0];
17120 mode = GET_MODE (dest);
17122 if (mode == SFmode)
17124 else if (mode == DFmode)
17129 if (GET_CODE (op0) == CONST_DOUBLE)
17131 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
17133 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
17134 op0 = simplify_unary_operation (ABS, mode, op0, mode);
17136 if (mode == SFmode || mode == DFmode)
17138 if (op0 == CONST0_RTX (mode))
17139 op0 = CONST0_RTX (vmode);
17142 rtx v = ix86_build_const_vector (vmode, false, op0);
17144 op0 = force_reg (vmode, v);
17147 else if (op0 != CONST0_RTX (mode))
17148 op0 = force_reg (mode, op0);
17150 mask = ix86_build_signbit_mask (vmode, 0, 0);
17152 if (mode == SFmode)
17153 copysign_insn = gen_copysignsf3_const;
17154 else if (mode == DFmode)
17155 copysign_insn = gen_copysigndf3_const;
17157 copysign_insn = gen_copysigntf3_const;
17159 emit_insn (copysign_insn (dest, op0, op1, mask));
17163 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
17165 nmask = ix86_build_signbit_mask (vmode, 0, 1);
17166 mask = ix86_build_signbit_mask (vmode, 0, 0);
17168 if (mode == SFmode)
17169 copysign_insn = gen_copysignsf3_var;
17170 else if (mode == DFmode)
17171 copysign_insn = gen_copysigndf3_var;
17173 copysign_insn = gen_copysigntf3_var;
17175 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
17179 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
17180 be a constant, and so has already been expanded into a vector constant. */
17183 ix86_split_copysign_const (rtx operands[])
17185 enum machine_mode mode, vmode;
17186 rtx dest, op0, mask, x;
17188 dest = operands[0];
17190 mask = operands[3];
17192 mode = GET_MODE (dest);
17193 vmode = GET_MODE (mask);
17195 dest = simplify_gen_subreg (vmode, dest, mode, 0);
17196 x = gen_rtx_AND (vmode, dest, mask);
17197 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17199 if (op0 != CONST0_RTX (vmode))
17201 x = gen_rtx_IOR (vmode, dest, op0);
17202 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17206 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
17207 so we have to do two masks. */
17210 ix86_split_copysign_var (rtx operands[])
17212 enum machine_mode mode, vmode;
17213 rtx dest, scratch, op0, op1, mask, nmask, x;
17215 dest = operands[0];
17216 scratch = operands[1];
17219 nmask = operands[4];
17220 mask = operands[5];
17222 mode = GET_MODE (dest);
17223 vmode = GET_MODE (mask);
17225 if (rtx_equal_p (op0, op1))
17227 /* Shouldn't happen often (it's useless, obviously), but when it does
17228 we'd generate incorrect code if we continue below. */
17229 emit_move_insn (dest, op0);
17233 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
17235 gcc_assert (REGNO (op1) == REGNO (scratch));
17237 x = gen_rtx_AND (vmode, scratch, mask);
17238 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17241 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17242 x = gen_rtx_NOT (vmode, dest);
17243 x = gen_rtx_AND (vmode, x, op0);
17244 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17248 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
17250 x = gen_rtx_AND (vmode, scratch, mask);
17252 else /* alternative 2,4 */
17254 gcc_assert (REGNO (mask) == REGNO (scratch));
17255 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
17256 x = gen_rtx_AND (vmode, scratch, op1);
17258 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17260 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
17262 dest = simplify_gen_subreg (vmode, op0, mode, 0);
17263 x = gen_rtx_AND (vmode, dest, nmask);
17265 else /* alternative 3,4 */
17267 gcc_assert (REGNO (nmask) == REGNO (dest));
17269 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17270 x = gen_rtx_AND (vmode, dest, op0);
17272 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17275 x = gen_rtx_IOR (vmode, dest, scratch);
17276 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17279 /* Return TRUE or FALSE depending on whether the first SET in INSN
17280 has source and destination with matching CC modes, and whether the
17281 CC mode is at least as constrained as REQ_MODE. */
17284 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
17287 enum machine_mode set_mode;
17289 set = PATTERN (insn);
17290 if (GET_CODE (set) == PARALLEL)
17291 set = XVECEXP (set, 0, 0);
17292 gcc_assert (GET_CODE (set) == SET);
17293 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
17295 set_mode = GET_MODE (SET_DEST (set));
17299 if (req_mode != CCNOmode
17300 && (req_mode != CCmode
17301 || XEXP (SET_SRC (set), 1) != const0_rtx))
17305 if (req_mode == CCGCmode)
17309 if (req_mode == CCGOCmode || req_mode == CCNOmode)
17313 if (req_mode == CCZmode)
17324 gcc_unreachable ();
17327 return GET_MODE (SET_SRC (set)) == set_mode;
17330 /* Generate insn patterns to do an integer compare of OPERANDS. */
17333 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
17335 enum machine_mode cmpmode;
17338 cmpmode = SELECT_CC_MODE (code, op0, op1);
17339 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
17341 /* This is very simple, but making the interface the same as in the
17342 FP case makes the rest of the code easier. */
17343 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
17344 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
17346 /* Return the test that should be put into the flags user, i.e.
17347 the bcc, scc, or cmov instruction. */
17348 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
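/* E.g. (illustrative) for a signed "x > y" with Y in a register this
   emits

       (set (reg:CCGC flags) (compare:CCGC (reg x) (reg y)))

   and returns (gt:CCGC (reg:CCGC flags) (const_int 0)) for the
   caller to place inside a bcc, scc or cmov pattern.  */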
17351 /* Figure out whether to use ordered or unordered fp comparisons.
17352 Return the appropriate mode to use. */
17355 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
17357 /* ??? In order to make all comparisons reversible, we do all comparisons
17358 non-trapping when compiling for IEEE. Once gcc is able to distinguish
17359 all forms of trapping and nontrapping comparisons, we can make inequality
17360 comparisons trapping again, since it results in better code when using
17361 FCOM based compares. */
17362 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
17366 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
17368 enum machine_mode mode = GET_MODE (op0);
17370 if (SCALAR_FLOAT_MODE_P (mode))
17372 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
17373 return ix86_fp_compare_mode (code);
17378 /* Only zero flag is needed. */
17379 case EQ: /* ZF=0 */
17380 case NE: /* ZF!=0 */
17382 /* Codes needing carry flag. */
17383 case GEU: /* CF=0 */
17384 case LTU: /* CF=1 */
17385 /* Detect overflow checks. They need just the carry flag. */
17386 if (GET_CODE (op0) == PLUS
17387 && rtx_equal_p (op1, XEXP (op0, 0)))
17391 case GTU: /* CF=0 & ZF=0 */
17392 case LEU: /* CF=1 | ZF=1 */
17393 /* Detect overflow checks. They need just the carry flag. */
17394 if (GET_CODE (op0) == MINUS
17395 && rtx_equal_p (op1, XEXP (op0, 0)))
17399 /* Codes possibly doable only with the sign flag when
17400 comparing against zero. */
17401 case GE: /* SF=OF or SF=0 */
17402 case LT: /* SF<>OF or SF=1 */
if (op1 == const0_rtx)
  return CCGOCmode;
else
  /* For other cases the carry flag is not required.  */
  return CCGCmode;
/* Codes doable only with the sign flag when comparing
   against zero, but we lack a jump instruction for it,
   so we need to use relational tests against overflow,
   which thus needs to be zero.  */
17412 case GT: /* ZF=0 & SF=OF */
17413 case LE: /* ZF=1 | SF<>OF */
if (op1 == const0_rtx)
  return CCNOmode;
else
  return CCGCmode;
/* The strcmp pattern does (use flags), and combine may ask us
   for the proper mode.  */
17423 gcc_unreachable ();
17427 /* Return the fixed registers used for condition codes. */
17430 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
17437 /* If two condition code modes are compatible, return a condition code
mode which is compatible with both.  Otherwise, return VOIDmode.  */
17441 static enum machine_mode
17442 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
  return VOIDmode;

if ((m1 == CCGCmode && m2 == CCGOCmode)
    || (m1 == CCGOCmode && m2 == CCGCmode))
  return CCGOCmode;
17457 gcc_unreachable ();
/* These are only compatible with themselves, which we already
   checked.  */
/* Return a comparison we can do that is equivalent to
   swap_condition (code), apart possibly from orderedness.
   But never change orderedness if TARGET_IEEE_FP, returning
   UNKNOWN in that case if necessary.  */
17499 static enum rtx_code
17500 ix86_fp_swap_condition (enum rtx_code code)
17504 case GT: /* GTU - CF=0 & ZF=0 */
17505 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
17506 case GE: /* GEU - CF=0 */
17507 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
17508 case UNLT: /* LTU - CF=1 */
17509 return TARGET_IEEE_FP ? UNKNOWN : GT;
17510 case UNLE: /* LEU - CF=1 | ZF=1 */
17511 return TARGET_IEEE_FP ? UNKNOWN : GE;
17513 return swap_condition (code);
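/* For example, without TARGET_IEEE_FP swapping "a > b" yields UNLT,
   i.e. "b < a or unordered", trading orderedness for a cheaper
   compare; with TARGET_IEEE_FP we return UNKNOWN rather than change
   orderedness.  */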
/* Return the cost of comparison CODE using the best strategy for
   performance.  All of the following functions use the number of
   instructions as the cost metric.  In the future this should be tweaked
   to compute bytes for optimize_size and to take into account the
   performance of various instructions on various CPUs.  */
17523 ix86_fp_comparison_cost (enum rtx_code code)
17527 /* The cost of code using bit-twiddling on %ah. */
17544 arith_cost = TARGET_IEEE_FP ? 5 : 4;
17548 arith_cost = TARGET_IEEE_FP ? 6 : 4;
17551 gcc_unreachable ();
17554 switch (ix86_fp_comparison_strategy (code))
17556 case IX86_FPCMP_COMI:
17557 return arith_cost > 4 ? 3 : 2;
17558 case IX86_FPCMP_SAHF:
17559 return arith_cost > 4 ? 4 : 3;
/* Return the strategy to use for a floating-point comparison.  We assume
   that fcomi is always preferable where available, since that is also true
   when looking at size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for
   fnstsw+test).  */
17569 enum ix86_fpcmp_strategy
17570 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
17572 /* Do fcomi/sahf based test when profitable. */
if (TARGET_CMOVE)
  return IX86_FPCMP_COMI;
17577 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
17578 return IX86_FPCMP_SAHF;
17580 return IX86_FPCMP_ARITH;
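/* In instruction terms (roughly): IX86_FPCMP_COMI is a single
   fcomi/fucomi, IX86_FPCMP_SAHF is fnstsw %ax followed by sahf, and
   IX86_FPCMP_ARITH is fnstsw followed by bit tests on %ah, as emitted
   by ix86_expand_fp_compare below.  */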
17583 /* Swap, force into registers, or otherwise massage the two operands
17584 to a fp comparison. The operands are updated in place; the new
17585 comparison code is returned. */
17587 static enum rtx_code
17588 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
17590 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
17591 rtx op0 = *pop0, op1 = *pop1;
17592 enum machine_mode op_mode = GET_MODE (op0);
17593 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
17595 /* All of the unordered compare instructions only work on registers.
17596 The same is true of the fcomi compare instructions. The XFmode
17597 compare instructions require registers except when comparing
against zero or when converting operand 1 from fixed point to
floating point.  */

if (!is_sse
17602 && (fpcmp_mode == CCFPUmode
17603 || (op_mode == XFmode
17604 && ! (standard_80387_constant_p (op0) == 1
17605 || standard_80387_constant_p (op1) == 1)
17606 && GET_CODE (op1) != FLOAT)
17607 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
17609 op0 = force_reg (op_mode, op0);
17610 op1 = force_reg (op_mode, op1);
17614 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
17615 things around if they appear profitable, otherwise force op0
17616 into a register. */
17618 if (standard_80387_constant_p (op0) == 0
17620 && ! (standard_80387_constant_p (op1) == 0
17623 enum rtx_code new_code = ix86_fp_swap_condition (code);
17624 if (new_code != UNKNOWN)
17627 tmp = op0, op0 = op1, op1 = tmp;
17633 op0 = force_reg (op_mode, op0);
17635 if (CONSTANT_P (op1))
17637 int tmp = standard_80387_constant_p (op1);
17639 op1 = validize_mem (force_const_mem (op_mode, op1));
17643 op1 = force_reg (op_mode, op1);
17646 op1 = force_reg (op_mode, op1);
17650 /* Try to rearrange the comparison to make it cheaper. */
17651 if (ix86_fp_comparison_cost (code)
17652 > ix86_fp_comparison_cost (swap_condition (code))
17653 && (REG_P (op1) || can_create_pseudo_p ()))
17656 tmp = op0, op0 = op1, op1 = tmp;
17657 code = swap_condition (code);
17659 op0 = force_reg (op_mode, op0);
/* Convert the comparison codes we use to represent FP comparisons to the
   integer code that will result in a proper branch.  Return UNKNOWN if
   no such code is available.  */
17672 ix86_fp_compare_code_to_integer (enum rtx_code code)
17701 /* Generate insn patterns to do a floating point compare of OPERANDS. */
17704 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
17706 enum machine_mode fpcmp_mode, intcmp_mode;
17709 fpcmp_mode = ix86_fp_compare_mode (code);
17710 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
17712 /* Do fcomi/sahf based test when profitable. */
17713 switch (ix86_fp_comparison_strategy (code))
17715 case IX86_FPCMP_COMI:
17716 intcmp_mode = fpcmp_mode;
17717 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17718 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17723 case IX86_FPCMP_SAHF:
17724 intcmp_mode = fpcmp_mode;
17725 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17726 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17730 scratch = gen_reg_rtx (HImode);
17731 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
17732 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
17735 case IX86_FPCMP_ARITH:
17736 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
17737 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17738 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
17740 scratch = gen_reg_rtx (HImode);
17741 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
/* In the unordered case, we have to check C2 for NaNs, which
   doesn't happen to work out to anything nice combination-wise.
   So do some bit twiddling on the value we've got in AH to come
   up with an appropriate set of condition codes.  */
17748 intcmp_mode = CCNOmode;
17753 if (code == GT || !TARGET_IEEE_FP)
17755 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17760 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17761 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17762 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
17763 intcmp_mode = CCmode;
17769 if (code == LT && TARGET_IEEE_FP)
17771 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17772 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
17773 intcmp_mode = CCmode;
17778 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
17784 if (code == GE || !TARGET_IEEE_FP)
17786 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
17791 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17792 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
17798 if (code == LE && TARGET_IEEE_FP)
17800 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17801 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17802 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17803 intcmp_mode = CCmode;
17808 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17814 if (code == EQ && TARGET_IEEE_FP)
17816 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17817 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17818 intcmp_mode = CCmode;
17823 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17829 if (code == NE && TARGET_IEEE_FP)
17831 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17832 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
17838 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17844 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17848 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17853 gcc_unreachable ();
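/* A worked example of the twiddling above: after fnstsw, AH holds C0
   in bit 0 (0x01), C2 in bit 2 (0x04) and C3 in bit 6 (0x40).  For the
   IEEE-safe EQ we mask AH with 0x45 and compare against 0x40: equal
   operands set C3 alone, while a NaN sets C0, C2 and C3 together and
   so fails the test.  */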
17861 /* Return the test that should be put into the flags user, i.e.
17862 the bcc, scc, or cmov instruction. */
17863 return gen_rtx_fmt_ee (code, VOIDmode,
17864 gen_rtx_REG (intcmp_mode, FLAGS_REG),
17869 ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
17873 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
17874 ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
17876 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
17878 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
17879 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
17882 ret = ix86_expand_int_compare (code, op0, op1);
17888 ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
17890 enum machine_mode mode = GET_MODE (op0);
17902 tmp = ix86_expand_compare (code, op0, op1);
17903 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
17904 gen_rtx_LABEL_REF (VOIDmode, label),
17906 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
17913 /* Expand DImode branch into multiple compare+branch. */
17915 rtx lo[2], hi[2], label2;
17916 enum rtx_code code1, code2, code3;
17917 enum machine_mode submode;
17919 if (CONSTANT_P (op0) && !CONSTANT_P (op1))
17921 tmp = op0, op0 = op1, op1 = tmp;
17922 code = swap_condition (code);
17925 split_double_mode (mode, &op0, 1, lo+0, hi+0);
17926 split_double_mode (mode, &op1, 1, lo+1, hi+1);
17928 submode = mode == DImode ? SImode : DImode;
17930 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
17931 avoid two branches. This costs one extra insn, so disable when
17932 optimizing for size. */
17934 if ((code == EQ || code == NE)
17935 && (!optimize_insn_for_size_p ()
17936 || hi[1] == const0_rtx || lo[1] == const0_rtx))
17941 if (hi[1] != const0_rtx)
17942 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
17943 NULL_RTX, 0, OPTAB_WIDEN);
17946 if (lo[1] != const0_rtx)
17947 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
17948 NULL_RTX, 0, OPTAB_WIDEN);
17950 tmp = expand_binop (submode, ior_optab, xor1, xor0,
17951 NULL_RTX, 0, OPTAB_WIDEN);
17953 ix86_expand_branch (code, tmp, const0_rtx, label);
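/* Schematically, for a doubleword "a == b" this emits
     xor0 = lo(a) ^ lo(b);  xor1 = hi(a) ^ hi(b);
     branch on (xor1 | xor0) == 0
   i.e. one extra ALU insn in exchange for a single conditional jump.  */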
/* Otherwise, if we are doing a less-than or greater-or-equal-than
   comparison, op1 is a constant, and the low word is zero, then we
   can just examine the high word.  Similarly for a low word of -1
   and less-or-equal-than or greater-than.  */
17962 if (CONST_INT_P (hi[1]))
17965 case LT: case LTU: case GE: case GEU:
17966 if (lo[1] == const0_rtx)
17968 ix86_expand_branch (code, hi[0], hi[1], label);
17972 case LE: case LEU: case GT: case GTU:
17973 if (lo[1] == constm1_rtx)
17975 ix86_expand_branch (code, hi[0], hi[1], label);
17983 /* Otherwise, we need two or three jumps. */
17985 label2 = gen_label_rtx ();
code1 = code;
code2 = swap_condition (code);
17989 code3 = unsigned_condition (code);
17993 case LT: case GT: case LTU: case GTU:
17996 case LE: code1 = LT; code2 = GT; break;
17997 case GE: code1 = GT; code2 = LT; break;
17998 case LEU: code1 = LTU; code2 = GTU; break;
17999 case GEU: code1 = GTU; code2 = LTU; break;
18001 case EQ: code1 = UNKNOWN; code2 = NE; break;
18002 case NE: code2 = UNKNOWN; break;
18005 gcc_unreachable ();
/*
 * a < b =>
 *    if (hi(a) < hi(b)) goto true;
 *    if (hi(a) > hi(b)) goto false;
 *    if (lo(a) < lo(b)) goto true;
 * false:
 */
18016 if (code1 != UNKNOWN)
18017 ix86_expand_branch (code1, hi[0], hi[1], label);
18018 if (code2 != UNKNOWN)
18019 ix86_expand_branch (code2, hi[0], hi[1], label2);
18021 ix86_expand_branch (code3, lo[0], lo[1], label);
18023 if (code2 != UNKNOWN)
18024 emit_label (label2);
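/* E.g. a signed doubleword "a <= b" (code LE) uses code1/code2/code3
   = LT/GT/LEU:
     if (hi(a) <  hi(b)) goto label;
     if (hi(a) >  hi(b)) goto label2;
     if (lo(a) <= lo(b)) goto label;    -- unsigned
   label2:  */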
18029 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
18034 /* Split branch based on floating point condition. */
18036 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
18037 rtx target1, rtx target2, rtx tmp, rtx pushed)
18042 if (target2 != pc_rtx)
18045 code = reverse_condition_maybe_unordered (code);
18050 condition = ix86_expand_fp_compare (code, op1, op2,
18053 /* Remove pushed operand from stack. */
18055 ix86_free_from_memory (GET_MODE (pushed));
18057 i = emit_jump_insn (gen_rtx_SET
18059 gen_rtx_IF_THEN_ELSE (VOIDmode,
18060 condition, target1, target2)));
18061 if (split_branch_probability >= 0)
18062 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
18066 ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
18070 gcc_assert (GET_MODE (dest) == QImode);
18072 ret = ix86_expand_compare (code, op0, op1);
18073 PUT_MODE (ret, QImode);
18074 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
/* Expand a comparison setting or clearing the carry flag.  Return true
   when successful, setting *POP to the comparison for the operation.  */
18080 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
18082 enum machine_mode mode =
18083 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
/* Do not handle double-mode compares that go through a special path.  */
if (mode == (TARGET_64BIT ? TImode : DImode))
  return false;
18089 if (SCALAR_FLOAT_MODE_P (mode))
18091 rtx compare_op, compare_seq;
18093 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
/* Shortcut: the following common codes never translate
   into carry flag compares.  */
if (code == EQ || code == NE || code == UNEQ || code == LTGT
    || code == ORDERED || code == UNORDERED)
  return false;
18101 /* These comparisons require zero flag; swap operands so they won't. */
18102 if ((code == GT || code == UNLE || code == LE || code == UNGT)
18103 && !TARGET_IEEE_FP)
18108 code = swap_condition (code);
/* Try to expand the comparison and verify that we end up with a
   carry flag based comparison.  This fails to be true only when
   we decide to expand the comparison using arithmetic, which is
   not too common a scenario.  */
18116 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
18117 compare_seq = get_insns ();
18120 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
18121 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
18122 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
18124 code = GET_CODE (compare_op);
if (code != LTU && code != GEU)
  return false;
18129 emit_insn (compare_seq);
if (!INTEGRAL_MODE_P (mode))
  return false;
18143 /* Convert a==0 into (unsigned)a<1. */
if (op1 != const0_rtx)
  return false;
op1 = const1_rtx;
18149 code = (code == EQ ? LTU : GEU);
18152 /* Convert a>b into b<a or a>=b-1. */
18155 if (CONST_INT_P (op1))
18157 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
/* Bail out on overflow.  We can still swap the operands, but that
   would force loading of the constant into a register.  */
if (op1 == const0_rtx
    || !x86_64_immediate_operand (op1, GET_MODE (op1)))
  return false;
18163 code = (code == GTU ? GEU : LTU);
18170 code = (code == GTU ? LTU : GEU);
18174 /* Convert a>=0 into (unsigned)a<0x80000000. */
if (mode == DImode || op1 != const0_rtx)
  return false;
18179 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18180 code = (code == LT ? GEU : LTU);
if (mode == DImode || op1 != constm1_rtx)
  return false;
18186 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18187 code = (code == LE ? GEU : LTU);
/* Swapping operands may cause a constant to appear as the first operand.  */
if (!nonimmediate_operand (op0, VOIDmode))
  {
    if (!can_create_pseudo_p ())
      return false;
    op0 = force_reg (mode, op0);
  }
18200 *pop = ix86_expand_compare (code, op0, op1);
18201 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
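/* E.g. an unsigned "a > 42" has been rewritten above as "a >= 43",
   i.e. a GEU test, which the adc/sbb users of this function can read
   straight out of the carry flag.  */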
18206 ix86_expand_int_movcc (rtx operands[])
18208 enum rtx_code code = GET_CODE (operands[1]), compare_code;
18209 rtx compare_seq, compare_op;
18210 enum machine_mode mode = GET_MODE (operands[0]);
18211 bool sign_bit_compare_p = false;
18212 rtx op0 = XEXP (operands[1], 0);
18213 rtx op1 = XEXP (operands[1], 1);
18216 compare_op = ix86_expand_compare (code, op0, op1);
18217 compare_seq = get_insns ();
18220 compare_code = GET_CODE (compare_op);
18222 if ((op1 == const0_rtx && (code == GE || code == LT))
18223 || (op1 == constm1_rtx && (code == GT || code == LE)))
18224 sign_bit_compare_p = true;
18226 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
18227 HImode insns, we'd be swallowed in word prefix ops. */
18229 if ((mode != HImode || TARGET_FAST_PREFIX)
18230 && (mode != (TARGET_64BIT ? TImode : DImode))
18231 && CONST_INT_P (operands[2])
18232 && CONST_INT_P (operands[3]))
18234 rtx out = operands[0];
18235 HOST_WIDE_INT ct = INTVAL (operands[2]);
18236 HOST_WIDE_INT cf = INTVAL (operands[3]);
18237 HOST_WIDE_INT diff;
/* Sign bit compares are better done using shifts than by using
   the sbb insn.  */
18242 if (sign_bit_compare_p
18243 || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
18245 /* Detect overlap between destination and compare sources. */
18248 if (!sign_bit_compare_p)
18251 bool fpcmp = false;
18253 compare_code = GET_CODE (compare_op);
18255 flags = XEXP (compare_op, 0);
18257 if (GET_MODE (flags) == CCFPmode
18258 || GET_MODE (flags) == CCFPUmode)
18262 = ix86_fp_compare_code_to_integer (compare_code);
/* To simplify the rest of the code, restrict to the GEU case.  */
18266 if (compare_code == LTU)
18268 HOST_WIDE_INT tmp = ct;
18271 compare_code = reverse_condition (compare_code);
18272 code = reverse_condition (code);
18277 PUT_CODE (compare_op,
18278 reverse_condition_maybe_unordered
18279 (GET_CODE (compare_op)));
18281 PUT_CODE (compare_op,
18282 reverse_condition (GET_CODE (compare_op)));
18286 if (reg_overlap_mentioned_p (out, op0)
18287 || reg_overlap_mentioned_p (out, op1))
18288 tmp = gen_reg_rtx (mode);
18290 if (mode == DImode)
18291 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
18293 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
18294 flags, compare_op));
18298 if (code == GT || code == GE)
18299 code = reverse_condition (code);
18302 HOST_WIDE_INT tmp = ct;
18307 tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
18320 tmp = expand_simple_binop (mode, PLUS,
18322 copy_rtx (tmp), 1, OPTAB_DIRECT);
18333 tmp = expand_simple_binop (mode, IOR,
18335 copy_rtx (tmp), 1, OPTAB_DIRECT);
18337 else if (diff == -1 && ct)
18347 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18349 tmp = expand_simple_binop (mode, PLUS,
18350 copy_rtx (tmp), GEN_INT (cf),
18351 copy_rtx (tmp), 1, OPTAB_DIRECT);
18359 * andl cf - ct, dest
18369 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18372 tmp = expand_simple_binop (mode, AND,
18374 gen_int_mode (cf - ct, mode),
18375 copy_rtx (tmp), 1, OPTAB_DIRECT);
18377 tmp = expand_simple_binop (mode, PLUS,
18378 copy_rtx (tmp), GEN_INT (ct),
18379 copy_rtx (tmp), 1, OPTAB_DIRECT);
18382 if (!rtx_equal_p (tmp, out))
18383 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
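/* Schematically, the general flag-based path above is
     cmp  op0, op1
     sbb  dest, dest          ; dest = -1 or 0 from the carry
     and  (cf - ct), dest     ; dest = cf - ct or 0
     add  ct, dest            ; dest = cf or ct
   with the NOT/AND/ADD steps dropped where the constants allow.  */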
18390 enum machine_mode cmp_mode = GET_MODE (op0);
18393 tmp = ct, ct = cf, cf = tmp;
18396 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18398 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18400 /* We may be reversing unordered compare to normal compare, that
18401 is not valid in general (we may convert non-trapping condition
18402 to trapping one), however on i386 we currently emit all
18403 comparisons unordered. */
18404 compare_code = reverse_condition_maybe_unordered (compare_code);
18405 code = reverse_condition_maybe_unordered (code);
18409 compare_code = reverse_condition (compare_code);
18410 code = reverse_condition (code);
18414 compare_code = UNKNOWN;
18415 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
18416 && CONST_INT_P (op1))
18418 if (op1 == const0_rtx
18419 && (code == LT || code == GE))
18420 compare_code = code;
18421 else if (op1 == constm1_rtx)
18425 else if (code == GT)
18430 /* Optimize dest = (op0 < 0) ? -1 : cf. */
18431 if (compare_code != UNKNOWN
18432 && GET_MODE (op0) == GET_MODE (out)
18433 && (cf == -1 || ct == -1))
/* If the lea code below could be used, only optimize
   if it results in a 2 insn sequence.  */
18438 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
18439 || diff == 3 || diff == 5 || diff == 9)
18440 || (compare_code == LT && ct == -1)
18441 || (compare_code == GE && cf == -1))
18444 * notl op1 (if necessary)
18452 code = reverse_condition (code);
18455 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18457 out = expand_simple_binop (mode, IOR,
18459 out, 1, OPTAB_DIRECT);
18460 if (out != operands[0])
18461 emit_move_insn (operands[0], out);
18468 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
18469 || diff == 3 || diff == 5 || diff == 9)
18470 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
18472 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
18478 * lea cf(dest*(ct-cf)),dest
18482 * This also catches the degenerate setcc-only case.
18488 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
/* On x86_64 the lea instruction operates on Pmode, so we need
   to get the arithmetic done in the proper mode to match.  */
18494 tmp = copy_rtx (out);
18498 out1 = copy_rtx (out);
18499 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
18503 tmp = gen_rtx_PLUS (mode, tmp, out1);
18509 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
18512 if (!rtx_equal_p (tmp, out))
18515 out = force_operand (tmp, copy_rtx (out));
18517 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
18519 if (!rtx_equal_p (out, operands[0]))
18520 emit_move_insn (operands[0], copy_rtx (out));
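/* E.g. ct = 5, cf = 2 gives diff = 3 and emits
     setcc dest                  ; dest = 1 or 0
     lea   2(dest,dest,2), dest  ; dest = dest * 3 + 2 = 5 or 2
   which also covers the degenerate setcc-only case.  */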
18526 * General case: Jumpful:
18527 * xorl dest,dest cmpl op1, op2
18528 * cmpl op1, op2 movl ct, dest
18529 * setcc dest jcc 1f
18530 * decl dest movl cf, dest
18531 * andl (cf-ct),dest 1:
18534 * Size 20. Size 14.
* This is reasonably steep, but branch mispredict costs are
* high on modern cpus, so consider failing only if optimizing
* for size.  */
18541 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18542 && BRANCH_COST (optimize_insn_for_speed_p (),
18547 enum machine_mode cmp_mode = GET_MODE (op0);
18552 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18554 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18556 /* We may be reversing unordered compare to normal compare,
18557 that is not valid in general (we may convert non-trapping
18558 condition to trapping one), however on i386 we currently
18559 emit all comparisons unordered. */
18560 code = reverse_condition_maybe_unordered (code);
18564 code = reverse_condition (code);
18565 if (compare_code != UNKNOWN)
18566 compare_code = reverse_condition (compare_code);
18570 if (compare_code != UNKNOWN)
18572 /* notl op1 (if needed)
18577 For x < 0 (resp. x <= -1) there will be no notl,
so if possible swap the constants to get rid of the
complement.
18580 True/false will be -1/0 while code below (store flag
18581 followed by decrement) is 0/-1, so the constants need
18582 to be exchanged once more. */
18584 if (compare_code == GE || !cf)
18586 code = reverse_condition (code);
18591 HOST_WIDE_INT tmp = cf;
18596 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18600 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18602 out = expand_simple_binop (mode, PLUS, copy_rtx (out),
18604 copy_rtx (out), 1, OPTAB_DIRECT);
18607 out = expand_simple_binop (mode, AND, copy_rtx (out),
18608 gen_int_mode (cf - ct, mode),
18609 copy_rtx (out), 1, OPTAB_DIRECT);
18611 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
18612 copy_rtx (out), 1, OPTAB_DIRECT);
18613 if (!rtx_equal_p (out, operands[0]))
18614 emit_move_insn (operands[0], copy_rtx (out));
18620 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
/* Try a few more things with specific constants and a variable.  */
18625 rtx var, orig_out, out, tmp;
18627 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
18630 /* If one of the two operands is an interesting constant, load a
18631 constant with the above and mask it in with a logical operation. */
18633 if (CONST_INT_P (operands[2]))
18636 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
18637 operands[3] = constm1_rtx, op = and_optab;
18638 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
18639 operands[3] = const0_rtx, op = ior_optab;
18643 else if (CONST_INT_P (operands[3]))
18646 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
18647 operands[2] = constm1_rtx, op = and_optab;
else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
18649 operands[2] = const0_rtx, op = ior_optab;
18656 orig_out = operands[0];
18657 tmp = gen_reg_rtx (mode);
18660 /* Recurse to get the constant loaded. */
18661 if (ix86_expand_int_movcc (operands) == 0)
18664 /* Mask in the interesting variable. */
18665 out = expand_binop (mode, op, var, tmp, orig_out, 0,
18667 if (!rtx_equal_p (out, orig_out))
18668 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
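/* E.g. "dest = cond ? 0 : var": the recursive call loads
   tmp = cond ? 0 : -1, and the variable is then masked in with AND,
   avoiding any conditional move.  */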
* For comparison with the above,
*
*    movl cf, dest
*    movl ct, tmp
*    cmpl op1, op2
*    cmovcc tmp, dest
*
* Size 15.
*/
18684 if (! nonimmediate_operand (operands[2], mode))
18685 operands[2] = force_reg (mode, operands[2]);
18686 if (! nonimmediate_operand (operands[3], mode))
18687 operands[3] = force_reg (mode, operands[3]);
if (! register_operand (operands[2], VOIDmode)
    && (mode == QImode
        || ! register_operand (operands[3], VOIDmode)))
  operands[2] = force_reg (mode, operands[2]);

if (mode == QImode
    && ! register_operand (operands[3], VOIDmode))
  operands[3] = force_reg (mode, operands[3]);
18698 emit_insn (compare_seq);
18699 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18700 gen_rtx_IF_THEN_ELSE (mode,
18701 compare_op, operands[2],
18706 /* Swap, force into registers, or otherwise massage the two operands
18707 to an sse comparison with a mask result. Thus we differ a bit from
18708 ix86_prepare_fp_compare_args which expects to produce a flags result.
18710 The DEST operand exists to help determine whether to commute commutative
18711 operators. The POP0/POP1 operands are updated in place. The new
18712 comparison code is returned, or UNKNOWN if not implementable. */
18714 static enum rtx_code
18715 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
18716 rtx *pop0, rtx *pop1)
18724 /* We have no LTGT as an operator. We could implement it with
18725 NE & ORDERED, but this requires an extra temporary. It's
18726 not clear that it's worth it. */
18733 /* These are supported directly. */
18740 /* For commutative operators, try to canonicalize the destination
18741 operand to be first in the comparison - this helps reload to
18742 avoid extra moves. */
18743 if (!dest || !rtx_equal_p (dest, *pop1))
18751 /* These are not supported directly. Swap the comparison operands
18752 to transform into something that is supported. */
18756 code = swap_condition (code);
18760 gcc_unreachable ();
18766 /* Detect conditional moves that exactly match min/max operational
18767 semantics. Note that this is IEEE safe, as long as we don't
18768 interchange the operands.
18770 Returns FALSE if this conditional move doesn't match a MIN/MAX,
18771 and TRUE if the operation is successful and instructions are emitted. */
18774 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
18775 rtx cmp_op1, rtx if_true, rtx if_false)
18777 enum machine_mode mode;
18783 else if (code == UNGE)
18786 if_true = if_false;
18792 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
18794 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
18799 mode = GET_MODE (dest);
18801 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
18802 but MODE may be a vector mode and thus not appropriate. */
18803 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
18805 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
18808 if_true = force_reg (mode, if_true);
18809 v = gen_rtvec (2, if_true, if_false);
18810 tmp = gen_rtx_UNSPEC (mode, v, u);
18814 code = is_min ? SMIN : SMAX;
18815 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
18818 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
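/* E.g. "dest = a < b ? a : b" matches with IF_TRUE == CMP_OP0 and
   IF_FALSE == CMP_OP1 and becomes a single SMIN (minss/minps style);
   keeping the operand order is what makes this IEEE safe.  */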
18822 /* Expand an sse vector comparison. Return the register with the result. */
18825 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
18826 rtx op_true, rtx op_false)
18828 enum machine_mode mode = GET_MODE (dest);
18831 cmp_op0 = force_reg (mode, cmp_op0);
18832 if (!nonimmediate_operand (cmp_op1, mode))
18833 cmp_op1 = force_reg (mode, cmp_op1);
18836 || reg_overlap_mentioned_p (dest, op_true)
18837 || reg_overlap_mentioned_p (dest, op_false))
18838 dest = gen_reg_rtx (mode);
18840 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
18841 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18846 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
18847 operations. This is used for both scalar and vector conditional moves. */
18850 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
18852 enum machine_mode mode = GET_MODE (dest);
18855 if (op_false == CONST0_RTX (mode))
18857 op_true = force_reg (mode, op_true);
18858 x = gen_rtx_AND (mode, cmp, op_true);
18859 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18861 else if (op_true == CONST0_RTX (mode))
18863 op_false = force_reg (mode, op_false);
18864 x = gen_rtx_NOT (mode, cmp);
18865 x = gen_rtx_AND (mode, x, op_false);
18866 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18868 else if (TARGET_XOP)
18870 rtx pcmov = gen_rtx_SET (mode, dest,
18871 gen_rtx_IF_THEN_ELSE (mode, cmp,
18878 op_true = force_reg (mode, op_true);
18879 op_false = force_reg (mode, op_false);
18881 t2 = gen_reg_rtx (mode);
18883 t3 = gen_reg_rtx (mode);
18887 x = gen_rtx_AND (mode, op_true, cmp);
18888 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
18890 x = gen_rtx_NOT (mode, cmp);
18891 x = gen_rtx_AND (mode, x, op_false);
18892 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
18894 x = gen_rtx_IOR (mode, t3, t2);
18895 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
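/* I.e. the general form computes
     dest = (cmp & op_true) | (~cmp & op_false)
   relying on CMP being an all-ones / all-zeros mask per element;
   TARGET_XOP folds the three logical ops into a single vpcmov.  */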
18899 /* Expand a floating-point conditional move. Return true if successful. */
18902 ix86_expand_fp_movcc (rtx operands[])
18904 enum machine_mode mode = GET_MODE (operands[0]);
18905 enum rtx_code code = GET_CODE (operands[1]);
18906 rtx tmp, compare_op;
18907 rtx op0 = XEXP (operands[1], 0);
18908 rtx op1 = XEXP (operands[1], 1);
18910 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
18912 enum machine_mode cmode;
/* Since we have no cmove for SSE registers, don't force bad register
   allocation just to gain access to it.  Deny movcc when the
   comparison mode doesn't match the move mode.  */
18917 cmode = GET_MODE (op0);
18918 if (cmode == VOIDmode)
18919 cmode = GET_MODE (op1);
18923 code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
18924 if (code == UNKNOWN)
18927 if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
18928 operands[2], operands[3]))
18931 tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
18932 operands[2], operands[3]);
18933 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
18937 /* The floating point conditional move instructions don't directly
18938 support conditions resulting from a signed integer comparison. */
18940 compare_op = ix86_expand_compare (code, op0, op1);
18941 if (!fcmov_comparison_operator (compare_op, VOIDmode))
18943 tmp = gen_reg_rtx (QImode);
18944 ix86_expand_setcc (tmp, code, op0, op1);
18946 compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
18949 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18950 gen_rtx_IF_THEN_ELSE (mode, compare_op,
18951 operands[2], operands[3])));
18956 /* Expand a floating-point vector conditional move; a vcond operation
18957 rather than a movcc operation. */
18960 ix86_expand_fp_vcond (rtx operands[])
18962 enum rtx_code code = GET_CODE (operands[3]);
18965 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
18966 &operands[4], &operands[5]);
18967 if (code == UNKNOWN)
18970 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
18971 operands[5], operands[1], operands[2]))
18974 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
18975 operands[1], operands[2]);
18976 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
18980 /* Expand a signed/unsigned integral vector conditional move. */
18983 ix86_expand_int_vcond (rtx operands[])
18985 enum machine_mode mode = GET_MODE (operands[0]);
18986 enum rtx_code code = GET_CODE (operands[3]);
18987 bool negate = false;
18990 cop0 = operands[4];
18991 cop1 = operands[5];
18993 /* XOP supports all of the comparisons on all vector int types. */
18996 /* Canonicalize the comparison to EQ, GT, GTU. */
19007 code = reverse_condition (code);
19013 code = reverse_condition (code);
19019 code = swap_condition (code);
19020 x = cop0, cop0 = cop1, cop1 = x;
19024 gcc_unreachable ();
19027 /* Only SSE4.1/SSE4.2 supports V2DImode. */
19028 if (mode == V2DImode)
19033 /* SSE4.1 supports EQ. */
19034 if (!TARGET_SSE4_1)
19040 /* SSE4.2 supports GT/GTU. */
19041 if (!TARGET_SSE4_2)
19046 gcc_unreachable ();
/* Unsigned parallel compare is not supported by the hardware.
   Play some tricks to turn this into a signed comparison
   against zero.  */
19055 cop0 = force_reg (mode, cop0);
19063 rtx (*gen_sub3) (rtx, rtx, rtx);
/* Subtract (-(INT MAX) - 1) from both operands to make
   them signed.  */
19068 gen_sub3 = (mode == V4SImode
19069 ? gen_subv4si3 : gen_subv2di3);
19070 t1 = gen_reg_rtx (mode);
19071 emit_insn (gen_sub3 (t1, cop0, mask));
19073 t2 = gen_reg_rtx (mode);
19074 emit_insn (gen_sub3 (t2, cop1, mask));
19084 /* Perform a parallel unsigned saturating subtraction. */
19085 x = gen_reg_rtx (mode);
19086 emit_insn (gen_rtx_SET (VOIDmode, x,
19087 gen_rtx_US_MINUS (mode, cop0, cop1)));
19090 cop1 = CONST0_RTX (mode);
19096 gcc_unreachable ();
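/* E.g. for unsigned "a > b": the saturating subtraction a - b is
   nonzero exactly where a > b, so the comparison can be rewritten
   as an equality test of the subtraction result against zero, with
   the sense of the result inverted.  */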
19101 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
19102 operands[1+negate], operands[2-negate]);
19104 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
19105 operands[2-negate]);
19109 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
19110 true if we should do zero extension, else sign extension. HIGH_P is
19111 true if we want the N/2 high elements, else the low elements. */
19114 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
19116 enum machine_mode imode = GET_MODE (operands[1]);
19117 rtx (*unpack)(rtx, rtx, rtx);
19124 unpack = gen_vec_interleave_highv16qi;
19126 unpack = gen_vec_interleave_lowv16qi;
19130 unpack = gen_vec_interleave_highv8hi;
19132 unpack = gen_vec_interleave_lowv8hi;
19136 unpack = gen_vec_interleave_highv4si;
19138 unpack = gen_vec_interleave_lowv4si;
19141 gcc_unreachable ();
19144 dest = gen_lowpart (imode, operands[0]);
19147 se = force_reg (imode, CONST0_RTX (imode));
19149 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
19150 operands[1], pc_rtx, pc_rtx);
19152 emit_insn (unpack (dest, operands[1], se));
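/* For the signed case SE is a per-element mask from pcmpgt
   (0 > x), so interleaving the source with SE replicates each
   element's sign bit; the unsigned case interleaves with zeros.  */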
19155 /* This function performs the same task as ix86_expand_sse_unpack,
19156 but with SSE4.1 instructions. */
19159 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
19161 enum machine_mode imode = GET_MODE (operands[1]);
19162 rtx (*unpack)(rtx, rtx);
19169 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
19171 unpack = gen_sse4_1_sign_extendv8qiv8hi2;
19175 unpack = gen_sse4_1_zero_extendv4hiv4si2;
19177 unpack = gen_sse4_1_sign_extendv4hiv4si2;
19181 unpack = gen_sse4_1_zero_extendv2siv2di2;
19183 unpack = gen_sse4_1_sign_extendv2siv2di2;
19186 gcc_unreachable ();
19189 dest = operands[0];
19192 /* Shift higher 8 bytes to lower 8 bytes. */
19193 src = gen_reg_rtx (imode);
19194 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
19195 gen_lowpart (V1TImode, operands[1]),
19201 emit_insn (unpack (dest, src));
/* Expand conditional increment or decrement using adc/sbb instructions.
   The default case using setcc followed by the conditional move can be
   done by generic code.  */
19208 ix86_expand_int_addcc (rtx operands[])
19210 enum rtx_code code = GET_CODE (operands[1]);
19212 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
19214 rtx val = const0_rtx;
19215 bool fpcmp = false;
19216 enum machine_mode mode;
19217 rtx op0 = XEXP (operands[1], 0);
19218 rtx op1 = XEXP (operands[1], 1);
19220 if (operands[3] != const1_rtx
19221 && operands[3] != constm1_rtx)
19223 if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
19225 code = GET_CODE (compare_op);
19227 flags = XEXP (compare_op, 0);
19229 if (GET_MODE (flags) == CCFPmode
19230 || GET_MODE (flags) == CCFPUmode)
19233 code = ix86_fp_compare_code_to_integer (code);
19240 PUT_CODE (compare_op,
19241 reverse_condition_maybe_unordered
19242 (GET_CODE (compare_op)));
19244 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
19247 mode = GET_MODE (operands[0]);
19249 /* Construct either adc or sbb insn. */
19250 if ((code == LTU) == (operands[3] == constm1_rtx))
19255 insn = gen_subqi3_carry;
19258 insn = gen_subhi3_carry;
19261 insn = gen_subsi3_carry;
19264 insn = gen_subdi3_carry;
19267 gcc_unreachable ();
19275 insn = gen_addqi3_carry;
19278 insn = gen_addhi3_carry;
19281 insn = gen_addsi3_carry;
19284 insn = gen_adddi3_carry;
19287 gcc_unreachable ();
19290 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
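/* E.g. the unsigned "if (a < b) x++;" becomes
     cmp  a, b        ; LTU sets the carry flag
     adc  $0, x       ; x += CF
   where VAL is const0_rtx and the carry supplies the increment.  */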
/* Split operands 0 and 1 into half-mode parts.  Similar to
   split_double_mode, but works for floating-point parameters and
   nonoffsettable memories.  For pushes, it returns just stack offsets;
   the values will be saved in the right order.  Maximally four parts
   are generated.  */
19302 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
19307 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
19309 size = (GET_MODE_SIZE (mode) + 4) / 8;
19311 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
19312 gcc_assert (size >= 2 && size <= 4);
/* Optimize constant pool references to immediates.  This is used by
   fp moves, which force all constants to memory to allow combining.  */
19316 if (MEM_P (operand) && MEM_READONLY_P (operand))
19318 rtx tmp = maybe_get_pool_constant (operand);
19323 if (MEM_P (operand) && !offsettable_memref_p (operand))
/* The only non-offsettable memories we handle are pushes.  */
19326 int ok = push_operand (operand, VOIDmode);
19330 operand = copy_rtx (operand);
19331 PUT_MODE (operand, Pmode);
19332 parts[0] = parts[1] = parts[2] = parts[3] = operand;
19336 if (GET_CODE (operand) == CONST_VECTOR)
19338 enum machine_mode imode = int_mode_for_mode (mode);
19339 /* Caution: if we looked through a constant pool memory above,
19340 the operand may actually have a different mode now. That's
19341 ok, since we want to pun this all the way back to an integer. */
19342 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
19343 gcc_assert (operand != NULL);
19349 if (mode == DImode)
19350 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
19355 if (REG_P (operand))
19357 gcc_assert (reload_completed);
19358 for (i = 0; i < size; i++)
19359 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
19361 else if (offsettable_memref_p (operand))
19363 operand = adjust_address (operand, SImode, 0);
19364 parts[0] = operand;
19365 for (i = 1; i < size; i++)
19366 parts[i] = adjust_address (operand, SImode, 4 * i);
19368 else if (GET_CODE (operand) == CONST_DOUBLE)
19373 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
19377 real_to_target (l, &r, mode);
19378 parts[3] = gen_int_mode (l[3], SImode);
19379 parts[2] = gen_int_mode (l[2], SImode);
19382 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
19383 parts[2] = gen_int_mode (l[2], SImode);
19386 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
19389 gcc_unreachable ();
19391 parts[1] = gen_int_mode (l[1], SImode);
19392 parts[0] = gen_int_mode (l[0], SImode);
19395 gcc_unreachable ();
19400 if (mode == TImode)
19401 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
19402 if (mode == XFmode || mode == TFmode)
19404 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
19405 if (REG_P (operand))
19407 gcc_assert (reload_completed);
19408 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
19409 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
19411 else if (offsettable_memref_p (operand))
19413 operand = adjust_address (operand, DImode, 0);
19414 parts[0] = operand;
19415 parts[1] = adjust_address (operand, upper_mode, 8);
19417 else if (GET_CODE (operand) == CONST_DOUBLE)
19422 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
19423 real_to_target (l, &r, mode);
19425 /* Do not use shift by 32 to avoid warning on 32bit systems. */
19426 if (HOST_BITS_PER_WIDE_INT >= 64)
19429 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
19430 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
19433 parts[0] = immed_double_const (l[0], l[1], DImode);
19435 if (upper_mode == SImode)
19436 parts[1] = gen_int_mode (l[2], SImode);
19437 else if (HOST_BITS_PER_WIDE_INT >= 64)
19440 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
19441 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
19444 parts[1] = immed_double_const (l[2], l[3], DImode);
19447 gcc_unreachable ();
/* Emit insns to perform a move or push of DI, DF, XF, and TF values.
   Return false when normal moves are needed; true when all required
   insns have been emitted.  Operands 2-5 receive the destination parts
   in the correct order; operands 6-9 receive the source parts.  */
19460 ix86_split_long_move (rtx operands[])
19465 int collisions = 0;
19466 enum machine_mode mode = GET_MODE (operands[0]);
19467 bool collisionparts[4];
/* The DFmode expanders may ask us to move a double.
   For a 64-bit target this is a single move.  By hiding this fact
   here we simplify the i386.md splitters.  */
19472 if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
19474 /* Optimize constant pool reference to immediates. This is used by
19475 fp moves, that force all constants to memory to allow combining. */
19477 if (MEM_P (operands[1])
19478 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
19479 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
19480 operands[1] = get_pool_constant (XEXP (operands[1], 0));
19481 if (push_operand (operands[0], VOIDmode))
19483 operands[0] = copy_rtx (operands[0]);
19484 PUT_MODE (operands[0], Pmode);
19487 operands[0] = gen_lowpart (DImode, operands[0]);
19488 operands[1] = gen_lowpart (DImode, operands[1]);
19489 emit_move_insn (operands[0], operands[1]);
19493 /* The only non-offsettable memory we handle is push. */
19494 if (push_operand (operands[0], VOIDmode))
19497 gcc_assert (!MEM_P (operands[0])
19498 || offsettable_memref_p (operands[0]));
19500 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
19501 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
/* When emitting a push, take care of source operands on the stack.  */
19504 if (push && MEM_P (operands[1])
19505 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
19507 rtx src_base = XEXP (part[1][nparts - 1], 0);
19509 /* Compensate for the stack decrement by 4. */
19510 if (!TARGET_64BIT && nparts == 3
19511 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
19512 src_base = plus_constant (src_base, 4);
19514 /* src_base refers to the stack pointer and is
19515 automatically decreased by emitted push. */
19516 for (i = 0; i < nparts; i++)
19517 part[1][i] = change_address (part[1][i],
19518 GET_MODE (part[1][i]), src_base);
/* We need to do the copy in the right order in case an address
   register of the source overlaps the destination.  */
19523 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
19527 for (i = 0; i < nparts; i++)
19530 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
19531 if (collisionparts[i])
19535 /* Collision in the middle part can be handled by reordering. */
19536 if (collisions == 1 && nparts == 3 && collisionparts [1])
19538 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19539 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19541 else if (collisions == 1
19543 && (collisionparts [1] || collisionparts [2]))
19545 if (collisionparts [1])
19547 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19548 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19552 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
19553 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
19557 /* If there are more collisions, we can't handle it by reordering.
19558 Do an lea to the last part and use only one colliding move. */
19559 else if (collisions > 1)
19565 base = part[0][nparts - 1];
19567 /* Handle the case when the last part isn't valid for lea.
19568 Happens in 64-bit mode storing the 12-byte XFmode. */
19569 if (GET_MODE (base) != Pmode)
19570 base = gen_rtx_REG (Pmode, REGNO (base));
19572 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
19573 part[1][0] = replace_equiv_address (part[1][0], base);
19574 for (i = 1; i < nparts; i++)
19576 tmp = plus_constant (base, UNITS_PER_WORD * i);
19577 part[1][i] = replace_equiv_address (part[1][i], tmp);
19588 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
19589 emit_insn (gen_addsi3 (stack_pointer_rtx,
19590 stack_pointer_rtx, GEN_INT (-4)));
19591 emit_move_insn (part[0][2], part[1][2]);
19593 else if (nparts == 4)
19595 emit_move_insn (part[0][3], part[1][3]);
19596 emit_move_insn (part[0][2], part[1][2]);
/* In 64-bit mode we don't have a 32-bit push available.  In case this
   is a register, it is OK - we will just use the larger counterpart.
   We also retype memory - this comes from an attempt to avoid the REX
   prefix on moving the second half of a TFmode value.  */
19605 if (GET_MODE (part[1][1]) == SImode)
19607 switch (GET_CODE (part[1][1]))
19610 part[1][1] = adjust_address (part[1][1], DImode, 0);
19614 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
19618 gcc_unreachable ();
19621 if (GET_MODE (part[1][0]) == SImode)
19622 part[1][0] = part[1][1];
19625 emit_move_insn (part[0][1], part[1][1]);
19626 emit_move_insn (part[0][0], part[1][0]);
/* Choose the correct order so as not to overwrite the source before it is copied.  */
19631 if ((REG_P (part[0][0])
19632 && REG_P (part[1][1])
19633 && (REGNO (part[0][0]) == REGNO (part[1][1])
19635 && REGNO (part[0][0]) == REGNO (part[1][2]))
19637 && REGNO (part[0][0]) == REGNO (part[1][3]))))
19639 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
19641 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
19643 operands[2 + i] = part[0][j];
19644 operands[6 + i] = part[1][j];
19649 for (i = 0; i < nparts; i++)
19651 operands[2 + i] = part[0][i];
19652 operands[6 + i] = part[1][i];
19656 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
19657 if (optimize_insn_for_size_p ())
19659 for (j = 0; j < nparts - 1; j++)
19660 if (CONST_INT_P (operands[6 + j])
19661 && operands[6 + j] != const0_rtx
19662 && REG_P (operands[2 + j]))
19663 for (i = j; i < nparts - 1; i++)
19664 if (CONST_INT_P (operands[7 + i])
19665 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
19666 operands[7 + i] = operands[2 + j];
19669 for (i = 0; i < nparts; i++)
19670 emit_move_insn (operands[2 + i], operands[6 + i]);
19675 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
19676 left shift by a constant, either using a single shift or
19677 a sequence of add instructions. */
19680 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
19682 rtx (*insn)(rtx, rtx, rtx);
19685 || (count * ix86_cost->add <= ix86_cost->shift_const
19686 && !optimize_insn_for_size_p ()))
19688 insn = mode == DImode ? gen_addsi3 : gen_adddi3;
19689 while (count-- > 0)
19690 emit_insn (insn (operand, operand, operand));
19694 insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19695 emit_insn (insn (operand, operand, GEN_INT (count)));
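/* E.g. a shift left by 2 may be emitted as two "add reg, reg" insns
   when 2 * ix86_cost->add is no more than ix86_cost->shift_const and
   we are not optimizing for size.  */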
19700 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
19702 rtx (*gen_ashl3)(rtx, rtx, rtx);
19703 rtx (*gen_shld)(rtx, rtx, rtx);
19704 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19706 rtx low[2], high[2];
19709 if (CONST_INT_P (operands[2]))
19711 split_double_mode (mode, operands, 2, low, high);
19712 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19714 if (count >= half_width)
19716 emit_move_insn (high[0], low[1]);
19717 emit_move_insn (low[0], const0_rtx);
19719 if (count > half_width)
19720 ix86_expand_ashl_const (high[0], count - half_width, mode);
19724 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19726 if (!rtx_equal_p (operands[0], operands[1]))
19727 emit_move_insn (operands[0], operands[1]);
19729 emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
19730 ix86_expand_ashl_const (low[0], count, mode);
19735 split_double_mode (mode, operands, 1, low, high);
19737 gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19739 if (operands[1] == const1_rtx)
/* Assuming we've chosen QImode-capable registers, 1 << N
   can be done with two 32/64-bit shifts, no branches, no cmoves.  */
19743 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
19745 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
19747 ix86_expand_clear (low[0]);
19748 ix86_expand_clear (high[0]);
19749 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));
19751 d = gen_lowpart (QImode, low[0]);
19752 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19753 s = gen_rtx_EQ (QImode, flags, const0_rtx);
19754 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19756 d = gen_lowpart (QImode, high[0]);
19757 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19758 s = gen_rtx_NE (QImode, flags, const0_rtx);
19759 emit_insn (gen_rtx_SET (VOIDmode, d, s));
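/* That is, for "1 << n": clear both halves, test bit 5 (bit 6 for
   64-bit) of the count, and let the two setcc insns drop the single 1
   into whichever half the count selects; the two half-wide shifts
   emitted below then move it into position.  */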
19762 /* Otherwise, we can get the same results by manually performing
19763 a bit extract operation on bit 5/6, and then performing the two
19764 shifts. The two methods of getting 0/1 into low/high are exactly
19765 the same size. Avoiding the shift in the bit extract case helps
19766 pentium4 a bit; no one else seems to care much either way. */
19769 enum machine_mode half_mode;
19770 rtx (*gen_lshr3)(rtx, rtx, rtx);
19771 rtx (*gen_and3)(rtx, rtx, rtx);
19772 rtx (*gen_xor3)(rtx, rtx, rtx);
19773 HOST_WIDE_INT bits;
19776 if (mode == DImode)
19778 half_mode = SImode;
19779 gen_lshr3 = gen_lshrsi3;
19780 gen_and3 = gen_andsi3;
19781 gen_xor3 = gen_xorsi3;
19786 half_mode = DImode;
19787 gen_lshr3 = gen_lshrdi3;
19788 gen_and3 = gen_anddi3;
19789 gen_xor3 = gen_xordi3;
19793 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
19794 x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
19796 x = gen_lowpart (half_mode, operands[2]);
19797 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
19799 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
19800 emit_insn (gen_and3 (high[0], high[0], const1_rtx));
19801 emit_move_insn (low[0], high[0]);
19802 emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
19805 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19806 emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
19810 if (operands[1] == constm1_rtx)
19812 /* For -1 << N, we can avoid the shld instruction, because we
19813 know that we're shifting 0...31/63 ones into a -1. */
19814 emit_move_insn (low[0], constm1_rtx);
19815 if (optimize_insn_for_size_p ())
19816 emit_move_insn (high[0], low[0]);
19818 emit_move_insn (high[0], constm1_rtx);
19822 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19824 if (!rtx_equal_p (operands[0], operands[1]))
19825 emit_move_insn (operands[0], operands[1]);
19827 split_double_mode (mode, operands, 1, low, high);
19828 emit_insn (gen_shld (high[0], low[0], operands[2]));
19831 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19833 if (TARGET_CMOVE && scratch)
19835 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19836 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19838 ix86_expand_clear (scratch);
19839 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
19843 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19844 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19846 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
19851 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
19853 rtx (*gen_ashr3)(rtx, rtx, rtx)
19854 = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
19855 rtx (*gen_shrd)(rtx, rtx, rtx);
19856 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19858 rtx low[2], high[2];
19861 if (CONST_INT_P (operands[2]))
19863 split_double_mode (mode, operands, 2, low, high);
19864 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19866 if (count == GET_MODE_BITSIZE (mode) - 1)
19868 emit_move_insn (high[0], high[1]);
19869 emit_insn (gen_ashr3 (high[0], high[0],
19870 GEN_INT (half_width - 1)));
19871 emit_move_insn (low[0], high[0]);
19874 else if (count >= half_width)
19876 emit_move_insn (low[0], high[1]);
19877 emit_move_insn (high[0], low[0]);
19878 emit_insn (gen_ashr3 (high[0], high[0],
19879 GEN_INT (half_width - 1)));
19881 if (count > half_width)
19882 emit_insn (gen_ashr3 (low[0], low[0],
19883 GEN_INT (count - half_width)));
19887 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19889 if (!rtx_equal_p (operands[0], operands[1]))
19890 emit_move_insn (operands[0], operands[1]);
19892 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19893 emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));
19898 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19900 if (!rtx_equal_p (operands[0], operands[1]))
19901 emit_move_insn (operands[0], operands[1]);
19903 split_double_mode (mode, operands, 1, low, high);
19905 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19906 emit_insn (gen_ashr3 (high[0], high[0], operands[2]));
19908 if (TARGET_CMOVE && scratch)
19910 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19911 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19913 emit_move_insn (scratch, high[0]);
19914 emit_insn (gen_ashr3 (scratch, scratch,
19915 GEN_INT (half_width - 1)));
19916 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19921 rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
19922 = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;
19924 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
19930 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
19932 rtx (*gen_lshr3)(rtx, rtx, rtx)
19933 = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
19934 rtx (*gen_shrd)(rtx, rtx, rtx);
19935 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19937 rtx low[2], high[2];
19940 if (CONST_INT_P (operands[2]))
19942 split_double_mode (mode, operands, 2, low, high);
19943 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19945 if (count >= half_width)
19947 emit_move_insn (low[0], high[1]);
19948 ix86_expand_clear (high[0]);
19950 if (count > half_width)
19951 emit_insn (gen_lshr3 (low[0], low[0],
19952 GEN_INT (count - half_width)));
19956 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19958 if (!rtx_equal_p (operands[0], operands[1]))
19959 emit_move_insn (operands[0], operands[1]);
19961 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19962 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
19967 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19969 if (!rtx_equal_p (operands[0], operands[1]))
19970 emit_move_insn (operands[0], operands[1]);
19972 split_double_mode (mode, operands, 1, low, high);
19974 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19975 emit_insn (gen_lshr3 (high[0], high[0], operands[2]));
19977 if (TARGET_CMOVE && scratch)
19979 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19980 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19982 ix86_expand_clear (scratch);
19983 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19988 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19989 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19991 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
/* Predict the just-emitted jump instruction to be taken with probability PROB.  */
19998 predict_jump (int prob)
20000 rtx insn = get_last_insn ();
20001 gcc_assert (JUMP_P (insn));
20002 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
/* Helper function for the string operations below.  Tests VARIABLE
   whether it is aligned to VALUE bytes.  If true, jump to the label.  */
20008 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
20010 rtx label = gen_label_rtx ();
20011 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
20012 if (GET_MODE (variable) == DImode)
20013 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
20015 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
20016 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
if (epilogue)
predict_jump (REG_BR_PROB_BASE * 50 / 100);
else
predict_jump (REG_BR_PROB_BASE * 90 / 100);
return label;
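/* Illustrative sketch, not part of GCC: the test emitted above, in plain C.
   Callers pass a single power-of-two bit in VALUE; the emitted jump skips
   the copy that follows whenever that bit of VARIABLE is clear.  */
static inline int
example_aligntest (unsigned long variable, unsigned long value)
{
  /* Nonzero when the tested alignment bit is clear, i.e. when the branch
     to the label would be taken.  */
  return (variable & value) == 0;
}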
/* Adjust COUNTREG by the VALUE.  */
static void
20027 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
20029 rtx (*gen_add)(rtx, rtx, rtx)
20030 = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;
20032 emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
/* Zero-extend a possibly SImode EXP to a Pmode register.  */
static rtx
20037 ix86_zero_extend_to_Pmode (rtx exp)
20040 if (GET_MODE (exp) == VOIDmode)
20041 return force_reg (Pmode, exp);
20042 if (GET_MODE (exp) == Pmode)
20043 return copy_to_mode_reg (Pmode, exp);
20044 r = gen_reg_rtx (Pmode);
20045 emit_insn (gen_zero_extendsidi2 (r, exp));
/* Divide COUNTREG by SCALE.  */
static rtx
20051 scale_counter (rtx countreg, int scale)
20057 if (CONST_INT_P (countreg))
20058 return GEN_INT (INTVAL (countreg) / scale);
20059 gcc_assert (REG_P (countreg));
sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
GEN_INT (exact_log2 (scale)),
NULL, 1, OPTAB_DIRECT);
return sc;
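/* Illustrative sketch, not part of GCC: the run-time case above in plain C.
   SCALE is assumed to be a power of two, so the division reduces to a
   logical shift right by log2 (SCALE).  */
static inline unsigned long
example_scale_counter (unsigned long count, unsigned long scale)
{
  unsigned int shift = 0;

  while ((1ul << shift) < scale)
    shift++;			/* shift == log2 (scale).  */
  return count >> shift;	/* count / scale.  */
}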
20067 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
20068 DImode for constant loop counts. */
20070 static enum machine_mode
20071 counter_mode (rtx count_exp)
20073 if (GET_MODE (count_exp) != VOIDmode)
20074 return GET_MODE (count_exp);
20075 if (!CONST_INT_P (count_exp))
20077 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
/* When SRCPTR is non-NULL, output a simple loop to move the memory pointed
   to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times; the
   overall size is COUNT, specified in bytes.  When SRCPTR is NULL, output
   the equivalent loop to set memory to VALUE (supposed to be in MODE).

   The size is rounded down to a whole multiple of the chunk size moved at
   once.  SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info.  */
static void
20092 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
20093 rtx destptr, rtx srcptr, rtx value,
20094 rtx count, enum machine_mode mode, int unroll,
20097 rtx out_label, top_label, iter, tmp;
20098 enum machine_mode iter_mode = counter_mode (count);
20099 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
20100 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
20106 top_label = gen_label_rtx ();
20107 out_label = gen_label_rtx ();
20108 iter = gen_reg_rtx (iter_mode);
20110 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
20111 NULL, 1, OPTAB_DIRECT);
20112 /* Those two should combine. */
20113 if (piece_size == const1_rtx)
20115 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
20117 predict_jump (REG_BR_PROB_BASE * 10 / 100);
20119 emit_move_insn (iter, const0_rtx);
20121 emit_label (top_label);
20123 tmp = convert_modes (Pmode, iter_mode, iter, true);
20124 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
20125 destmem = change_address (destmem, mode, x_addr);
20129 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
20130 srcmem = change_address (srcmem, mode, y_addr);
/* When unrolling for chips that reorder memory reads and writes,
we can save registers by using a single temporary.
Using 4 temporaries is also overkill in 32-bit mode.  */
20135 if (!TARGET_64BIT && 0)
20137 for (i = 0; i < unroll; i++)
20142 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20144 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20146 emit_move_insn (destmem, srcmem);
20152 gcc_assert (unroll <= 4);
20153 for (i = 0; i < unroll; i++)
20155 tmpreg[i] = gen_reg_rtx (mode);
20159 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20161 emit_move_insn (tmpreg[i], srcmem);
20163 for (i = 0; i < unroll; i++)
20168 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20170 emit_move_insn (destmem, tmpreg[i]);
20175 for (i = 0; i < unroll; i++)
20179 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20180 emit_move_insn (destmem, value);
20183 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
20184 true, OPTAB_LIB_WIDEN);
20186 emit_move_insn (iter, tmp);
20188 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
20190 if (expected_size != -1)
20192 expected_size /= GET_MODE_SIZE (mode) * unroll;
20193 if (expected_size == 0)
20195 else if (expected_size > REG_BR_PROB_BASE)
20196 predict_jump (REG_BR_PROB_BASE - 1);
20198 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
20201 predict_jump (REG_BR_PROB_BASE * 80 / 100);
20202 iter = ix86_zero_extend_to_Pmode (iter);
20203 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
20204 true, OPTAB_LIB_WIDEN);
20205 if (tmp != destptr)
20206 emit_move_insn (destptr, tmp);
20209 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
20210 true, OPTAB_LIB_WIDEN);
20212 emit_move_insn (srcptr, tmp);
20214 emit_label (out_label);
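/* Illustrative sketch, not part of GCC: the control flow the expander above
   emits, written as plain C for the copy case.  CHUNK stands for
   GET_MODE_SIZE (mode) * unroll and is assumed to be a power of two; the
   __builtin_memcpy call stands for the unrolled block of moves.  */
static void
example_copy_loop (char *dest, const char *src,
		   unsigned long count, unsigned long chunk)
{
  unsigned long size = count & ~(chunk - 1);	/* round down.  */
  unsigned long iter;

  for (iter = 0; iter < size; iter += chunk)
    __builtin_memcpy (dest + iter, src + iter, chunk);
  /* The remainder, count - size, is left for the epilogue, and the
     expander advances the dest/src pointers past the copied part.  */
}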
20217 /* Output "rep; mov" instruction.
20218 Arguments have same meaning as for previous function */
20220 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
20221 rtx destptr, rtx srcptr,
20223 enum machine_mode mode)
20229 /* If the size is known, it is shorter to use rep movs. */
20230 if (mode == QImode && CONST_INT_P (count)
20231 && !(INTVAL (count) & 3))
20234 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
20235 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
20236 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
20237 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
20238 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
20239 if (mode != QImode)
20241 destexp = gen_rtx_ASHIFT (Pmode, countreg,
20242 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20243 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
20244 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
20245 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20246 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
20250 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
20251 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
20253 if (CONST_INT_P (count))
20255 count = GEN_INT (INTVAL (count)
20256 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
20257 destmem = shallow_copy_rtx (destmem);
20258 srcmem = shallow_copy_rtx (srcmem);
20259 set_mem_size (destmem, count);
20260 set_mem_size (srcmem, count);
20264 if (MEM_SIZE (destmem))
20265 set_mem_size (destmem, NULL_RTX);
20266 if (MEM_SIZE (srcmem))
20267 set_mem_size (srcmem, NULL_RTX);
emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
destexp, srcexp));
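/* Illustrative sketch, not part of GCC: a hypothetical inline-asm rendering
   of the "rep movsb" operation the rep_mov pattern expands to.  edi, esi
   and ecx carry the destination, source and count, which is why decide_alg
   below refuses rep-prefix algorithms when those registers are fixed.  */
static inline void
example_rep_movsb (void *dest, const void *src, unsigned long count)
{
  __asm__ __volatile__ ("rep movsb"
			: "+D" (dest), "+S" (src), "+c" (count)
			: : "memory");
}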
20273 /* Output "rep; stos" instruction.
20274 Arguments have same meaning as for previous function */
20276 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
20277 rtx count, enum machine_mode mode,
20283 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
20284 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
20285 value = force_reg (mode, gen_lowpart (mode, value));
20286 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
20287 if (mode != QImode)
20289 destexp = gen_rtx_ASHIFT (Pmode, countreg,
20290 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20291 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
20294 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
20295 if (orig_value == const0_rtx && CONST_INT_P (count))
20297 count = GEN_INT (INTVAL (count)
20298 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
20299 destmem = shallow_copy_rtx (destmem);
20300 set_mem_size (destmem, count);
20302 else if (MEM_SIZE (destmem))
20303 set_mem_size (destmem, NULL_RTX);
20304 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
20308 emit_strmov (rtx destmem, rtx srcmem,
20309 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
20311 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
20312 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
20313 emit_insn (gen_strmov (destptr, dest, srcptr, src));
/* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST.  */
static void
20318 expand_movmem_epilogue (rtx destmem, rtx srcmem,
20319 rtx destptr, rtx srcptr, rtx count, int max_size)
20322 if (CONST_INT_P (count))
20324 HOST_WIDE_INT countval = INTVAL (count);
20327 if ((countval & 0x10) && max_size > 16)
20331 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
20332 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
20335 gcc_unreachable ();
20338 if ((countval & 0x08) && max_size > 8)
20341 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
20344 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
20345 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
20349 if ((countval & 0x04) && max_size > 4)
20351 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
20354 if ((countval & 0x02) && max_size > 2)
20356 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
20359 if ((countval & 0x01) && max_size > 1)
20361 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
20368 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
20369 count, 1, OPTAB_DIRECT);
20370 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
20371 count, QImode, 1, 4);
/* When there are stringops, we can cheaply increase dest and src pointers.
Otherwise we save code size by maintaining an offset (zero is readily
available from the preceding rep operation) and using x86 addressing
modes.  */
20379 if (TARGET_SINGLE_STRINGOP)
20383 rtx label = ix86_expand_aligntest (count, 4, true);
20384 src = change_address (srcmem, SImode, srcptr);
20385 dest = change_address (destmem, SImode, destptr);
20386 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20387 emit_label (label);
20388 LABEL_NUSES (label) = 1;
20392 rtx label = ix86_expand_aligntest (count, 2, true);
20393 src = change_address (srcmem, HImode, srcptr);
20394 dest = change_address (destmem, HImode, destptr);
20395 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20396 emit_label (label);
20397 LABEL_NUSES (label) = 1;
20401 rtx label = ix86_expand_aligntest (count, 1, true);
20402 src = change_address (srcmem, QImode, srcptr);
20403 dest = change_address (destmem, QImode, destptr);
20404 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20405 emit_label (label);
20406 LABEL_NUSES (label) = 1;
20411 rtx offset = force_reg (Pmode, const0_rtx);
20416 rtx label = ix86_expand_aligntest (count, 4, true);
20417 src = change_address (srcmem, SImode, srcptr);
20418 dest = change_address (destmem, SImode, destptr);
20419 emit_move_insn (dest, src);
20420 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
20421 true, OPTAB_LIB_WIDEN);
20423 emit_move_insn (offset, tmp);
20424 emit_label (label);
20425 LABEL_NUSES (label) = 1;
20429 rtx label = ix86_expand_aligntest (count, 2, true);
20430 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20431 src = change_address (srcmem, HImode, tmp);
20432 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20433 dest = change_address (destmem, HImode, tmp);
20434 emit_move_insn (dest, src);
20435 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
20436 true, OPTAB_LIB_WIDEN);
20438 emit_move_insn (offset, tmp);
20439 emit_label (label);
20440 LABEL_NUSES (label) = 1;
20444 rtx label = ix86_expand_aligntest (count, 1, true);
20445 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20446 src = change_address (srcmem, QImode, tmp);
20447 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20448 dest = change_address (destmem, QImode, tmp);
20449 emit_move_insn (dest, src);
20450 emit_label (label);
20451 LABEL_NUSES (label) = 1;
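/* Illustrative sketch, not part of GCC: the branch-free tail handling used
   above when the count is a compile-time constant.  Each set bit of the
   residual count selects exactly one move of that width, so at most one
   move per power of two is emitted.  Assumes count < 16.  */
static void
example_copy_tail (char *dest, const char *src, unsigned int count)
{
  unsigned int offset = 0;

  if (count & 8)
    {
      __builtin_memcpy (dest + offset, src + offset, 8);
      offset += 8;
    }
  if (count & 4)
    {
      __builtin_memcpy (dest + offset, src + offset, 4);
      offset += 4;
    }
  if (count & 2)
    {
      __builtin_memcpy (dest + offset, src + offset, 2);
      offset += 2;
    }
  if (count & 1)
    dest[offset] = src[offset];
}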
/* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
static void
20458 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
20459 rtx count, int max_size)
20462 expand_simple_binop (counter_mode (count), AND, count,
20463 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
20464 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
20465 gen_lowpart (QImode, value), count, QImode,
/* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
static void
20471 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
20475 if (CONST_INT_P (count))
20477 HOST_WIDE_INT countval = INTVAL (count);
20480 if ((countval & 0x10) && max_size > 16)
20484 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20485 emit_insn (gen_strset (destptr, dest, value));
20486 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
20487 emit_insn (gen_strset (destptr, dest, value));
20490 gcc_unreachable ();
20493 if ((countval & 0x08) && max_size > 8)
20497 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20498 emit_insn (gen_strset (destptr, dest, value));
20502 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20503 emit_insn (gen_strset (destptr, dest, value));
20504 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
20505 emit_insn (gen_strset (destptr, dest, value));
20509 if ((countval & 0x04) && max_size > 4)
20511 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20512 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20515 if ((countval & 0x02) && max_size > 2)
20517 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
20518 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20521 if ((countval & 0x01) && max_size > 1)
20523 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
20524 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20531 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
20536 rtx label = ix86_expand_aligntest (count, 16, true);
20539 dest = change_address (destmem, DImode, destptr);
20540 emit_insn (gen_strset (destptr, dest, value));
20541 emit_insn (gen_strset (destptr, dest, value));
20545 dest = change_address (destmem, SImode, destptr);
20546 emit_insn (gen_strset (destptr, dest, value));
20547 emit_insn (gen_strset (destptr, dest, value));
20548 emit_insn (gen_strset (destptr, dest, value));
20549 emit_insn (gen_strset (destptr, dest, value));
20551 emit_label (label);
20552 LABEL_NUSES (label) = 1;
20556 rtx label = ix86_expand_aligntest (count, 8, true);
20559 dest = change_address (destmem, DImode, destptr);
20560 emit_insn (gen_strset (destptr, dest, value));
20564 dest = change_address (destmem, SImode, destptr);
20565 emit_insn (gen_strset (destptr, dest, value));
20566 emit_insn (gen_strset (destptr, dest, value));
20568 emit_label (label);
20569 LABEL_NUSES (label) = 1;
20573 rtx label = ix86_expand_aligntest (count, 4, true);
20574 dest = change_address (destmem, SImode, destptr);
20575 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20576 emit_label (label);
20577 LABEL_NUSES (label) = 1;
20581 rtx label = ix86_expand_aligntest (count, 2, true);
20582 dest = change_address (destmem, HImode, destptr);
20583 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20584 emit_label (label);
20585 LABEL_NUSES (label) = 1;
20589 rtx label = ix86_expand_aligntest (count, 1, true);
20590 dest = change_address (destmem, QImode, destptr);
20591 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20592 emit_label (label);
20593 LABEL_NUSES (label) = 1;
/* Copy enough bytes from SRC to DEST to align DEST, known to be aligned
   by ALIGN, to DESIRED_ALIGNMENT.  */
static void
20600 expand_movmem_prologue (rtx destmem, rtx srcmem,
20601 rtx destptr, rtx srcptr, rtx count,
20602 int align, int desired_alignment)
20604 if (align <= 1 && desired_alignment > 1)
20606 rtx label = ix86_expand_aligntest (destptr, 1, false);
20607 srcmem = change_address (srcmem, QImode, srcptr);
20608 destmem = change_address (destmem, QImode, destptr);
20609 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20610 ix86_adjust_counter (count, 1);
20611 emit_label (label);
20612 LABEL_NUSES (label) = 1;
20614 if (align <= 2 && desired_alignment > 2)
20616 rtx label = ix86_expand_aligntest (destptr, 2, false);
20617 srcmem = change_address (srcmem, HImode, srcptr);
20618 destmem = change_address (destmem, HImode, destptr);
20619 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20620 ix86_adjust_counter (count, 2);
20621 emit_label (label);
20622 LABEL_NUSES (label) = 1;
20624 if (align <= 4 && desired_alignment > 4)
20626 rtx label = ix86_expand_aligntest (destptr, 4, false);
20627 srcmem = change_address (srcmem, SImode, srcptr);
20628 destmem = change_address (destmem, SImode, destptr);
20629 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20630 ix86_adjust_counter (count, 4);
20631 emit_label (label);
20632 LABEL_NUSES (label) = 1;
20634 gcc_assert (desired_alignment <= 8);
/* Copy enough bytes from SRC to DST to align DST to DESIRED_ALIGN.
   ALIGN_BYTES is how many bytes need to be copied.  */
static rtx
20640 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
20641 int desired_align, int align_bytes)
20644 rtx src_size, dst_size;
20646 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
20647 if (src_align_bytes >= 0)
20648 src_align_bytes = desired_align - src_align_bytes;
20649 src_size = MEM_SIZE (src);
20650 dst_size = MEM_SIZE (dst);
20651 if (align_bytes & 1)
20653 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20654 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
20656 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20658 if (align_bytes & 2)
20660 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20661 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
20662 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20663 set_mem_align (dst, 2 * BITS_PER_UNIT);
20664 if (src_align_bytes >= 0
20665 && (src_align_bytes & 1) == (align_bytes & 1)
20666 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
20667 set_mem_align (src, 2 * BITS_PER_UNIT);
20669 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20671 if (align_bytes & 4)
20673 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20674 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
20675 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20676 set_mem_align (dst, 4 * BITS_PER_UNIT);
20677 if (src_align_bytes >= 0)
20679 unsigned int src_align = 0;
20680 if ((src_align_bytes & 3) == (align_bytes & 3))
20682 else if ((src_align_bytes & 1) == (align_bytes & 1))
20684 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20685 set_mem_align (src, src_align * BITS_PER_UNIT);
20688 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20690 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20691 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
20692 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20693 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20694 if (src_align_bytes >= 0)
20696 unsigned int src_align = 0;
20697 if ((src_align_bytes & 7) == (align_bytes & 7))
20699 else if ((src_align_bytes & 3) == (align_bytes & 3))
20701 else if ((src_align_bytes & 1) == (align_bytes & 1))
20703 if (src_align > (unsigned int) desired_align)
20704 src_align = desired_align;
20705 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20706 set_mem_align (src, src_align * BITS_PER_UNIT);
20709 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20711 set_mem_size (dst, GEN_INT (INTVAL (src_size) - align_bytes));
/* Store enough at DEST to align it, known to be aligned by ALIGN,
   to DESIRED_ALIGNMENT.  */
static void
20719 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
20720 int align, int desired_alignment)
20722 if (align <= 1 && desired_alignment > 1)
20724 rtx label = ix86_expand_aligntest (destptr, 1, false);
20725 destmem = change_address (destmem, QImode, destptr);
20726 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
20727 ix86_adjust_counter (count, 1);
20728 emit_label (label);
20729 LABEL_NUSES (label) = 1;
20731 if (align <= 2 && desired_alignment > 2)
20733 rtx label = ix86_expand_aligntest (destptr, 2, false);
20734 destmem = change_address (destmem, HImode, destptr);
20735 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
20736 ix86_adjust_counter (count, 2);
20737 emit_label (label);
20738 LABEL_NUSES (label) = 1;
20740 if (align <= 4 && desired_alignment > 4)
20742 rtx label = ix86_expand_aligntest (destptr, 4, false);
20743 destmem = change_address (destmem, SImode, destptr);
20744 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
20745 ix86_adjust_counter (count, 4);
20746 emit_label (label);
20747 LABEL_NUSES (label) = 1;
20749 gcc_assert (desired_alignment <= 8);
/* Store enough at DST to align it, known to be aligned by ALIGN, to
   DESIRED_ALIGN.  ALIGN_BYTES is how many bytes need to be stored.  */
static rtx
20755 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
20756 int desired_align, int align_bytes)
20759 rtx dst_size = MEM_SIZE (dst);
20760 if (align_bytes & 1)
20762 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20764 emit_insn (gen_strset (destreg, dst,
20765 gen_lowpart (QImode, value)));
20767 if (align_bytes & 2)
20769 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20770 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20771 set_mem_align (dst, 2 * BITS_PER_UNIT);
20773 emit_insn (gen_strset (destreg, dst,
20774 gen_lowpart (HImode, value)));
20776 if (align_bytes & 4)
20778 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20779 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20780 set_mem_align (dst, 4 * BITS_PER_UNIT);
20782 emit_insn (gen_strset (destreg, dst,
20783 gen_lowpart (SImode, value)));
20785 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20786 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20787 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20789 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20793 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
20794 static enum stringop_alg
20795 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
20796 int *dynamic_check)
20798 const struct stringop_algs * algs;
20799 bool optimize_for_speed;
20800 /* Algorithms using the rep prefix want at least edi and ecx;
20801 additionally, memset wants eax and memcpy wants esi. Don't
20802 consider such algorithms if the user has appropriated those
20803 registers for their own purposes. */
bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
|| (memset ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
20808 #define ALG_USABLE_P(alg) (rep_prefix_usable \
20809 || (alg != rep_prefix_1_byte \
20810 && alg != rep_prefix_4_byte \
20811 && alg != rep_prefix_8_byte))
20812 const struct processor_costs *cost;
20814 /* Even if the string operation call is cold, we still might spend a lot
20815 of time processing large blocks. */
20816 if (optimize_function_for_size_p (cfun)
20817 || (optimize_insn_for_size_p ()
20818 && expected_size != -1 && expected_size < 256))
20819 optimize_for_speed = false;
20821 optimize_for_speed = true;
20823 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
20825 *dynamic_check = -1;
20827 algs = &cost->memset[TARGET_64BIT != 0];
20829 algs = &cost->memcpy[TARGET_64BIT != 0];
20830 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
20831 return stringop_alg;
20832 /* rep; movq or rep; movl is the smallest variant. */
20833 else if (!optimize_for_speed)
20835 if (!count || (count & 3))
20836 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
20838 return rep_prefix_usable ? rep_prefix_4_byte : loop;
/* Very tiny blocks are best handled via the loop; REP is expensive to
set up.  */
20842 else if (expected_size != -1 && expected_size < 4)
20843 return loop_1_byte;
20844 else if (expected_size != -1)
20847 enum stringop_alg alg = libcall;
20848 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20850 /* We get here if the algorithms that were not libcall-based
20851 were rep-prefix based and we are unable to use rep prefixes
20852 based on global register usage. Break out of the loop and
20853 use the heuristic below. */
20854 if (algs->size[i].max == 0)
20856 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
20858 enum stringop_alg candidate = algs->size[i].alg;
20860 if (candidate != libcall && ALG_USABLE_P (candidate))
20862 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
20863 last non-libcall inline algorithm. */
20864 if (TARGET_INLINE_ALL_STRINGOPS)
20866 /* When the current size is best to be copied by a libcall,
20867 but we are still forced to inline, run the heuristic below
20868 that will pick code for medium sized blocks. */
20869 if (alg != libcall)
20873 else if (ALG_USABLE_P (candidate))
20877 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
/* When asked to inline the call anyway, try to pick a meaningful choice.
We look for the maximal size of a block that is faster to copy by hand,
and take blocks of at most that size, guessing that the average size will
be roughly half of the maximum.

If this turns out to be bad, we might simply specify the preferred
choice in ix86_costs.  */
20886 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20887 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
20890 enum stringop_alg alg;
20892 bool any_alg_usable_p = true;
20894 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20896 enum stringop_alg candidate = algs->size[i].alg;
20897 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
20899 if (candidate != libcall && candidate
20900 && ALG_USABLE_P (candidate))
20901 max = algs->size[i].max;
20903 /* If there aren't any usable algorithms, then recursing on
20904 smaller sizes isn't going to find anything. Just return the
20905 simple byte-at-a-time copy loop. */
20906 if (!any_alg_usable_p)
20908 /* Pick something reasonable. */
20909 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20910 *dynamic_check = 128;
20911 return loop_1_byte;
20915 alg = decide_alg (count, max / 2, memset, dynamic_check);
20916 gcc_assert (*dynamic_check == -1);
20917 gcc_assert (alg != libcall);
20918 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20919 *dynamic_check = max;
20922 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
20923 #undef ALG_USABLE_P
20926 /* Decide on alignment. We know that the operand is already aligned to ALIGN
20927 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
20929 decide_alignment (int align,
20930 enum stringop_alg alg,
20933 int desired_align = 0;
20937 gcc_unreachable ();
20939 case unrolled_loop:
20940 desired_align = GET_MODE_SIZE (Pmode);
20942 case rep_prefix_8_byte:
20945 case rep_prefix_4_byte:
/* PentiumPro has special logic that triggers for 8-byte-aligned blocks,
copying a whole cache line at once.  */
20948 if (TARGET_PENTIUMPRO)
20953 case rep_prefix_1_byte:
/* PentiumPro has special logic that triggers for 8-byte-aligned blocks,
copying a whole cache line at once.  */
20956 if (TARGET_PENTIUMPRO)
20970 if (desired_align < align)
20971 desired_align = align;
20972 if (expected_size != -1 && expected_size < 4)
20973 desired_align = align;
20974 return desired_align;
20977 /* Return the smallest power of 2 greater than VAL. */
static int
smallest_pow2_greater_than (int val)
{
  int ret = 1;
  while (ret <= val)
    ret <<= 1;
  return ret;
}
20987 /* Expand string move (memcpy) operation. Use i386 string operations
when profitable.  ix86_expand_setmem contains similar code.  The code
20989 depends upon architecture, block size and alignment, but always has
20990 the same overall structure:
1) Prologue guard: Conditional that jumps up to the epilogues for small
blocks that can be handled by the epilogue alone.  This is faster
but also needed for correctness, since the prologue assumes the block
is larger than the desired alignment.
20997 Optional dynamic check for size and libcall for large
20998 blocks is emitted here too, with -minline-stringops-dynamically.
21000 2) Prologue: copy first few bytes in order to get destination
21001 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less
21002 than DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be
21003 copied. We emit either a jump tree on power of two sized
21004 blocks, or a byte loop.
21006 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
21007 with specified algorithm.
21009 4) Epilogue: code copying tail of the block that is too small to be
21010 handled by main body (or up to size guarded by prologue guard). */
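/* A worked example, illustrative only: for a 100-byte copy with unknown
   alignment, a 16-byte main-loop chunk and a desired alignment of 8,
   step 1 jumps straight to the epilogue when the count is below the
   epilogue threshold, step 2 copies up to 7 bytes so the destination
   becomes 8-byte aligned, step 3 moves the bulk in 16-byte chunks, and
   step 4 copies the at most 15 remaining bytes.  */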
21013 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
21014 rtx expected_align_exp, rtx expected_size_exp)
21020 rtx jump_around_label = NULL;
21021 HOST_WIDE_INT align = 1;
21022 unsigned HOST_WIDE_INT count = 0;
21023 HOST_WIDE_INT expected_size = -1;
21024 int size_needed = 0, epilogue_size_needed;
21025 int desired_align = 0, align_bytes = 0;
21026 enum stringop_alg alg;
21028 bool need_zero_guard = false;
21030 if (CONST_INT_P (align_exp))
21031 align = INTVAL (align_exp);
/* i386 can do misaligned access at a reasonably increased cost.  */
21033 if (CONST_INT_P (expected_align_exp)
21034 && INTVAL (expected_align_exp) > align)
21035 align = INTVAL (expected_align_exp);
21036 /* ALIGN is the minimum of destination and source alignment, but we care here
21037 just about destination alignment. */
21038 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
21039 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
21041 if (CONST_INT_P (count_exp))
21042 count = expected_size = INTVAL (count_exp);
21043 if (CONST_INT_P (expected_size_exp) && count == 0)
21044 expected_size = INTVAL (expected_size_exp);
21046 /* Make sure we don't need to care about overflow later on. */
21047 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21050 /* Step 0: Decide on preferred algorithm, desired alignment and
21051 size of chunks to be copied by main loop. */
21053 alg = decide_alg (count, expected_size, false, &dynamic_check);
21054 desired_align = decide_alignment (align, alg, expected_size);
21056 if (!TARGET_ALIGN_STRINGOPS)
21057 align = desired_align;
if (alg == libcall)
return false;
21061 gcc_assert (alg != no_stringop);
21063 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
21064 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21065 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
21070 gcc_unreachable ();
21072 need_zero_guard = true;
21073 size_needed = GET_MODE_SIZE (Pmode);
21075 case unrolled_loop:
21076 need_zero_guard = true;
21077 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
21079 case rep_prefix_8_byte:
21082 case rep_prefix_4_byte:
21085 case rep_prefix_1_byte:
21089 need_zero_guard = true;
21094 epilogue_size_needed = size_needed;
21096 /* Step 1: Prologue guard. */
21098 /* Alignment code needs count to be in register. */
21099 if (CONST_INT_P (count_exp) && desired_align > align)
21101 if (INTVAL (count_exp) > desired_align
21102 && INTVAL (count_exp) > size_needed)
21105 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21106 if (align_bytes <= 0)
21109 align_bytes = desired_align - align_bytes;
21111 if (align_bytes == 0)
21112 count_exp = force_reg (counter_mode (count_exp), count_exp);
21114 gcc_assert (desired_align >= 1 && align >= 1);
21116 /* Ensure that alignment prologue won't copy past end of block. */
21117 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21119 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
/* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
Make sure it is a power of 2.  */
21122 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21126 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21128 /* If main algorithm works on QImode, no epilogue is needed.
21129 For small sizes just don't align anything. */
21130 if (size_needed == 1)
21131 desired_align = align;
21138 label = gen_label_rtx ();
21139 emit_cmp_and_jump_insns (count_exp,
21140 GEN_INT (epilogue_size_needed),
21141 LTU, 0, counter_mode (count_exp), 1, label);
21142 if (expected_size == -1 || expected_size < epilogue_size_needed)
21143 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21145 predict_jump (REG_BR_PROB_BASE * 20 / 100);
/* Emit code to decide at runtime whether a library call or inline code
should be used.  */
21151 if (dynamic_check != -1)
21153 if (CONST_INT_P (count_exp))
21155 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
21157 emit_block_move_via_libcall (dst, src, count_exp, false);
21158 count_exp = const0_rtx;
21164 rtx hot_label = gen_label_rtx ();
21165 jump_around_label = gen_label_rtx ();
21166 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21167 LEU, 0, GET_MODE (count_exp), 1, hot_label);
21168 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21169 emit_block_move_via_libcall (dst, src, count_exp, false);
21170 emit_jump (jump_around_label);
21171 emit_label (hot_label);
21175 /* Step 2: Alignment prologue. */
21177 if (desired_align > align)
21179 if (align_bytes == 0)
/* Except for the first move in the epilogue, we no longer know
the constant offset in the aliasing info.  It doesn't seem worth
the pain to maintain it for the first move, so throw away
the info early.  */
21185 src = change_address (src, BLKmode, srcreg);
21186 dst = change_address (dst, BLKmode, destreg);
21187 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
21192 /* If we know how many bytes need to be stored before dst is
21193 sufficiently aligned, maintain aliasing info accurately. */
21194 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
21195 desired_align, align_bytes);
21196 count_exp = plus_constant (count_exp, -align_bytes);
21197 count -= align_bytes;
21199 if (need_zero_guard
21200 && (count < (unsigned HOST_WIDE_INT) size_needed
21201 || (align_bytes == 0
21202 && count < ((unsigned HOST_WIDE_INT) size_needed
21203 + desired_align - align))))
/* It is possible that we copied enough so that the main loop will not
operate in any case.  */
21207 gcc_assert (size_needed > 1);
21208 if (label == NULL_RTX)
21209 label = gen_label_rtx ();
21210 emit_cmp_and_jump_insns (count_exp,
21211 GEN_INT (size_needed),
21212 LTU, 0, counter_mode (count_exp), 1, label);
21213 if (expected_size == -1
21214 || expected_size < (desired_align - align) / 2 + size_needed)
21215 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21217 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21220 if (label && size_needed == 1)
21222 emit_label (label);
21223 LABEL_NUSES (label) = 1;
21225 epilogue_size_needed = 1;
21227 else if (label == NULL_RTX)
21228 epilogue_size_needed = size_needed;
21230 /* Step 3: Main loop. */
21236 gcc_unreachable ();
21238 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21239 count_exp, QImode, 1, expected_size);
21242 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21243 count_exp, Pmode, 1, expected_size);
21245 case unrolled_loop:
/* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
registers for 4 temporaries anyway.  */
21248 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21249 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
21252 case rep_prefix_8_byte:
21253 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21256 case rep_prefix_4_byte:
21257 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21260 case rep_prefix_1_byte:
21261 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
/* Properly adjust the offsets of the src and dest memories for aliasing.  */
21266 if (CONST_INT_P (count_exp))
21268 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
21269 (count / size_needed) * size_needed);
21270 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21271 (count / size_needed) * size_needed);
21275 src = change_address (src, BLKmode, srcreg);
21276 dst = change_address (dst, BLKmode, destreg);
21279 /* Step 4: Epilogue to copy the remaining bytes. */
/* When the main loop is done, COUNT_EXP might hold the original count,
while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
bytes.  Compensate if needed.  */
21288 if (size_needed < epilogue_size_needed)
21291 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21292 GEN_INT (size_needed - 1), count_exp, 1,
21294 if (tmp != count_exp)
21295 emit_move_insn (count_exp, tmp);
21297 emit_label (label);
21298 LABEL_NUSES (label) = 1;
21301 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21302 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
21303 epilogue_size_needed);
21304 if (jump_around_label)
21305 emit_label (jump_around_label);
/* Helper function for memset.  For the QImode value 0xXY produce
0xXYXYXYXY of the width specified by MODE.  This is essentially
a * 0x01010101, but we can do slightly better than
synth_mult by unwinding the sequence by hand on CPUs with
slow multiply.  */
static rtx
21315 promote_duplicated_reg (enum machine_mode mode, rtx val)
21317 enum machine_mode valmode = GET_MODE (val);
21319 int nops = mode == DImode ? 3 : 2;
21321 gcc_assert (mode == SImode || mode == DImode);
21322 if (val == const0_rtx)
21323 return copy_to_mode_reg (mode, const0_rtx);
21324 if (CONST_INT_P (val))
21326 HOST_WIDE_INT v = INTVAL (val) & 255;
21330 if (mode == DImode)
21331 v |= (v << 16) << 16;
21332 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
21335 if (valmode == VOIDmode)
21337 if (valmode != QImode)
21338 val = gen_lowpart (QImode, val);
21339 if (mode == QImode)
21341 if (!TARGET_PARTIAL_REG_STALL)
21343 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
21344 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
21345 <= (ix86_cost->shift_const + ix86_cost->add) * nops
21346 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
21348 rtx reg = convert_modes (mode, QImode, val, true);
21349 tmp = promote_duplicated_reg (mode, const1_rtx);
21350 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
21355 rtx reg = convert_modes (mode, QImode, val, true);
21357 if (!TARGET_PARTIAL_REG_STALL)
21358 if (mode == SImode)
21359 emit_insn (gen_movsi_insv_1 (reg, reg));
21361 emit_insn (gen_movdi_insv_1 (reg, reg));
21364 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
21365 NULL, 1, OPTAB_DIRECT);
21367 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21369 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
21370 NULL, 1, OPTAB_DIRECT);
21371 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
if (mode == SImode)
return reg;
21374 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
21375 NULL, 1, OPTAB_DIRECT);
reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
return reg;
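/* Illustrative sketch, not part of GCC: the shift-and-or sequence emitted
   above, written as plain C for the 32-bit case.  Two shift/or pairs
   replace the multiplication by 0x01010101 on CPUs with a slow
   multiplier.  */
static inline unsigned int
example_duplicate_byte (unsigned char b)
{
  unsigned int v = b;

  v |= v << 8;			/* 0x000000XY -> 0x0000XYXY.  */
  v |= v << 16;			/* 0x0000XYXY -> 0xXYXYXYXY.  */
  return v;
}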
/* Duplicate the value VAL, using promote_duplicated_reg, into the maximal
size that will be needed by the main loop copying SIZE_NEEDED chunks and
by the prologue getting the alignment from ALIGN to DESIRED_ALIGN.  */
21385 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
21390 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
21391 promoted_val = promote_duplicated_reg (DImode, val);
21392 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
21393 promoted_val = promote_duplicated_reg (SImode, val);
21394 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
21395 promoted_val = promote_duplicated_reg (HImode, val);
21397 promoted_val = val;
21399 return promoted_val;
/* Expand string set operation (memset/bzero).  Use i386 string operations
when profitable.  See the ix86_expand_movmem comment for an explanation of
the individual steps performed.  */
21406 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
21407 rtx expected_align_exp, rtx expected_size_exp)
21412 rtx jump_around_label = NULL;
21413 HOST_WIDE_INT align = 1;
21414 unsigned HOST_WIDE_INT count = 0;
21415 HOST_WIDE_INT expected_size = -1;
21416 int size_needed = 0, epilogue_size_needed;
21417 int desired_align = 0, align_bytes = 0;
21418 enum stringop_alg alg;
21419 rtx promoted_val = NULL;
21420 bool force_loopy_epilogue = false;
21422 bool need_zero_guard = false;
21424 if (CONST_INT_P (align_exp))
21425 align = INTVAL (align_exp);
/* i386 can do misaligned access at a reasonably increased cost.  */
21427 if (CONST_INT_P (expected_align_exp)
21428 && INTVAL (expected_align_exp) > align)
21429 align = INTVAL (expected_align_exp);
21430 if (CONST_INT_P (count_exp))
21431 count = expected_size = INTVAL (count_exp);
21432 if (CONST_INT_P (expected_size_exp) && count == 0)
21433 expected_size = INTVAL (expected_size_exp);
21435 /* Make sure we don't need to care about overflow later on. */
21436 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21439 /* Step 0: Decide on preferred algorithm, desired alignment and
21440 size of chunks to be copied by main loop. */
21442 alg = decide_alg (count, expected_size, true, &dynamic_check);
21443 desired_align = decide_alignment (align, alg, expected_size);
21445 if (!TARGET_ALIGN_STRINGOPS)
21446 align = desired_align;
if (alg == libcall)
return false;
21450 gcc_assert (alg != no_stringop);
21452 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
21453 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21458 gcc_unreachable ();
21460 need_zero_guard = true;
21461 size_needed = GET_MODE_SIZE (Pmode);
21463 case unrolled_loop:
21464 need_zero_guard = true;
21465 size_needed = GET_MODE_SIZE (Pmode) * 4;
21467 case rep_prefix_8_byte:
21470 case rep_prefix_4_byte:
21473 case rep_prefix_1_byte:
21477 need_zero_guard = true;
21481 epilogue_size_needed = size_needed;
21483 /* Step 1: Prologue guard. */
21485 /* Alignment code needs count to be in register. */
21486 if (CONST_INT_P (count_exp) && desired_align > align)
21488 if (INTVAL (count_exp) > desired_align
21489 && INTVAL (count_exp) > size_needed)
21492 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21493 if (align_bytes <= 0)
21496 align_bytes = desired_align - align_bytes;
21498 if (align_bytes == 0)
21500 enum machine_mode mode = SImode;
21501 if (TARGET_64BIT && (count & ~0xffffffff))
21503 count_exp = force_reg (mode, count_exp);
/* Do the cheap promotion to allow better CSE across the
main loop and epilogue (i.e. one load of the big constant in
front of all code).  */
21509 if (CONST_INT_P (val_exp))
21510 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21511 desired_align, align);
21512 /* Ensure that alignment prologue won't copy past end of block. */
21513 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21515 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21516 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
Make sure it is a power of 2.  */
21518 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
/* To improve performance of small blocks, we jump around the VAL
promoting code.  This means that if the promoted VAL is not a constant,
we might not use it in the epilogue and have to use a byte
loop instead.  */
21524 if (epilogue_size_needed > 2 && !promoted_val)
21525 force_loopy_epilogue = true;
21528 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21530 /* If main algorithm works on QImode, no epilogue is needed.
21531 For small sizes just don't align anything. */
21532 if (size_needed == 1)
21533 desired_align = align;
21540 label = gen_label_rtx ();
21541 emit_cmp_and_jump_insns (count_exp,
21542 GEN_INT (epilogue_size_needed),
21543 LTU, 0, counter_mode (count_exp), 1, label);
21544 if (expected_size == -1 || expected_size <= epilogue_size_needed)
21545 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21547 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21550 if (dynamic_check != -1)
21552 rtx hot_label = gen_label_rtx ();
21553 jump_around_label = gen_label_rtx ();
21554 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21555 LEU, 0, counter_mode (count_exp), 1, hot_label);
21556 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21557 set_storage_via_libcall (dst, count_exp, val_exp, false);
21558 emit_jump (jump_around_label);
21559 emit_label (hot_label);
21562 /* Step 2: Alignment prologue. */
21564 /* Do the expensive promotion once we branched off the small blocks. */
21566 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21567 desired_align, align);
21568 gcc_assert (desired_align >= 1 && align >= 1);
21570 if (desired_align > align)
21572 if (align_bytes == 0)
/* Except for the first move in the epilogue, we no longer know
the constant offset in the aliasing info.  It doesn't seem worth
the pain to maintain it for the first move, so throw away
the info early.  */
21578 dst = change_address (dst, BLKmode, destreg);
21579 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
21584 /* If we know how many bytes need to be stored before dst is
21585 sufficiently aligned, maintain aliasing info accurately. */
21586 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
21587 desired_align, align_bytes);
21588 count_exp = plus_constant (count_exp, -align_bytes);
21589 count -= align_bytes;
21591 if (need_zero_guard
21592 && (count < (unsigned HOST_WIDE_INT) size_needed
21593 || (align_bytes == 0
21594 && count < ((unsigned HOST_WIDE_INT) size_needed
21595 + desired_align - align))))
/* It is possible that we copied enough so that the main loop will not
operate in any case.  */
21599 gcc_assert (size_needed > 1);
21600 if (label == NULL_RTX)
21601 label = gen_label_rtx ();
21602 emit_cmp_and_jump_insns (count_exp,
21603 GEN_INT (size_needed),
21604 LTU, 0, counter_mode (count_exp), 1, label);
21605 if (expected_size == -1
21606 || expected_size < (desired_align - align) / 2 + size_needed)
21607 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21609 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21612 if (label && size_needed == 1)
21614 emit_label (label);
21615 LABEL_NUSES (label) = 1;
21617 promoted_val = val_exp;
21618 epilogue_size_needed = 1;
21620 else if (label == NULL_RTX)
21621 epilogue_size_needed = size_needed;
21623 /* Step 3: Main loop. */
21629 gcc_unreachable ();
21631 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21632 count_exp, QImode, 1, expected_size);
21635 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21636 count_exp, Pmode, 1, expected_size);
21638 case unrolled_loop:
21639 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21640 count_exp, Pmode, 4, expected_size);
21642 case rep_prefix_8_byte:
21643 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21646 case rep_prefix_4_byte:
21647 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21650 case rep_prefix_1_byte:
21651 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
/* Properly adjust the offset of the dest memory for aliasing.  */
21656 if (CONST_INT_P (count_exp))
21657 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21658 (count / size_needed) * size_needed);
21660 dst = change_address (dst, BLKmode, destreg);
21662 /* Step 4: Epilogue to copy the remaining bytes. */
/* When the main loop is done, COUNT_EXP might hold the original count,
while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
Epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
bytes.  Compensate if needed.  */
21671 if (size_needed < epilogue_size_needed)
21674 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21675 GEN_INT (size_needed - 1), count_exp, 1,
21677 if (tmp != count_exp)
21678 emit_move_insn (count_exp, tmp);
21680 emit_label (label);
21681 LABEL_NUSES (label) = 1;
21684 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21686 if (force_loopy_epilogue)
21687 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
21688 epilogue_size_needed);
21690 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
21691 epilogue_size_needed);
21693 if (jump_around_label)
21694 emit_label (jump_around_label);
/* Expand the appropriate insns for doing strlen if not just doing
repnz; scasb
21701 out = result, initialized with the start address
21702 align_rtx = alignment of the address.
scratch = scratch register, initialized with the start address when
not aligned, otherwise undefined
21706 This is just the body. It needs the initializations mentioned above and
21707 some address computing at the end. These things are done in i386.md. */
21710 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
21714 rtx align_2_label = NULL_RTX;
21715 rtx align_3_label = NULL_RTX;
21716 rtx align_4_label = gen_label_rtx ();
21717 rtx end_0_label = gen_label_rtx ();
21719 rtx tmpreg = gen_reg_rtx (SImode);
21720 rtx scratch = gen_reg_rtx (SImode);
21724 if (CONST_INT_P (align_rtx))
21725 align = INTVAL (align_rtx);
21727 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
21729 /* Is there a known alignment and is it less than 4? */
21732 rtx scratch1 = gen_reg_rtx (Pmode);
21733 emit_move_insn (scratch1, out);
21734 /* Is there a known alignment and is it not 2? */
21737 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
21738 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
21740 /* Leave just the 3 lower bits. */
21741 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
21742 NULL_RTX, 0, OPTAB_WIDEN);
21744 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21745 Pmode, 1, align_4_label);
21746 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
21747 Pmode, 1, align_2_label);
21748 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
21749 Pmode, 1, align_3_label);
/* Since the alignment is 2, we have to check 2 or 0 bytes;
check if it is aligned to a 4-byte boundary.  */
21756 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
21757 NULL_RTX, 0, OPTAB_WIDEN);
21759 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21760 Pmode, 1, align_4_label);
21763 mem = change_address (src, QImode, out);
21765 /* Now compare the bytes. */
/* Compare the first n unaligned bytes on a byte-by-byte basis.  */
21768 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
21769 QImode, 1, end_0_label);
21771 /* Increment the address. */
21772 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21774 /* Not needed with an alignment of 2 */
21777 emit_label (align_2_label);
21779 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21782 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21784 emit_label (align_3_label);
21787 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21790 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
/* Generate a loop to check 4 bytes at a time.  It is not a good idea to
align this loop; it only makes programs huge and does not help them
converge.  */
21796 emit_label (align_4_label);
21798 mem = change_address (src, SImode, out);
21799 emit_move_insn (scratch, mem);
21800 emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));
/* This formula yields a nonzero result iff one of the bytes is zero.
This saves three branches inside the loop and many cycles.  */
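/* Worked example, illustrative only: for the word 0x61006263 the sequence
   computes 0x61006263 - 0x01010101 = 0x5FFF6162, ANDs it with
   ~0x61006263 = 0x9EFF9D9C giving 0x1EFF0100, and masks with 0x80808080,
   leaving 0x00800000: the zero byte is flagged.  A word with no zero
   byte yields 0.  */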
21805 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
21806 emit_insn (gen_one_cmplsi2 (scratch, scratch));
21807 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
21808 emit_insn (gen_andsi3 (tmpreg, tmpreg,
21809 gen_int_mode (0x80808080, SImode)));
21810 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
21815 rtx reg = gen_reg_rtx (SImode);
21816 rtx reg2 = gen_reg_rtx (Pmode);
21817 emit_move_insn (reg, tmpreg);
21818 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
21820 /* If zero is not in the first two bytes, move two bytes forward. */
21821 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21822 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21823 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21824 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
21825 gen_rtx_IF_THEN_ELSE (SImode, tmp,
21828 /* Emit lea manually to avoid clobbering of flags. */
21829 emit_insn (gen_rtx_SET (SImode, reg2,
21830 gen_rtx_PLUS (Pmode, out, const2_rtx)));
21832 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21833 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21834 emit_insn (gen_rtx_SET (VOIDmode, out,
21835 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
21841 rtx end_2_label = gen_label_rtx ();
21842 /* Is zero in the first two bytes? */
21844 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21845 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21846 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
21847 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
21848 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
21850 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
21851 JUMP_LABEL (tmp) = end_2_label;
21853 /* Not in the first two. Move two bytes forward. */
21854 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
21855 emit_insn (ix86_gen_add3 (out, out, const2_rtx));
21857 emit_label (end_2_label);
21861 /* Avoid branch in fixing the byte. */
21862 tmpreg = gen_lowpart (QImode, tmpreg);
21863 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
21864 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
21865 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
21866 emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
21868 emit_label (end_0_label);
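/* Illustrative sketch, not part of GCC: the algorithm of the unrolled
   expander above, as a self-contained C function.  It assumes S is 4-byte
   aligned (the job of the prologue above) and ignores the strict-aliasing
   concerns a real implementation would have to address.  */
static unsigned long
example_strlen_by_word (const char *s)
{
  const unsigned int *p = (const unsigned int *) s;
  const char *q;

  /* Scan a word at a time until a word containing a zero byte is found,
     using the formula commented above.  */
  while ((((*p - 0x01010101u) & ~*p) & 0x80808080u) == 0)
    p++;
  /* Locate the exact zero byte within that word.  */
  q = (const char *) p;
  while (*q)
    q++;
  return (unsigned long) (q - s);
}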
21871 /* Expand strlen. */
21874 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
21876 rtx addr, scratch1, scratch2, scratch3, scratch4;
/* The generic case of the strlen expander is long.  Avoid expanding it
unless TARGET_INLINE_ALL_STRINGOPS.  */
21881 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21882 && !TARGET_INLINE_ALL_STRINGOPS
21883 && !optimize_insn_for_size_p ()
21884 && (!CONST_INT_P (align) || INTVAL (align) < 4))
21887 addr = force_reg (Pmode, XEXP (src, 0));
21888 scratch1 = gen_reg_rtx (Pmode);
21890 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21891 && !optimize_insn_for_size_p ())
/* Well, it seems that some optimizer does not combine a call like
foo(strlen(bar), strlen(bar));
when the move and the subtraction are done here.  It does calculate
the length just once when these instructions are done inside of
output_strlen_unroll().  But I think that since &bar[strlen(bar)] is
often used and I use one fewer register for the lifetime of
output_strlen_unroll() this is better.  */
21901 emit_move_insn (out, addr);
21903 ix86_expand_strlensi_unroll_1 (out, src, align);
21905 /* strlensi_unroll_1 returns the address of the zero at the end of
21906 the string, like memchr(), so compute the length by subtracting
21907 the start address. */
21908 emit_insn (ix86_gen_sub3 (out, out, addr));
21914 /* Can't use this if the user has appropriated eax, ecx, or edi. */
21915 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
21918 scratch2 = gen_reg_rtx (Pmode);
21919 scratch3 = gen_reg_rtx (Pmode);
21920 scratch4 = force_reg (Pmode, constm1_rtx);
21922 emit_move_insn (scratch3, addr);
21923 eoschar = force_reg (QImode, eoschar);
21925 src = replace_equiv_address_nv (src, scratch3);
21927 /* If .md starts supporting :P, this can be done in .md. */
21928 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
21929 scratch4), UNSPEC_SCAS);
21930 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
21931 emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
21932 emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
/* For a given symbol (function), construct code to compute the address
of its PLT entry in the large x86-64 PIC model.  */
static rtx
21940 construct_plt_address (rtx symbol)
21942 rtx tmp = gen_reg_rtx (Pmode);
21943 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
21945 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
21946 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
21948 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
21949 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
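
/* Roughly, the sequence built here corresponds to

     movabs $symbol@PLTOFF, %tmp
     add    %rbx, %tmp

   where %rbx stands in, purely for illustration, for whatever hard
   register pic_offset_table_rtx ends up in; the add turns the
   GOT-relative PLT offset into an absolute address.  */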
21954 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
21955 rtx callarg2,
21956 rtx pop, int sibcall)
21958 rtx use = NULL, call;
21960 if (pop == const0_rtx)
21961 pop = NULL;
21962 gcc_assert (!TARGET_64BIT || !pop);
21964 if (TARGET_MACHO && !TARGET_64BIT)
21967 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
21968 fnaddr = machopic_indirect_call_target (fnaddr);
21973 /* Static functions and indirect calls don't need the pic register. */
21974 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
21975 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21976 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
21977 use_reg (&use, pic_offset_table_rtx);
21980 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
21982 rtx al = gen_rtx_REG (QImode, AX_REG);
21983 emit_move_insn (al, callarg2);
21984 use_reg (&use, al);
21987 if (ix86_cmodel == CM_LARGE_PIC
21989 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21990 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
21991 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
21992 if (sibcall
21993 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
21994 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
21996 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
21997 fnaddr = gen_rtx_MEM (QImode, fnaddr);
22000 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
22001 if (retval)
22002 call = gen_rtx_SET (VOIDmode, retval, call);
22003 if (pop)
22004 {
22005 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
22006 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
22007 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
22008 }
22009 if (TARGET_64BIT_MS_ABI
22010 && (!callarg2 || INTVAL (callarg2) != -2))
22012 /* We need to represent that SI and DI registers are clobbered
22013 by the function.  */
22014 static int clobbered_registers[] = {
22015 XMM6_REG, XMM7_REG, XMM8_REG,
22016 XMM9_REG, XMM10_REG, XMM11_REG,
22017 XMM12_REG, XMM13_REG, XMM14_REG,
22018 XMM15_REG, SI_REG, DI_REG
22021 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
22022 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
22023 UNSPEC_MS_TO_SYSV_CALL);
22027 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
22028 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
22031 (SSE_REGNO_P (clobbered_registers[i])
22033 clobbered_registers[i]));
22035 call = gen_rtx_PARALLEL (VOIDmode,
22036 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
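
/* Background for the clobber list above: in the Microsoft x64 calling
   convention xmm6-xmm15, rsi and rdi are callee-saved, while in the
   System V AMD64 ABI they are call-clobbered, so a call out of an
   ms_abi function into a sysv_abi function must be modelled as
   clobbering all of them.  The UNSPEC_MS_TO_SYSV_CALL marker keeps the
   pattern distinguishable from an ordinary call.  */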
22040 /* Add UNSPEC_CALL_NEEDS_VZEROUPPER decoration. */
22041 if (TARGET_VZEROUPPER)
22046 if (cfun->machine->callee_pass_avx256_p)
22048 if (cfun->machine->callee_return_avx256_p)
22049 avx256 = callee_return_pass_avx256;
22051 avx256 = callee_pass_avx256;
22053 else if (cfun->machine->callee_return_avx256_p)
22054 avx256 = callee_return_avx256;
22056 avx256 = call_no_avx256;
22058 if (reload_completed)
22059 emit_insn (gen_avx_vzeroupper (GEN_INT (avx256)));
22062 unspec = gen_rtx_UNSPEC (VOIDmode,
22063 gen_rtvec (1, GEN_INT (avx256)),
22064 UNSPEC_CALL_NEEDS_VZEROUPPER);
22065 call = gen_rtx_PARALLEL (VOIDmode,
22066 gen_rtvec (2, call, unspec));
22070 call = emit_call_insn (call);
22072 CALL_INSN_FUNCTION_USAGE (call) = use;
22078 ix86_split_call_vzeroupper (rtx insn, rtx vzeroupper)
22080 rtx call = XVECEXP (PATTERN (insn), 0, 0);
22081 emit_insn (gen_avx_vzeroupper (vzeroupper));
22082 emit_call_insn (call);
22085 /* Output the assembly for a call instruction. */
22088 ix86_output_call_insn (rtx insn, rtx call_op, int addr_op)
22090 bool direct_p = constant_call_address_operand (call_op, Pmode);
22091 bool seh_nop_p = false;
22093 gcc_assert (addr_op == 0 || addr_op == 1);
22095 if (SIBLING_CALL_P (insn))
22096 {
22097 if (direct_p)
22098 return addr_op ? "jmp\t%P1" : "jmp\t%P0";
22099 /* SEH epilogue detection requires the indirect branch case
22100 to include REX.W. */
22101 else if (TARGET_SEH)
22102 return addr_op ? "rex.W jmp %A1" : "rex.W jmp %A0";
22104 return addr_op ? "jmp\t%A1" : "jmp\t%A0";
22107 /* SEH unwinding can require an extra nop to be emitted in several
22108 circumstances. Determine if we have one of those. */
22113 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
22115 /* If we get to another real insn, we don't need the nop. */
22119 /* If we get to the epilogue note, prevent a catch region from
22120 being adjacent to the standard epilogue sequence. If non-
22121 call-exceptions, we'll have done this during epilogue emission. */
22122 if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
22123 && !flag_non_call_exceptions
22124 && !can_throw_internal (insn))
22131 /* If we didn't find a real insn following the call, prevent the
22132 unwinder from looking into the next function. */
22140 return addr_op ? "call\t%P1\n\tnop" : "call\t%P0\n\tnop";
22142 return addr_op ? "call\t%P1" : "call\t%P0";
22147 return addr_op ? "call\t%A1\n\tnop" : "call\t%A0\n\tnop";
22149 return addr_op ? "call\t%A1" : "call\t%A0";
22153 /* Clear stack slot assignments remembered from previous functions.
22154 This is called from INIT_EXPANDERS once before RTL is emitted for each
22155 function.  */
22157 static struct machine_function *
22158 ix86_init_machine_status (void)
22160 struct machine_function *f;
22162 f = ggc_alloc_cleared_machine_function ();
22163 f->use_fast_prologue_epilogue_nregs = -1;
22164 f->tls_descriptor_call_expanded_p = 0;
22165 f->call_abi = ix86_abi;
22170 /* Return a MEM corresponding to a stack slot with mode MODE.
22171 Allocate a new slot if necessary.
22173 The RTL for a function can have several slots available: N is
22174 which slot to use. */
22177 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
22179 struct stack_local_entry *s;
22181 gcc_assert (n < MAX_386_STACK_LOCALS);
22183 /* Virtual slot is valid only before vregs are instantiated. */
22184 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
22186 for (s = ix86_stack_locals; s; s = s->next)
22187 if (s->mode == mode && s->n == n)
22188 return copy_rtx (s->rtl);
22190 s = ggc_alloc_stack_local_entry ();
22193 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
22195 s->next = ix86_stack_locals;
22196 ix86_stack_locals = s;
22200 /* Construct the SYMBOL_REF for the tls_get_addr function. */
22202 static GTY(()) rtx ix86_tls_symbol;
22203 static rtx
22204 ix86_tls_get_addr (void)
22207 if (!ix86_tls_symbol)
22209 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
22210 (TARGET_ANY_GNU_TLS
22212 ? "___tls_get_addr"
22213 : "__tls_get_addr");
22216 return ix86_tls_symbol;
22219 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
22221 static GTY(()) rtx ix86_tls_module_base_symbol;
22222 static rtx
22223 ix86_tls_module_base (void)
22226 if (!ix86_tls_module_base_symbol)
22228 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
22229 "_TLS_MODULE_BASE_");
22230 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
22231 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
22234 return ix86_tls_module_base_symbol;
22237 /* Calculate the length of the memory address in the instruction
22238 encoding. Does not include the one-byte modrm, opcode, or prefix. */
22240 int
22241 memory_address_length (rtx addr)
22243 struct ix86_address parts;
22244 rtx base, index, disp;
22248 if (GET_CODE (addr) == PRE_DEC
22249 || GET_CODE (addr) == POST_INC
22250 || GET_CODE (addr) == PRE_MODIFY
22251 || GET_CODE (addr) == POST_MODIFY)
22254 ok = ix86_decompose_address (addr, &parts);
22257 if (parts.base && GET_CODE (parts.base) == SUBREG)
22258 parts.base = SUBREG_REG (parts.base);
22259 if (parts.index && GET_CODE (parts.index) == SUBREG)
22260 parts.index = SUBREG_REG (parts.index);
22263 index = parts.index;
22267 /* Rule of thumb:
22268 - esp as the base always wants an index,
22269 - ebp as the base always wants a displacement,
22270 - r12 as the base always wants an index,
22271 - r13 as the base always wants a displacement. */
22273 /* Register Indirect. */
22274 if (base && !index && !disp)
22276 /* esp (for its index) and ebp (for its displacement) need
22277 the two-byte modrm form.  Similarly for r12 and r13 in 64-bit
22278 mode.  */
22280 && (addr == arg_pointer_rtx
22281 || addr == frame_pointer_rtx
22282 || REGNO (addr) == SP_REG
22283 || REGNO (addr) == BP_REG
22284 || REGNO (addr) == R12_REG
22285 || REGNO (addr) == R13_REG))
22289 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
22290 is not disp32, but disp32(%rip), so for disp32
22291 SIB byte is needed, unless print_operand_address
22292 optimizes it into disp32(%rip) or (%rip) is implied
22293 by UNSPEC.  */
22294 else if (disp && !base && !index)
22301 if (GET_CODE (disp) == CONST)
22302 symbol = XEXP (disp, 0);
22303 if (GET_CODE (symbol) == PLUS
22304 && CONST_INT_P (XEXP (symbol, 1)))
22305 symbol = XEXP (symbol, 0);
22307 if (GET_CODE (symbol) != LABEL_REF
22308 && (GET_CODE (symbol) != SYMBOL_REF
22309 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
22310 && (GET_CODE (symbol) != UNSPEC
22311 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
22312 && XINT (symbol, 1) != UNSPEC_PCREL
22313 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
22320 /* Find the length of the displacement constant. */
22323 if (base && satisfies_constraint_K (disp))
22328 /* ebp always wants a displacement. Similarly r13. */
22329 else if (base && REG_P (base)
22330 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
22333 /* An index requires the two-byte modrm form.... */
22335 /* ...like esp (or r12), which always wants an index. */
22336 || base == arg_pointer_rtx
22337 || base == frame_pointer_rtx
22338 || (base && REG_P (base)
22339 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
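
/* A few worked examples of the byte count computed above (bytes
   following the opcode and modrm; illustrative, following the branches
   above):

     (%ebp)        -> 1  (ebp always needs a disp8, here zero)
     4(%esp)       -> 2  (SIB byte plus disp8)
     foo(,%eax,4)  -> 5  (SIB byte plus disp32)  */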
22356 /* Compute default value for "length_immediate" attribute.  When SHORTFORM
22357 is set, expect that the insn has an 8-bit immediate alternative.  */
22359 ix86_attr_length_immediate_default (rtx insn, int shortform)
22363 extract_insn_cached (insn);
22364 for (i = recog_data.n_operands - 1; i >= 0; --i)
22365 if (CONSTANT_P (recog_data.operand[i]))
22367 enum attr_mode mode = get_attr_mode (insn);
22370 if (shortform && CONST_INT_P (recog_data.operand[i]))
22372 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
22379 ival = trunc_int_for_mode (ival, HImode);
22382 ival = trunc_int_for_mode (ival, SImode);
22387 if (IN_RANGE (ival, -128, 127))
22404 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values.  */
22409 fatal_insn ("unknown insn mode", insn);
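
/* Example of the distinction drawn above: with a short-form alternative
   available, "add $100, %eax" can use the 1-byte sign-extended imm8
   encoding, while "add $1000, %eax" needs the full 4-byte imm32; DImode
   immediates still count as at most 4 bytes because they are encoded as
   sign-extended 32-bit values.  */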
22414 /* Compute default value for "length_address" attribute. */
22416 ix86_attr_length_address_default (rtx insn)
22420 if (get_attr_type (insn) == TYPE_LEA)
22422 rtx set = PATTERN (insn), addr;
22424 if (GET_CODE (set) == PARALLEL)
22425 set = XVECEXP (set, 0, 0);
22427 gcc_assert (GET_CODE (set) == SET);
22429 addr = SET_SRC (set);
22430 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
22432 if (GET_CODE (addr) == ZERO_EXTEND)
22433 addr = XEXP (addr, 0);
22434 if (GET_CODE (addr) == SUBREG)
22435 addr = SUBREG_REG (addr);
22438 return memory_address_length (addr);
22441 extract_insn_cached (insn);
22442 for (i = recog_data.n_operands - 1; i >= 0; --i)
22443 if (MEM_P (recog_data.operand[i]))
22445 constrain_operands_cached (reload_completed);
22446 if (which_alternative != -1)
22448 const char *constraints = recog_data.constraints[i];
22449 int alt = which_alternative;
22451 while (*constraints == '=' || *constraints == '+')
22454 while (*constraints++ != ',')
22456 /* Skip ignored operands. */
22457 if (*constraints == 'X')
22460 return memory_address_length (XEXP (recog_data.operand[i], 0));
22465 /* Compute default value for "length_vex" attribute.  It includes
22466 the 2- or 3-byte VEX prefix and 1 opcode byte.  */
22469 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
22474 /* Only the 0f opcode map can use the 2-byte VEX prefix; the VEX.W bit
22475 requires the 3-byte VEX prefix.  */
22476 if (!has_0f_opcode || has_vex_w)
22479 /* We can always use the 2-byte VEX prefix in 32-bit mode.  */
22483 extract_insn_cached (insn);
22485 for (i = recog_data.n_operands - 1; i >= 0; --i)
22486 if (REG_P (recog_data.operand[i]))
22488 /* REX.W bit uses 3 byte VEX prefix. */
22489 if (GET_MODE (recog_data.operand[i]) == DImode
22490 && GENERAL_REG_P (recog_data.operand[i]))
22495 /* REX.X or REX.B bits use 3 byte VEX prefix. */
22496 if (MEM_P (recog_data.operand[i])
22497 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
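
/* Concrete example: vaddps %xmm0, %xmm1, %xmm2 fits the 2-byte (0xC5)
   VEX prefix, so this attribute is 2 + 1 = 3; an insn that needs VEX.W,
   a non-0f opcode map, or the REX.X/REX.B bits (an extended register,
   or a memory operand mentioning r8-r15) must use the 3-byte (0xC4)
   form, giving 3 + 1 = 4.  */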
22504 /* Return the maximum number of instructions a CPU can issue.  */
22507 ix86_issue_rate (void)
22511 case PROCESSOR_PENTIUM:
22512 case PROCESSOR_ATOM:
22516 case PROCESSOR_PENTIUMPRO:
22517 case PROCESSOR_PENTIUM4:
22518 case PROCESSOR_CORE2_32:
22519 case PROCESSOR_CORE2_64:
22520 case PROCESSOR_COREI7_32:
22521 case PROCESSOR_COREI7_64:
22522 case PROCESSOR_ATHLON:
22524 case PROCESSOR_AMDFAM10:
22525 case PROCESSOR_NOCONA:
22526 case PROCESSOR_GENERIC32:
22527 case PROCESSOR_GENERIC64:
22528 case PROCESSOR_BDVER1:
22529 case PROCESSOR_BTVER1:
22537 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
22538 by DEP_INSN and nothing else set by DEP_INSN.  */
22541 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
22545 /* Simplify the test for uninteresting insns. */
22546 if (insn_type != TYPE_SETCC
22547 && insn_type != TYPE_ICMOV
22548 && insn_type != TYPE_FCMOV
22549 && insn_type != TYPE_IBR)
22552 if ((set = single_set (dep_insn)) != 0)
22554 set = SET_DEST (set);
22557 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
22558 && XVECLEN (PATTERN (dep_insn), 0) == 2
22559 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
22560 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
22562 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
22563 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
22568 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
22571 /* This test is true if the dependent insn reads the flags but
22572 not any other potentially set register. */
22573 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
22576 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
22582 /* Return true iff USE_INSN has a memory address with operands set by
22583 SET_INSN.  */
22586 ix86_agi_dependent (rtx set_insn, rtx use_insn)
22589 extract_insn_cached (use_insn);
22590 for (i = recog_data.n_operands - 1; i >= 0; --i)
22591 if (MEM_P (recog_data.operand[i]))
22593 rtx addr = XEXP (recog_data.operand[i], 0);
22594 return modified_in_p (addr, set_insn) != 0;
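
/* The classic case this test catches (an address-generation interlock
   on the original Pentium):

     add $4, %ebx
     mov (%ebx), %eax    ; address depends on the preceding add

   the load's address generation has to wait for the add, costing an
   extra cycle, which ix86_adjust_cost below accounts for.  */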
22600 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22602 enum attr_type insn_type, dep_insn_type;
22603 enum attr_memory memory;
22605 int dep_insn_code_number;
22607 /* Anti and output dependencies have zero cost on all CPUs. */
22608 if (REG_NOTE_KIND (link) != 0)
22611 dep_insn_code_number = recog_memoized (dep_insn);
22613 /* If we can't recognize the insns, we can't really do anything. */
22614 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
22617 insn_type = get_attr_type (insn);
22618 dep_insn_type = get_attr_type (dep_insn);
22622 case PROCESSOR_PENTIUM:
22623 /* Address Generation Interlock adds a cycle of latency. */
22624 if (insn_type == TYPE_LEA)
22626 rtx addr = PATTERN (insn);
22628 if (GET_CODE (addr) == PARALLEL)
22629 addr = XVECEXP (addr, 0, 0);
22631 gcc_assert (GET_CODE (addr) == SET);
22633 addr = SET_SRC (addr);
22634 if (modified_in_p (addr, dep_insn))
22637 else if (ix86_agi_dependent (dep_insn, insn))
22640 /* ??? Compares pair with jump/setcc. */
22641 if (ix86_flags_dependent (insn, dep_insn, insn_type))
22644 /* Floating point stores require value to be ready one cycle earlier. */
22645 if (insn_type == TYPE_FMOV
22646 && get_attr_memory (insn) == MEMORY_STORE
22647 && !ix86_agi_dependent (dep_insn, insn))
22651 case PROCESSOR_PENTIUMPRO:
22652 memory = get_attr_memory (insn);
22654 /* INT->FP conversion is expensive. */
22655 if (get_attr_fp_int_src (dep_insn))
22658 /* There is one cycle extra latency between an FP op and a store. */
22659 if (insn_type == TYPE_FMOV
22660 && (set = single_set (dep_insn)) != NULL_RTX
22661 && (set2 = single_set (insn)) != NULL_RTX
22662 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
22663 && MEM_P (SET_DEST (set2)))
22666 /* Show the ability of the reorder buffer to hide the latency of a load
22667 by executing it in parallel with the previous instruction when the
22668 previous instruction is not needed to compute the address.  */
22669 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22670 && !ix86_agi_dependent (dep_insn, insn))
22672 /* Claim moves to take one cycle, as the core can issue one load
22673 at a time and the next load can start a cycle later.  */
22674 if (dep_insn_type == TYPE_IMOV
22675 || dep_insn_type == TYPE_FMOV)
22683 memory = get_attr_memory (insn);
22685 /* The esp dependency is resolved before the instruction is really
22686 finished.  */
22687 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
22688 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
22691 /* INT->FP conversion is expensive. */
22692 if (get_attr_fp_int_src (dep_insn))
22695 /* Show the ability of the reorder buffer to hide the latency of a load
22696 by executing it in parallel with the previous instruction when the
22697 previous instruction is not needed to compute the address.  */
22698 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22699 && !ix86_agi_dependent (dep_insn, insn))
22701 /* Claim moves to take one cycle, as the core can issue one load
22702 at a time and the next load can start a cycle later.  */
22703 if (dep_insn_type == TYPE_IMOV
22704 || dep_insn_type == TYPE_FMOV)
22713 case PROCESSOR_ATHLON:
22715 case PROCESSOR_AMDFAM10:
22716 case PROCESSOR_BDVER1:
22717 case PROCESSOR_BTVER1:
22718 case PROCESSOR_ATOM:
22719 case PROCESSOR_GENERIC32:
22720 case PROCESSOR_GENERIC64:
22721 memory = get_attr_memory (insn);
22723 /* Show the ability of the reorder buffer to hide the latency of a load
22724 by executing it in parallel with the previous instruction when the
22725 previous instruction is not needed to compute the address.  */
22726 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22727 && !ix86_agi_dependent (dep_insn, insn))
22729 enum attr_unit unit = get_attr_unit (insn);
22732 /* Because of the difference between the length of integer and
22733 floating unit pipeline preparation stages, the memory operands
22734 for floating point are cheaper.
22736 ??? For Athlon the difference is most probably 2.  */
22737 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
22740 loadcost = TARGET_ATHLON ? 2 : 0;
22742 if (cost >= loadcost)
22755 /* How many alternative schedules to try.  This should be as wide as the
22756 scheduling freedom in the DFA, but no wider.  Making this value too
22757 large results in extra work for the scheduler.  */
22760 ia32_multipass_dfa_lookahead (void)
22764 case PROCESSOR_PENTIUM:
22767 case PROCESSOR_PENTIUMPRO:
22771 case PROCESSOR_CORE2_32:
22772 case PROCESSOR_CORE2_64:
22773 case PROCESSOR_COREI7_32:
22774 case PROCESSOR_COREI7_64:
22775 /* Generally, we want haifa-sched:max_issue() to look ahead as far
22776 as the number of instructions that can be executed in one cycle,
22777 i.e., issue_rate.  I wonder why tuning for many CPUs does not do this.  */
22778 return ix86_issue_rate ();
22787 /* Model decoder of Core 2/i7.
22788 The hooks below for multipass scheduling (see haifa-sched.c:max_issue)
22789 track the instruction fetch block boundaries and make sure that long
22790 (9+ bytes) instructions are assigned to D0. */
22792 /* Maximum length of an insn that can be handled by
22793 a secondary decoder unit. '8' for Core 2/i7. */
22794 static int core2i7_secondary_decoder_max_insn_size;
22796 /* Ifetch block size, i.e., the number of bytes the decoder reads per cycle.
22797 '16' for Core 2/i7.  */
22798 static int core2i7_ifetch_block_size;
22800 /* Maximum number of instructions decoder can handle per cycle.
22801 '6' for Core 2/i7. */
22802 static int core2i7_ifetch_block_max_insns;
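
/* With the Core 2/i7 numbers installed in ix86_sched_init_global below
   (16-byte fetch block, at most 6 insns decoded per cycle, 8-byte limit
   on the secondary decoders), a 9-or-more-byte insn can only go to the
   first decoder (D0), and an insn that would overflow the fetch block
   or the per-cycle insn count has to wait for the next cycle; that is
   what the filtering below models.  */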
22804 typedef struct ix86_first_cycle_multipass_data_ *
22805 ix86_first_cycle_multipass_data_t;
22806 typedef const struct ix86_first_cycle_multipass_data_ *
22807 const_ix86_first_cycle_multipass_data_t;
22809 /* A variable to store target state across calls to max_issue within
22810 one cycle.  */
22811 static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
22812 *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
22814 /* Initialize DATA. */
22816 core2i7_first_cycle_multipass_init (void *_data)
22818 ix86_first_cycle_multipass_data_t data
22819 = (ix86_first_cycle_multipass_data_t) _data;
22821 data->ifetch_block_len = 0;
22822 data->ifetch_block_n_insns = 0;
22823 data->ready_try_change = NULL;
22824 data->ready_try_change_size = 0;
22827 /* Advancing the cycle; reset ifetch block counts. */
22829 core2i7_dfa_post_advance_cycle (void)
22831 ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;
22833 gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22835 data->ifetch_block_len = 0;
22836 data->ifetch_block_n_insns = 0;
22839 static int min_insn_size (rtx);
22841 /* Filter out insns from ready_try that the core will not be able to issue
22842 on the current cycle due to decoder restrictions.  */
22844 core2i7_first_cycle_multipass_filter_ready_try
22845 (const_ix86_first_cycle_multipass_data_t data,
22846 char *ready_try, int n_ready, bool first_cycle_insn_p)
22853 if (ready_try[n_ready])
22856 insn = get_ready_element (n_ready);
22857 insn_size = min_insn_size (insn);
22859 if (/* If this insn is too long for a secondary decoder ... */
22860 (!first_cycle_insn_p
22861 && insn_size > core2i7_secondary_decoder_max_insn_size)
22862 /* ... or it would not fit into the ifetch block ... */
22863 || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
22864 /* ... or the decoder is full already ... */
22865 || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
22866 /* ... mask the insn out. */
22868 ready_try[n_ready] = 1;
22870 if (data->ready_try_change)
22871 SET_BIT (data->ready_try_change, n_ready);
22876 /* Prepare for a new round of multipass lookahead scheduling. */
22878 core2i7_first_cycle_multipass_begin (void *_data, char *ready_try, int n_ready,
22879 bool first_cycle_insn_p)
22881 ix86_first_cycle_multipass_data_t data
22882 = (ix86_first_cycle_multipass_data_t) _data;
22883 const_ix86_first_cycle_multipass_data_t prev_data
22884 = ix86_first_cycle_multipass_data;
22886 /* Restore the state from the end of the previous round. */
22887 data->ifetch_block_len = prev_data->ifetch_block_len;
22888 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns;
22890 /* Filter instructions that cannot be issued on current cycle due to
22891 decoder restrictions. */
22892 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22893 first_cycle_insn_p);
22896 /* INSN is being issued in current solution. Account for its impact on
22897 the decoder model. */
22899 core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
22900 rtx insn, const void *_prev_data)
22902 ix86_first_cycle_multipass_data_t data
22903 = (ix86_first_cycle_multipass_data_t) _data;
22904 const_ix86_first_cycle_multipass_data_t prev_data
22905 = (const_ix86_first_cycle_multipass_data_t) _prev_data;
22907 int insn_size = min_insn_size (insn);
22909 data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
22910 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
22911 gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
22912 && data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22914 /* Allocate or resize the bitmap for storing INSN's effect on ready_try. */
22915 if (!data->ready_try_change)
22917 data->ready_try_change = sbitmap_alloc (n_ready);
22918 data->ready_try_change_size = n_ready;
22920 else if (data->ready_try_change_size < n_ready)
22922 data->ready_try_change = sbitmap_resize (data->ready_try_change,
22924 data->ready_try_change_size = n_ready;
22926 sbitmap_zero (data->ready_try_change);
22928 /* Filter out insns from ready_try that the core will not be able to issue
22929 on the current cycle due to decoder restrictions.  */
22930 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22934 /* Revert the effect on ready_try. */
22936 core2i7_first_cycle_multipass_backtrack (const void *_data,
22938 int n_ready ATTRIBUTE_UNUSED)
22940 const_ix86_first_cycle_multipass_data_t data
22941 = (const_ix86_first_cycle_multipass_data_t) _data;
22942 unsigned int i = 0;
22943 sbitmap_iterator sbi;
22945 gcc_assert (sbitmap_last_set_bit (data->ready_try_change) < n_ready);
22946 EXECUTE_IF_SET_IN_SBITMAP (data->ready_try_change, 0, i, sbi)
22952 /* Save the result of multipass lookahead scheduling for the next round. */
22954 core2i7_first_cycle_multipass_end (const void *_data)
22956 const_ix86_first_cycle_multipass_data_t data
22957 = (const_ix86_first_cycle_multipass_data_t) _data;
22958 ix86_first_cycle_multipass_data_t next_data
22959 = ix86_first_cycle_multipass_data;
22963 next_data->ifetch_block_len = data->ifetch_block_len;
22964 next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
22968 /* Deallocate target data. */
22970 core2i7_first_cycle_multipass_fini (void *_data)
22972 ix86_first_cycle_multipass_data_t data
22973 = (ix86_first_cycle_multipass_data_t) _data;
22975 if (data->ready_try_change)
22977 sbitmap_free (data->ready_try_change);
22978 data->ready_try_change = NULL;
22979 data->ready_try_change_size = 0;
22983 /* Prepare for scheduling pass. */
22985 ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
22986 int verbose ATTRIBUTE_UNUSED,
22987 int max_uid ATTRIBUTE_UNUSED)
22989 /* Install scheduling hooks for current CPU. Some of these hooks are used
22990 in time-critical parts of the scheduler, so we only set them up when
22991 they are actually used. */
22994 case PROCESSOR_CORE2_32:
22995 case PROCESSOR_CORE2_64:
22996 case PROCESSOR_COREI7_32:
22997 case PROCESSOR_COREI7_64:
22998 targetm.sched.dfa_post_advance_cycle
22999 = core2i7_dfa_post_advance_cycle;
23000 targetm.sched.first_cycle_multipass_init
23001 = core2i7_first_cycle_multipass_init;
23002 targetm.sched.first_cycle_multipass_begin
23003 = core2i7_first_cycle_multipass_begin;
23004 targetm.sched.first_cycle_multipass_issue
23005 = core2i7_first_cycle_multipass_issue;
23006 targetm.sched.first_cycle_multipass_backtrack
23007 = core2i7_first_cycle_multipass_backtrack;
23008 targetm.sched.first_cycle_multipass_end
23009 = core2i7_first_cycle_multipass_end;
23010 targetm.sched.first_cycle_multipass_fini
23011 = core2i7_first_cycle_multipass_fini;
23013 /* Set decoder parameters. */
23014 core2i7_secondary_decoder_max_insn_size = 8;
23015 core2i7_ifetch_block_size = 16;
23016 core2i7_ifetch_block_max_insns = 6;
23020 targetm.sched.dfa_post_advance_cycle = NULL;
23021 targetm.sched.first_cycle_multipass_init = NULL;
23022 targetm.sched.first_cycle_multipass_begin = NULL;
23023 targetm.sched.first_cycle_multipass_issue = NULL;
23024 targetm.sched.first_cycle_multipass_backtrack = NULL;
23025 targetm.sched.first_cycle_multipass_end = NULL;
23026 targetm.sched.first_cycle_multipass_fini = NULL;
23032 /* Compute the alignment given to a constant that is being placed in memory.
23033 EXP is the constant and ALIGN is the alignment that the object would
23034 ordinarily have.
23035 The value of this function is used instead of that alignment to align
23036 the object.  */
23039 ix86_constant_alignment (tree exp, int align)
23041 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
23042 || TREE_CODE (exp) == INTEGER_CST)
23044 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
23046 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
23049 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
23050 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
23051 return BITS_PER_WORD;
23056 /* Compute the alignment for a static variable.
23057 TYPE is the data type, and ALIGN is the alignment that
23058 the object would ordinarily have. The value of this function is used
23059 instead of that alignment to align the object. */
23062 ix86_data_alignment (tree type, int align)
23064 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
23066 if (AGGREGATE_TYPE_P (type)
23067 && TYPE_SIZE (type)
23068 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23069 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
23070 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
23071 && align < max_align)
23074 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
23075 to a 16-byte boundary.  */
23078 if (AGGREGATE_TYPE_P (type)
23079 && TYPE_SIZE (type)
23080 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23081 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
23082 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23086 if (TREE_CODE (type) == ARRAY_TYPE)
23088 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23090 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23093 else if (TREE_CODE (type) == COMPLEX_TYPE)
23096 if (TYPE_MODE (type) == DCmode && align < 64)
23098 if ((TYPE_MODE (type) == XCmode
23099 || TYPE_MODE (type) == TCmode) && align < 128)
23102 else if ((TREE_CODE (type) == RECORD_TYPE
23103 || TREE_CODE (type) == UNION_TYPE
23104 || TREE_CODE (type) == QUAL_UNION_TYPE)
23105 && TYPE_FIELDS (type))
23107 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23109 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23112 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23113 || TREE_CODE (type) == INTEGER_TYPE)
23115 if (TYPE_MODE (type) == DFmode && align < 64)
23117 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
23124 /* Compute the alignment for a local variable or a stack slot. EXP is
23125 the data type or decl itself, MODE is the widest mode available and
23126 ALIGN is the alignment that the object would ordinarily have. The
23127 value of this macro is used instead of that alignment to align the
23128 object.  */
23131 ix86_local_alignment (tree exp, enum machine_mode mode,
23132 unsigned int align)
23136 if (exp && DECL_P (exp))
23138 type = TREE_TYPE (exp);
23147 /* Don't do dynamic stack realignment for long long objects with
23148 -mpreferred-stack-boundary=2. */
23151 && ix86_preferred_stack_boundary < 64
23152 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
23153 && (!type || !TYPE_USER_ALIGN (type))
23154 && (!decl || !DECL_USER_ALIGN (decl)))
23157 /* If TYPE is NULL, we are allocating a stack slot for caller-save
23158 register in MODE.  We will return the largest alignment of XF
23159 and DF.  */
23162 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
23163 align = GET_MODE_ALIGNMENT (DFmode);
23167 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
23168 to a 16-byte boundary.  The exact wording is:
23170 An array uses the same alignment as its elements, except that a local or
23171 global array variable of length at least 16 bytes or
23172 a C99 variable-length array variable always has alignment of at least 16 bytes.
23174 This was added to allow use of aligned SSE instructions on arrays.  The
23175 rule is meant for static storage (where the compiler cannot do the analysis
23176 by itself).  We follow it for automatic variables only when convenient.
23177 We fully control everything in the function being compiled, and functions
23178 from other units cannot rely on the alignment.
23180 Exclude the va_list type.  It is the common case of a local array where
23181 we cannot benefit from the alignment.  */
23182 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
23185 if (AGGREGATE_TYPE_P (type)
23186 && (va_list_type_node == NULL_TREE
23187 || (TYPE_MAIN_VARIANT (type)
23188 != TYPE_MAIN_VARIANT (va_list_type_node)))
23189 && TYPE_SIZE (type)
23190 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23191 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
23192 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23195 if (TREE_CODE (type) == ARRAY_TYPE)
23197 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23199 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23202 else if (TREE_CODE (type) == COMPLEX_TYPE)
23204 if (TYPE_MODE (type) == DCmode && align < 64)
23206 if ((TYPE_MODE (type) == XCmode
23207 || TYPE_MODE (type) == TCmode) && align < 128)
23210 else if ((TREE_CODE (type) == RECORD_TYPE
23211 || TREE_CODE (type) == UNION_TYPE
23212 || TREE_CODE (type) == QUAL_UNION_TYPE)
23213 && TYPE_FIELDS (type))
23215 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23217 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23220 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23221 || TREE_CODE (type) == INTEGER_TYPE)
23224 if (TYPE_MODE (type) == DFmode && align < 64)
23226 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
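
/* For example, a local

     double buf[32];

   in a 64-bit function optimized for speed is larger than 16 bytes, so
   the code above raises its alignment to 128 bits and aligned SSE
   accesses become possible, while a va_list local keeps its natural
   alignment.  */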
23232 /* Compute the minimum required alignment for dynamic stack realignment
23233 purposes for a local variable, parameter or a stack slot. EXP is
23234 the data type or decl itself, MODE is its mode and ALIGN is the
23235 alignment that the object would ordinarily have. */
23238 ix86_minimum_alignment (tree exp, enum machine_mode mode,
23239 unsigned int align)
23243 if (exp && DECL_P (exp))
23245 type = TREE_TYPE (exp);
23254 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
23257 /* Don't do dynamic stack realignment for long long objects with
23258 -mpreferred-stack-boundary=2. */
23259 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
23260 && (!type || !TYPE_USER_ALIGN (type))
23261 && (!decl || !DECL_USER_ALIGN (decl)))
23267 /* Find a location for the static chain incoming to a nested function.
23268 This is a register, unless all free registers are used by arguments. */
23271 ix86_static_chain (const_tree fndecl, bool incoming_p)
23275 if (!DECL_STATIC_CHAIN (fndecl))
23280 /* We always use R10 in 64-bit mode. */
23286 /* By default in 32-bit mode we use ECX to pass the static chain. */
23289 fntype = TREE_TYPE (fndecl);
23290 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
23292 /* Fastcall functions use ecx/edx for arguments, which leaves
23293 us with EAX for the static chain. */
23296 else if (ix86_is_type_thiscall (fntype))
23298 /* Thiscall functions use ecx for arguments, which leaves
23299 us with EAX for the static chain. */
23302 else if (ix86_function_regparm (fntype, fndecl) == 3)
23304 /* For regparm 3, we have no free call-clobbered registers in
23305 which to store the static chain. In order to implement this,
23306 we have the trampoline push the static chain to the stack.
23307 However, we can't push a value below the return address when
23308 we call the nested function directly, so we have to use an
23309 alternate entry point. For this we use ESI, and have the
23310 alternate entry point push ESI, so that things appear the
23311 same once we're executing the nested function. */
23314 if (fndecl == current_function_decl)
23315 ix86_static_chain_on_stack = true;
23316 return gen_frame_mem (SImode,
23317 plus_constant (arg_pointer_rtx, -8));
23323 return gen_rtx_REG (Pmode, regno);
23326 /* Emit RTL insns to initialize the variable parts of a trampoline.
23327 FNDECL is the decl of the target address; M_TRAMP is a MEM for
23328 the trampoline, and CHAIN_VALUE is an RTX for the static chain
23329 to be passed to the target function. */
23332 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
23336 fnaddr = XEXP (DECL_RTL (fndecl), 0);
23343 /* Depending on the static chain location, either load a register
23344 with a constant, or push the constant to the stack. All of the
23345 instructions are the same size. */
23346 chain = ix86_static_chain (fndecl, true);
23349 if (REGNO (chain) == CX_REG)
23351 else if (REGNO (chain) == AX_REG)
23354 gcc_unreachable ();
23359 mem = adjust_address (m_tramp, QImode, 0);
23360 emit_move_insn (mem, gen_int_mode (opcode, QImode));
23362 mem = adjust_address (m_tramp, SImode, 1);
23363 emit_move_insn (mem, chain_value);
23365 /* Compute offset from the end of the jmp to the target function.
23366 In the case in which the trampoline stores the static chain on
23367 the stack, we need to skip the first insn which pushes the
23368 (call-saved) register static chain; this push is 1 byte. */
23369 disp = expand_binop (SImode, sub_optab, fnaddr,
23370 plus_constant (XEXP (m_tramp, 0),
23371 MEM_P (chain) ? 9 : 10),
23372 NULL_RTX, 1, OPTAB_DIRECT);
23374 mem = adjust_address (m_tramp, QImode, 5);
23375 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
23377 mem = adjust_address (m_tramp, SImode, 6);
23378 emit_move_insn (mem, disp);
23384 /* Load the function address into r11.  Try to load the address using
23385 the shorter movl instead of movabs.  We may want to support
23386 movq for kernel mode, but the kernel does not use trampolines at
23387 the moment.  */
23388 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
23390 fnaddr = copy_to_mode_reg (DImode, fnaddr);
23392 mem = adjust_address (m_tramp, HImode, offset);
23393 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
23395 mem = adjust_address (m_tramp, SImode, offset + 2);
23396 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
23401 mem = adjust_address (m_tramp, HImode, offset);
23402 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
23404 mem = adjust_address (m_tramp, DImode, offset + 2);
23405 emit_move_insn (mem, fnaddr);
23409 /* Load static chain using movabs to r10. */
23410 mem = adjust_address (m_tramp, HImode, offset);
23411 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
23413 mem = adjust_address (m_tramp, DImode, offset + 2);
23414 emit_move_insn (mem, chain_value);
23417 /* Jump to r11; the last (unused) byte is a nop, only there to
23418 pad the write out to a single 32-bit store. */
23419 mem = adjust_address (m_tramp, SImode, offset);
23420 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
23423 gcc_assert (offset <= TRAMPOLINE_SIZE);
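
/* For reference, the 64-bit byte layout written above when the target
   address fits in 32 bits:

     offset  0:  41 bb <imm32>   movl   $fnaddr, %r11d
     offset  6:  49 ba <imm64>   movabs $chain, %r10
     offset 16:  49 ff e3 90     rex.W jmp *%r11; nop

   The 32-bit variant is a one-byte mov-immediate (or a push, when the
   static chain is passed on the stack) followed by e9 <rel32> (jmp).  */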
23426 #ifdef ENABLE_EXECUTE_STACK
23427 #ifdef CHECK_EXECUTE_STACK_ENABLED
23428 if (CHECK_EXECUTE_STACK_ENABLED)
23430 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
23431 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
23435 /* The following file contains several enumerations and data structures
23436 built from the definitions in i386-builtin-types.def. */
23438 #include "i386-builtin-types.inc"
23440 /* Table for the ix86 builtin non-function types. */
23441 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
23443 /* Retrieve an element from the above table, building some of
23444 the types lazily. */
23447 ix86_get_builtin_type (enum ix86_builtin_type tcode)
23449 unsigned int index;
23452 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
23454 type = ix86_builtin_type_tab[(int) tcode];
23458 gcc_assert (tcode > IX86_BT_LAST_PRIM);
23459 if (tcode <= IX86_BT_LAST_VECT)
23461 enum machine_mode mode;
23463 index = tcode - IX86_BT_LAST_PRIM - 1;
23464 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
23465 mode = ix86_builtin_type_vect_mode[index];
23467 type = build_vector_type_for_mode (itype, mode);
23473 index = tcode - IX86_BT_LAST_VECT - 1;
23474 if (tcode <= IX86_BT_LAST_PTR)
23475 quals = TYPE_UNQUALIFIED;
23477 quals = TYPE_QUAL_CONST;
23479 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
23480 if (quals != TYPE_UNQUALIFIED)
23481 itype = build_qualified_type (itype, quals);
23483 type = build_pointer_type (itype);
23486 ix86_builtin_type_tab[(int) tcode] = type;
23490 /* Table for the ix86 builtin function types. */
23491 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
23493 /* Retrieve an element from the above table, building some of
23494 the types lazily. */
23497 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
23501 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
23503 type = ix86_builtin_func_type_tab[(int) tcode];
23507 if (tcode <= IX86_BT_LAST_FUNC)
23509 unsigned start = ix86_builtin_func_start[(int) tcode];
23510 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
23511 tree rtype, atype, args = void_list_node;
23514 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
23515 for (i = after - 1; i > start; --i)
23517 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
23518 args = tree_cons (NULL, atype, args);
23521 type = build_function_type (rtype, args);
23525 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
23526 enum ix86_builtin_func_type icode;
23528 icode = ix86_builtin_func_alias_base[index];
23529 type = ix86_get_builtin_func_type (icode);
23532 ix86_builtin_func_type_tab[(int) tcode] = type;
23537 /* Codes for all the SSE/MMX builtins. */
23540 IX86_BUILTIN_ADDPS,
23541 IX86_BUILTIN_ADDSS,
23542 IX86_BUILTIN_DIVPS,
23543 IX86_BUILTIN_DIVSS,
23544 IX86_BUILTIN_MULPS,
23545 IX86_BUILTIN_MULSS,
23546 IX86_BUILTIN_SUBPS,
23547 IX86_BUILTIN_SUBSS,
23549 IX86_BUILTIN_CMPEQPS,
23550 IX86_BUILTIN_CMPLTPS,
23551 IX86_BUILTIN_CMPLEPS,
23552 IX86_BUILTIN_CMPGTPS,
23553 IX86_BUILTIN_CMPGEPS,
23554 IX86_BUILTIN_CMPNEQPS,
23555 IX86_BUILTIN_CMPNLTPS,
23556 IX86_BUILTIN_CMPNLEPS,
23557 IX86_BUILTIN_CMPNGTPS,
23558 IX86_BUILTIN_CMPNGEPS,
23559 IX86_BUILTIN_CMPORDPS,
23560 IX86_BUILTIN_CMPUNORDPS,
23561 IX86_BUILTIN_CMPEQSS,
23562 IX86_BUILTIN_CMPLTSS,
23563 IX86_BUILTIN_CMPLESS,
23564 IX86_BUILTIN_CMPNEQSS,
23565 IX86_BUILTIN_CMPNLTSS,
23566 IX86_BUILTIN_CMPNLESS,
23567 IX86_BUILTIN_CMPNGTSS,
23568 IX86_BUILTIN_CMPNGESS,
23569 IX86_BUILTIN_CMPORDSS,
23570 IX86_BUILTIN_CMPUNORDSS,
23572 IX86_BUILTIN_COMIEQSS,
23573 IX86_BUILTIN_COMILTSS,
23574 IX86_BUILTIN_COMILESS,
23575 IX86_BUILTIN_COMIGTSS,
23576 IX86_BUILTIN_COMIGESS,
23577 IX86_BUILTIN_COMINEQSS,
23578 IX86_BUILTIN_UCOMIEQSS,
23579 IX86_BUILTIN_UCOMILTSS,
23580 IX86_BUILTIN_UCOMILESS,
23581 IX86_BUILTIN_UCOMIGTSS,
23582 IX86_BUILTIN_UCOMIGESS,
23583 IX86_BUILTIN_UCOMINEQSS,
23585 IX86_BUILTIN_CVTPI2PS,
23586 IX86_BUILTIN_CVTPS2PI,
23587 IX86_BUILTIN_CVTSI2SS,
23588 IX86_BUILTIN_CVTSI642SS,
23589 IX86_BUILTIN_CVTSS2SI,
23590 IX86_BUILTIN_CVTSS2SI64,
23591 IX86_BUILTIN_CVTTPS2PI,
23592 IX86_BUILTIN_CVTTSS2SI,
23593 IX86_BUILTIN_CVTTSS2SI64,
23595 IX86_BUILTIN_MAXPS,
23596 IX86_BUILTIN_MAXSS,
23597 IX86_BUILTIN_MINPS,
23598 IX86_BUILTIN_MINSS,
23600 IX86_BUILTIN_LOADUPS,
23601 IX86_BUILTIN_STOREUPS,
23602 IX86_BUILTIN_MOVSS,
23604 IX86_BUILTIN_MOVHLPS,
23605 IX86_BUILTIN_MOVLHPS,
23606 IX86_BUILTIN_LOADHPS,
23607 IX86_BUILTIN_LOADLPS,
23608 IX86_BUILTIN_STOREHPS,
23609 IX86_BUILTIN_STORELPS,
23611 IX86_BUILTIN_MASKMOVQ,
23612 IX86_BUILTIN_MOVMSKPS,
23613 IX86_BUILTIN_PMOVMSKB,
23615 IX86_BUILTIN_MOVNTPS,
23616 IX86_BUILTIN_MOVNTQ,
23618 IX86_BUILTIN_LOADDQU,
23619 IX86_BUILTIN_STOREDQU,
23621 IX86_BUILTIN_PACKSSWB,
23622 IX86_BUILTIN_PACKSSDW,
23623 IX86_BUILTIN_PACKUSWB,
23625 IX86_BUILTIN_PADDB,
23626 IX86_BUILTIN_PADDW,
23627 IX86_BUILTIN_PADDD,
23628 IX86_BUILTIN_PADDQ,
23629 IX86_BUILTIN_PADDSB,
23630 IX86_BUILTIN_PADDSW,
23631 IX86_BUILTIN_PADDUSB,
23632 IX86_BUILTIN_PADDUSW,
23633 IX86_BUILTIN_PSUBB,
23634 IX86_BUILTIN_PSUBW,
23635 IX86_BUILTIN_PSUBD,
23636 IX86_BUILTIN_PSUBQ,
23637 IX86_BUILTIN_PSUBSB,
23638 IX86_BUILTIN_PSUBSW,
23639 IX86_BUILTIN_PSUBUSB,
23640 IX86_BUILTIN_PSUBUSW,
23643 IX86_BUILTIN_PANDN,
23647 IX86_BUILTIN_PAVGB,
23648 IX86_BUILTIN_PAVGW,
23650 IX86_BUILTIN_PCMPEQB,
23651 IX86_BUILTIN_PCMPEQW,
23652 IX86_BUILTIN_PCMPEQD,
23653 IX86_BUILTIN_PCMPGTB,
23654 IX86_BUILTIN_PCMPGTW,
23655 IX86_BUILTIN_PCMPGTD,
23657 IX86_BUILTIN_PMADDWD,
23659 IX86_BUILTIN_PMAXSW,
23660 IX86_BUILTIN_PMAXUB,
23661 IX86_BUILTIN_PMINSW,
23662 IX86_BUILTIN_PMINUB,
23664 IX86_BUILTIN_PMULHUW,
23665 IX86_BUILTIN_PMULHW,
23666 IX86_BUILTIN_PMULLW,
23668 IX86_BUILTIN_PSADBW,
23669 IX86_BUILTIN_PSHUFW,
23671 IX86_BUILTIN_PSLLW,
23672 IX86_BUILTIN_PSLLD,
23673 IX86_BUILTIN_PSLLQ,
23674 IX86_BUILTIN_PSRAW,
23675 IX86_BUILTIN_PSRAD,
23676 IX86_BUILTIN_PSRLW,
23677 IX86_BUILTIN_PSRLD,
23678 IX86_BUILTIN_PSRLQ,
23679 IX86_BUILTIN_PSLLWI,
23680 IX86_BUILTIN_PSLLDI,
23681 IX86_BUILTIN_PSLLQI,
23682 IX86_BUILTIN_PSRAWI,
23683 IX86_BUILTIN_PSRADI,
23684 IX86_BUILTIN_PSRLWI,
23685 IX86_BUILTIN_PSRLDI,
23686 IX86_BUILTIN_PSRLQI,
23688 IX86_BUILTIN_PUNPCKHBW,
23689 IX86_BUILTIN_PUNPCKHWD,
23690 IX86_BUILTIN_PUNPCKHDQ,
23691 IX86_BUILTIN_PUNPCKLBW,
23692 IX86_BUILTIN_PUNPCKLWD,
23693 IX86_BUILTIN_PUNPCKLDQ,
23695 IX86_BUILTIN_SHUFPS,
23697 IX86_BUILTIN_RCPPS,
23698 IX86_BUILTIN_RCPSS,
23699 IX86_BUILTIN_RSQRTPS,
23700 IX86_BUILTIN_RSQRTPS_NR,
23701 IX86_BUILTIN_RSQRTSS,
23702 IX86_BUILTIN_RSQRTF,
23703 IX86_BUILTIN_SQRTPS,
23704 IX86_BUILTIN_SQRTPS_NR,
23705 IX86_BUILTIN_SQRTSS,
23707 IX86_BUILTIN_UNPCKHPS,
23708 IX86_BUILTIN_UNPCKLPS,
23710 IX86_BUILTIN_ANDPS,
23711 IX86_BUILTIN_ANDNPS,
23713 IX86_BUILTIN_XORPS,
23716 IX86_BUILTIN_LDMXCSR,
23717 IX86_BUILTIN_STMXCSR,
23718 IX86_BUILTIN_SFENCE,
23720 /* 3DNow! Original */
23721 IX86_BUILTIN_FEMMS,
23722 IX86_BUILTIN_PAVGUSB,
23723 IX86_BUILTIN_PF2ID,
23724 IX86_BUILTIN_PFACC,
23725 IX86_BUILTIN_PFADD,
23726 IX86_BUILTIN_PFCMPEQ,
23727 IX86_BUILTIN_PFCMPGE,
23728 IX86_BUILTIN_PFCMPGT,
23729 IX86_BUILTIN_PFMAX,
23730 IX86_BUILTIN_PFMIN,
23731 IX86_BUILTIN_PFMUL,
23732 IX86_BUILTIN_PFRCP,
23733 IX86_BUILTIN_PFRCPIT1,
23734 IX86_BUILTIN_PFRCPIT2,
23735 IX86_BUILTIN_PFRSQIT1,
23736 IX86_BUILTIN_PFRSQRT,
23737 IX86_BUILTIN_PFSUB,
23738 IX86_BUILTIN_PFSUBR,
23739 IX86_BUILTIN_PI2FD,
23740 IX86_BUILTIN_PMULHRW,
23742 /* 3DNow! Athlon Extensions */
23743 IX86_BUILTIN_PF2IW,
23744 IX86_BUILTIN_PFNACC,
23745 IX86_BUILTIN_PFPNACC,
23746 IX86_BUILTIN_PI2FW,
23747 IX86_BUILTIN_PSWAPDSI,
23748 IX86_BUILTIN_PSWAPDSF,
23751 IX86_BUILTIN_ADDPD,
23752 IX86_BUILTIN_ADDSD,
23753 IX86_BUILTIN_DIVPD,
23754 IX86_BUILTIN_DIVSD,
23755 IX86_BUILTIN_MULPD,
23756 IX86_BUILTIN_MULSD,
23757 IX86_BUILTIN_SUBPD,
23758 IX86_BUILTIN_SUBSD,
23760 IX86_BUILTIN_CMPEQPD,
23761 IX86_BUILTIN_CMPLTPD,
23762 IX86_BUILTIN_CMPLEPD,
23763 IX86_BUILTIN_CMPGTPD,
23764 IX86_BUILTIN_CMPGEPD,
23765 IX86_BUILTIN_CMPNEQPD,
23766 IX86_BUILTIN_CMPNLTPD,
23767 IX86_BUILTIN_CMPNLEPD,
23768 IX86_BUILTIN_CMPNGTPD,
23769 IX86_BUILTIN_CMPNGEPD,
23770 IX86_BUILTIN_CMPORDPD,
23771 IX86_BUILTIN_CMPUNORDPD,
23772 IX86_BUILTIN_CMPEQSD,
23773 IX86_BUILTIN_CMPLTSD,
23774 IX86_BUILTIN_CMPLESD,
23775 IX86_BUILTIN_CMPNEQSD,
23776 IX86_BUILTIN_CMPNLTSD,
23777 IX86_BUILTIN_CMPNLESD,
23778 IX86_BUILTIN_CMPORDSD,
23779 IX86_BUILTIN_CMPUNORDSD,
23781 IX86_BUILTIN_COMIEQSD,
23782 IX86_BUILTIN_COMILTSD,
23783 IX86_BUILTIN_COMILESD,
23784 IX86_BUILTIN_COMIGTSD,
23785 IX86_BUILTIN_COMIGESD,
23786 IX86_BUILTIN_COMINEQSD,
23787 IX86_BUILTIN_UCOMIEQSD,
23788 IX86_BUILTIN_UCOMILTSD,
23789 IX86_BUILTIN_UCOMILESD,
23790 IX86_BUILTIN_UCOMIGTSD,
23791 IX86_BUILTIN_UCOMIGESD,
23792 IX86_BUILTIN_UCOMINEQSD,
23794 IX86_BUILTIN_MAXPD,
23795 IX86_BUILTIN_MAXSD,
23796 IX86_BUILTIN_MINPD,
23797 IX86_BUILTIN_MINSD,
23799 IX86_BUILTIN_ANDPD,
23800 IX86_BUILTIN_ANDNPD,
23802 IX86_BUILTIN_XORPD,
23804 IX86_BUILTIN_SQRTPD,
23805 IX86_BUILTIN_SQRTSD,
23807 IX86_BUILTIN_UNPCKHPD,
23808 IX86_BUILTIN_UNPCKLPD,
23810 IX86_BUILTIN_SHUFPD,
23812 IX86_BUILTIN_LOADUPD,
23813 IX86_BUILTIN_STOREUPD,
23814 IX86_BUILTIN_MOVSD,
23816 IX86_BUILTIN_LOADHPD,
23817 IX86_BUILTIN_LOADLPD,
23819 IX86_BUILTIN_CVTDQ2PD,
23820 IX86_BUILTIN_CVTDQ2PS,
23822 IX86_BUILTIN_CVTPD2DQ,
23823 IX86_BUILTIN_CVTPD2PI,
23824 IX86_BUILTIN_CVTPD2PS,
23825 IX86_BUILTIN_CVTTPD2DQ,
23826 IX86_BUILTIN_CVTTPD2PI,
23828 IX86_BUILTIN_CVTPI2PD,
23829 IX86_BUILTIN_CVTSI2SD,
23830 IX86_BUILTIN_CVTSI642SD,
23832 IX86_BUILTIN_CVTSD2SI,
23833 IX86_BUILTIN_CVTSD2SI64,
23834 IX86_BUILTIN_CVTSD2SS,
23835 IX86_BUILTIN_CVTSS2SD,
23836 IX86_BUILTIN_CVTTSD2SI,
23837 IX86_BUILTIN_CVTTSD2SI64,
23839 IX86_BUILTIN_CVTPS2DQ,
23840 IX86_BUILTIN_CVTPS2PD,
23841 IX86_BUILTIN_CVTTPS2DQ,
23843 IX86_BUILTIN_MOVNTI,
23844 IX86_BUILTIN_MOVNTPD,
23845 IX86_BUILTIN_MOVNTDQ,
23847 IX86_BUILTIN_MOVQ128,
23850 IX86_BUILTIN_MASKMOVDQU,
23851 IX86_BUILTIN_MOVMSKPD,
23852 IX86_BUILTIN_PMOVMSKB128,
23854 IX86_BUILTIN_PACKSSWB128,
23855 IX86_BUILTIN_PACKSSDW128,
23856 IX86_BUILTIN_PACKUSWB128,
23858 IX86_BUILTIN_PADDB128,
23859 IX86_BUILTIN_PADDW128,
23860 IX86_BUILTIN_PADDD128,
23861 IX86_BUILTIN_PADDQ128,
23862 IX86_BUILTIN_PADDSB128,
23863 IX86_BUILTIN_PADDSW128,
23864 IX86_BUILTIN_PADDUSB128,
23865 IX86_BUILTIN_PADDUSW128,
23866 IX86_BUILTIN_PSUBB128,
23867 IX86_BUILTIN_PSUBW128,
23868 IX86_BUILTIN_PSUBD128,
23869 IX86_BUILTIN_PSUBQ128,
23870 IX86_BUILTIN_PSUBSB128,
23871 IX86_BUILTIN_PSUBSW128,
23872 IX86_BUILTIN_PSUBUSB128,
23873 IX86_BUILTIN_PSUBUSW128,
23875 IX86_BUILTIN_PAND128,
23876 IX86_BUILTIN_PANDN128,
23877 IX86_BUILTIN_POR128,
23878 IX86_BUILTIN_PXOR128,
23880 IX86_BUILTIN_PAVGB128,
23881 IX86_BUILTIN_PAVGW128,
23883 IX86_BUILTIN_PCMPEQB128,
23884 IX86_BUILTIN_PCMPEQW128,
23885 IX86_BUILTIN_PCMPEQD128,
23886 IX86_BUILTIN_PCMPGTB128,
23887 IX86_BUILTIN_PCMPGTW128,
23888 IX86_BUILTIN_PCMPGTD128,
23890 IX86_BUILTIN_PMADDWD128,
23892 IX86_BUILTIN_PMAXSW128,
23893 IX86_BUILTIN_PMAXUB128,
23894 IX86_BUILTIN_PMINSW128,
23895 IX86_BUILTIN_PMINUB128,
23897 IX86_BUILTIN_PMULUDQ,
23898 IX86_BUILTIN_PMULUDQ128,
23899 IX86_BUILTIN_PMULHUW128,
23900 IX86_BUILTIN_PMULHW128,
23901 IX86_BUILTIN_PMULLW128,
23903 IX86_BUILTIN_PSADBW128,
23904 IX86_BUILTIN_PSHUFHW,
23905 IX86_BUILTIN_PSHUFLW,
23906 IX86_BUILTIN_PSHUFD,
23908 IX86_BUILTIN_PSLLDQI128,
23909 IX86_BUILTIN_PSLLWI128,
23910 IX86_BUILTIN_PSLLDI128,
23911 IX86_BUILTIN_PSLLQI128,
23912 IX86_BUILTIN_PSRAWI128,
23913 IX86_BUILTIN_PSRADI128,
23914 IX86_BUILTIN_PSRLDQI128,
23915 IX86_BUILTIN_PSRLWI128,
23916 IX86_BUILTIN_PSRLDI128,
23917 IX86_BUILTIN_PSRLQI128,
23919 IX86_BUILTIN_PSLLDQ128,
23920 IX86_BUILTIN_PSLLW128,
23921 IX86_BUILTIN_PSLLD128,
23922 IX86_BUILTIN_PSLLQ128,
23923 IX86_BUILTIN_PSRAW128,
23924 IX86_BUILTIN_PSRAD128,
23925 IX86_BUILTIN_PSRLW128,
23926 IX86_BUILTIN_PSRLD128,
23927 IX86_BUILTIN_PSRLQ128,
23929 IX86_BUILTIN_PUNPCKHBW128,
23930 IX86_BUILTIN_PUNPCKHWD128,
23931 IX86_BUILTIN_PUNPCKHDQ128,
23932 IX86_BUILTIN_PUNPCKHQDQ128,
23933 IX86_BUILTIN_PUNPCKLBW128,
23934 IX86_BUILTIN_PUNPCKLWD128,
23935 IX86_BUILTIN_PUNPCKLDQ128,
23936 IX86_BUILTIN_PUNPCKLQDQ128,
23938 IX86_BUILTIN_CLFLUSH,
23939 IX86_BUILTIN_MFENCE,
23940 IX86_BUILTIN_LFENCE,
23942 IX86_BUILTIN_BSRSI,
23943 IX86_BUILTIN_BSRDI,
23944 IX86_BUILTIN_RDPMC,
23945 IX86_BUILTIN_RDTSC,
23946 IX86_BUILTIN_RDTSCP,
23947 IX86_BUILTIN_ROLQI,
23948 IX86_BUILTIN_ROLHI,
23949 IX86_BUILTIN_RORQI,
23950 IX86_BUILTIN_RORHI,
23953 IX86_BUILTIN_ADDSUBPS,
23954 IX86_BUILTIN_HADDPS,
23955 IX86_BUILTIN_HSUBPS,
23956 IX86_BUILTIN_MOVSHDUP,
23957 IX86_BUILTIN_MOVSLDUP,
23958 IX86_BUILTIN_ADDSUBPD,
23959 IX86_BUILTIN_HADDPD,
23960 IX86_BUILTIN_HSUBPD,
23961 IX86_BUILTIN_LDDQU,
23963 IX86_BUILTIN_MONITOR,
23964 IX86_BUILTIN_MWAIT,
23967 IX86_BUILTIN_PHADDW,
23968 IX86_BUILTIN_PHADDD,
23969 IX86_BUILTIN_PHADDSW,
23970 IX86_BUILTIN_PHSUBW,
23971 IX86_BUILTIN_PHSUBD,
23972 IX86_BUILTIN_PHSUBSW,
23973 IX86_BUILTIN_PMADDUBSW,
23974 IX86_BUILTIN_PMULHRSW,
23975 IX86_BUILTIN_PSHUFB,
23976 IX86_BUILTIN_PSIGNB,
23977 IX86_BUILTIN_PSIGNW,
23978 IX86_BUILTIN_PSIGND,
23979 IX86_BUILTIN_PALIGNR,
23980 IX86_BUILTIN_PABSB,
23981 IX86_BUILTIN_PABSW,
23982 IX86_BUILTIN_PABSD,
23984 IX86_BUILTIN_PHADDW128,
23985 IX86_BUILTIN_PHADDD128,
23986 IX86_BUILTIN_PHADDSW128,
23987 IX86_BUILTIN_PHSUBW128,
23988 IX86_BUILTIN_PHSUBD128,
23989 IX86_BUILTIN_PHSUBSW128,
23990 IX86_BUILTIN_PMADDUBSW128,
23991 IX86_BUILTIN_PMULHRSW128,
23992 IX86_BUILTIN_PSHUFB128,
23993 IX86_BUILTIN_PSIGNB128,
23994 IX86_BUILTIN_PSIGNW128,
23995 IX86_BUILTIN_PSIGND128,
23996 IX86_BUILTIN_PALIGNR128,
23997 IX86_BUILTIN_PABSB128,
23998 IX86_BUILTIN_PABSW128,
23999 IX86_BUILTIN_PABSD128,
24001 /* AMDFAM10 - SSE4A New Instructions. */
24002 IX86_BUILTIN_MOVNTSD,
24003 IX86_BUILTIN_MOVNTSS,
24004 IX86_BUILTIN_EXTRQI,
24005 IX86_BUILTIN_EXTRQ,
24006 IX86_BUILTIN_INSERTQI,
24007 IX86_BUILTIN_INSERTQ,
24010 IX86_BUILTIN_BLENDPD,
24011 IX86_BUILTIN_BLENDPS,
24012 IX86_BUILTIN_BLENDVPD,
24013 IX86_BUILTIN_BLENDVPS,
24014 IX86_BUILTIN_PBLENDVB128,
24015 IX86_BUILTIN_PBLENDW128,
24020 IX86_BUILTIN_INSERTPS128,
24022 IX86_BUILTIN_MOVNTDQA,
24023 IX86_BUILTIN_MPSADBW128,
24024 IX86_BUILTIN_PACKUSDW128,
24025 IX86_BUILTIN_PCMPEQQ,
24026 IX86_BUILTIN_PHMINPOSUW128,
24028 IX86_BUILTIN_PMAXSB128,
24029 IX86_BUILTIN_PMAXSD128,
24030 IX86_BUILTIN_PMAXUD128,
24031 IX86_BUILTIN_PMAXUW128,
24033 IX86_BUILTIN_PMINSB128,
24034 IX86_BUILTIN_PMINSD128,
24035 IX86_BUILTIN_PMINUD128,
24036 IX86_BUILTIN_PMINUW128,
24038 IX86_BUILTIN_PMOVSXBW128,
24039 IX86_BUILTIN_PMOVSXBD128,
24040 IX86_BUILTIN_PMOVSXBQ128,
24041 IX86_BUILTIN_PMOVSXWD128,
24042 IX86_BUILTIN_PMOVSXWQ128,
24043 IX86_BUILTIN_PMOVSXDQ128,
24045 IX86_BUILTIN_PMOVZXBW128,
24046 IX86_BUILTIN_PMOVZXBD128,
24047 IX86_BUILTIN_PMOVZXBQ128,
24048 IX86_BUILTIN_PMOVZXWD128,
24049 IX86_BUILTIN_PMOVZXWQ128,
24050 IX86_BUILTIN_PMOVZXDQ128,
24052 IX86_BUILTIN_PMULDQ128,
24053 IX86_BUILTIN_PMULLD128,
24055 IX86_BUILTIN_ROUNDPD,
24056 IX86_BUILTIN_ROUNDPS,
24057 IX86_BUILTIN_ROUNDSD,
24058 IX86_BUILTIN_ROUNDSS,
24060 IX86_BUILTIN_FLOORPD,
24061 IX86_BUILTIN_CEILPD,
24062 IX86_BUILTIN_TRUNCPD,
24063 IX86_BUILTIN_RINTPD,
24064 IX86_BUILTIN_FLOORPS,
24065 IX86_BUILTIN_CEILPS,
24066 IX86_BUILTIN_TRUNCPS,
24067 IX86_BUILTIN_RINTPS,
24069 IX86_BUILTIN_PTESTZ,
24070 IX86_BUILTIN_PTESTC,
24071 IX86_BUILTIN_PTESTNZC,
24073 IX86_BUILTIN_VEC_INIT_V2SI,
24074 IX86_BUILTIN_VEC_INIT_V4HI,
24075 IX86_BUILTIN_VEC_INIT_V8QI,
24076 IX86_BUILTIN_VEC_EXT_V2DF,
24077 IX86_BUILTIN_VEC_EXT_V2DI,
24078 IX86_BUILTIN_VEC_EXT_V4SF,
24079 IX86_BUILTIN_VEC_EXT_V4SI,
24080 IX86_BUILTIN_VEC_EXT_V8HI,
24081 IX86_BUILTIN_VEC_EXT_V2SI,
24082 IX86_BUILTIN_VEC_EXT_V4HI,
24083 IX86_BUILTIN_VEC_EXT_V16QI,
24084 IX86_BUILTIN_VEC_SET_V2DI,
24085 IX86_BUILTIN_VEC_SET_V4SF,
24086 IX86_BUILTIN_VEC_SET_V4SI,
24087 IX86_BUILTIN_VEC_SET_V8HI,
24088 IX86_BUILTIN_VEC_SET_V4HI,
24089 IX86_BUILTIN_VEC_SET_V16QI,
24091 IX86_BUILTIN_VEC_PACK_SFIX,
24094 IX86_BUILTIN_CRC32QI,
24095 IX86_BUILTIN_CRC32HI,
24096 IX86_BUILTIN_CRC32SI,
24097 IX86_BUILTIN_CRC32DI,
24099 IX86_BUILTIN_PCMPESTRI128,
24100 IX86_BUILTIN_PCMPESTRM128,
24101 IX86_BUILTIN_PCMPESTRA128,
24102 IX86_BUILTIN_PCMPESTRC128,
24103 IX86_BUILTIN_PCMPESTRO128,
24104 IX86_BUILTIN_PCMPESTRS128,
24105 IX86_BUILTIN_PCMPESTRZ128,
24106 IX86_BUILTIN_PCMPISTRI128,
24107 IX86_BUILTIN_PCMPISTRM128,
24108 IX86_BUILTIN_PCMPISTRA128,
24109 IX86_BUILTIN_PCMPISTRC128,
24110 IX86_BUILTIN_PCMPISTRO128,
24111 IX86_BUILTIN_PCMPISTRS128,
24112 IX86_BUILTIN_PCMPISTRZ128,
24114 IX86_BUILTIN_PCMPGTQ,
24116 /* AES instructions */
24117 IX86_BUILTIN_AESENC128,
24118 IX86_BUILTIN_AESENCLAST128,
24119 IX86_BUILTIN_AESDEC128,
24120 IX86_BUILTIN_AESDECLAST128,
24121 IX86_BUILTIN_AESIMC128,
24122 IX86_BUILTIN_AESKEYGENASSIST128,
24124 /* PCLMUL instruction */
24125 IX86_BUILTIN_PCLMULQDQ128,
24128 IX86_BUILTIN_ADDPD256,
24129 IX86_BUILTIN_ADDPS256,
24130 IX86_BUILTIN_ADDSUBPD256,
24131 IX86_BUILTIN_ADDSUBPS256,
24132 IX86_BUILTIN_ANDPD256,
24133 IX86_BUILTIN_ANDPS256,
24134 IX86_BUILTIN_ANDNPD256,
24135 IX86_BUILTIN_ANDNPS256,
24136 IX86_BUILTIN_BLENDPD256,
24137 IX86_BUILTIN_BLENDPS256,
24138 IX86_BUILTIN_BLENDVPD256,
24139 IX86_BUILTIN_BLENDVPS256,
24140 IX86_BUILTIN_DIVPD256,
24141 IX86_BUILTIN_DIVPS256,
24142 IX86_BUILTIN_DPPS256,
24143 IX86_BUILTIN_HADDPD256,
24144 IX86_BUILTIN_HADDPS256,
24145 IX86_BUILTIN_HSUBPD256,
24146 IX86_BUILTIN_HSUBPS256,
24147 IX86_BUILTIN_MAXPD256,
24148 IX86_BUILTIN_MAXPS256,
24149 IX86_BUILTIN_MINPD256,
24150 IX86_BUILTIN_MINPS256,
24151 IX86_BUILTIN_MULPD256,
24152 IX86_BUILTIN_MULPS256,
24153 IX86_BUILTIN_ORPD256,
24154 IX86_BUILTIN_ORPS256,
24155 IX86_BUILTIN_SHUFPD256,
24156 IX86_BUILTIN_SHUFPS256,
24157 IX86_BUILTIN_SUBPD256,
24158 IX86_BUILTIN_SUBPS256,
24159 IX86_BUILTIN_XORPD256,
24160 IX86_BUILTIN_XORPS256,
24161 IX86_BUILTIN_CMPSD,
24162 IX86_BUILTIN_CMPSS,
24163 IX86_BUILTIN_CMPPD,
24164 IX86_BUILTIN_CMPPS,
24165 IX86_BUILTIN_CMPPD256,
24166 IX86_BUILTIN_CMPPS256,
24167 IX86_BUILTIN_CVTDQ2PD256,
24168 IX86_BUILTIN_CVTDQ2PS256,
24169 IX86_BUILTIN_CVTPD2PS256,
24170 IX86_BUILTIN_CVTPS2DQ256,
24171 IX86_BUILTIN_CVTPS2PD256,
24172 IX86_BUILTIN_CVTTPD2DQ256,
24173 IX86_BUILTIN_CVTPD2DQ256,
24174 IX86_BUILTIN_CVTTPS2DQ256,
24175 IX86_BUILTIN_EXTRACTF128PD256,
24176 IX86_BUILTIN_EXTRACTF128PS256,
24177 IX86_BUILTIN_EXTRACTF128SI256,
24178 IX86_BUILTIN_VZEROALL,
24179 IX86_BUILTIN_VZEROUPPER,
24180 IX86_BUILTIN_VPERMILVARPD,
24181 IX86_BUILTIN_VPERMILVARPS,
24182 IX86_BUILTIN_VPERMILVARPD256,
24183 IX86_BUILTIN_VPERMILVARPS256,
24184 IX86_BUILTIN_VPERMILPD,
24185 IX86_BUILTIN_VPERMILPS,
24186 IX86_BUILTIN_VPERMILPD256,
24187 IX86_BUILTIN_VPERMILPS256,
24188 IX86_BUILTIN_VPERMIL2PD,
24189 IX86_BUILTIN_VPERMIL2PS,
24190 IX86_BUILTIN_VPERMIL2PD256,
24191 IX86_BUILTIN_VPERMIL2PS256,
24192 IX86_BUILTIN_VPERM2F128PD256,
24193 IX86_BUILTIN_VPERM2F128PS256,
24194 IX86_BUILTIN_VPERM2F128SI256,
24195 IX86_BUILTIN_VBROADCASTSS,
24196 IX86_BUILTIN_VBROADCASTSD256,
24197 IX86_BUILTIN_VBROADCASTSS256,
24198 IX86_BUILTIN_VBROADCASTPD256,
24199 IX86_BUILTIN_VBROADCASTPS256,
24200 IX86_BUILTIN_VINSERTF128PD256,
24201 IX86_BUILTIN_VINSERTF128PS256,
24202 IX86_BUILTIN_VINSERTF128SI256,
24203 IX86_BUILTIN_LOADUPD256,
24204 IX86_BUILTIN_LOADUPS256,
24205 IX86_BUILTIN_STOREUPD256,
24206 IX86_BUILTIN_STOREUPS256,
24207 IX86_BUILTIN_LDDQU256,
24208 IX86_BUILTIN_MOVNTDQ256,
24209 IX86_BUILTIN_MOVNTPD256,
24210 IX86_BUILTIN_MOVNTPS256,
24211 IX86_BUILTIN_LOADDQU256,
24212 IX86_BUILTIN_STOREDQU256,
24213 IX86_BUILTIN_MASKLOADPD,
24214 IX86_BUILTIN_MASKLOADPS,
24215 IX86_BUILTIN_MASKSTOREPD,
24216 IX86_BUILTIN_MASKSTOREPS,
24217 IX86_BUILTIN_MASKLOADPD256,
24218 IX86_BUILTIN_MASKLOADPS256,
24219 IX86_BUILTIN_MASKSTOREPD256,
24220 IX86_BUILTIN_MASKSTOREPS256,
24221 IX86_BUILTIN_MOVSHDUP256,
24222 IX86_BUILTIN_MOVSLDUP256,
24223 IX86_BUILTIN_MOVDDUP256,
24225 IX86_BUILTIN_SQRTPD256,
24226 IX86_BUILTIN_SQRTPS256,
24227 IX86_BUILTIN_SQRTPS_NR256,
24228 IX86_BUILTIN_RSQRTPS256,
24229 IX86_BUILTIN_RSQRTPS_NR256,
24231 IX86_BUILTIN_RCPPS256,
24233 IX86_BUILTIN_ROUNDPD256,
24234 IX86_BUILTIN_ROUNDPS256,
24236 IX86_BUILTIN_FLOORPD256,
24237 IX86_BUILTIN_CEILPD256,
24238 IX86_BUILTIN_TRUNCPD256,
24239 IX86_BUILTIN_RINTPD256,
24240 IX86_BUILTIN_FLOORPS256,
24241 IX86_BUILTIN_CEILPS256,
24242 IX86_BUILTIN_TRUNCPS256,
24243 IX86_BUILTIN_RINTPS256,
24245 IX86_BUILTIN_UNPCKHPD256,
24246 IX86_BUILTIN_UNPCKLPD256,
24247 IX86_BUILTIN_UNPCKHPS256,
24248 IX86_BUILTIN_UNPCKLPS256,
24250 IX86_BUILTIN_SI256_SI,
24251 IX86_BUILTIN_PS256_PS,
24252 IX86_BUILTIN_PD256_PD,
24253 IX86_BUILTIN_SI_SI256,
24254 IX86_BUILTIN_PS_PS256,
24255 IX86_BUILTIN_PD_PD256,
24257 IX86_BUILTIN_VTESTZPD,
24258 IX86_BUILTIN_VTESTCPD,
24259 IX86_BUILTIN_VTESTNZCPD,
24260 IX86_BUILTIN_VTESTZPS,
24261 IX86_BUILTIN_VTESTCPS,
24262 IX86_BUILTIN_VTESTNZCPS,
24263 IX86_BUILTIN_VTESTZPD256,
24264 IX86_BUILTIN_VTESTCPD256,
24265 IX86_BUILTIN_VTESTNZCPD256,
24266 IX86_BUILTIN_VTESTZPS256,
24267 IX86_BUILTIN_VTESTCPS256,
24268 IX86_BUILTIN_VTESTNZCPS256,
24269 IX86_BUILTIN_PTESTZ256,
24270 IX86_BUILTIN_PTESTC256,
24271 IX86_BUILTIN_PTESTNZC256,
24273 IX86_BUILTIN_MOVMSKPD256,
24274 IX86_BUILTIN_MOVMSKPS256,
24276 /* TFmode support builtins. */
24278 IX86_BUILTIN_HUGE_VALQ,
24279 IX86_BUILTIN_FABSQ,
24280 IX86_BUILTIN_COPYSIGNQ,
24282 /* Vectorizer support builtins. */
24283 IX86_BUILTIN_CPYSGNPS,
24284 IX86_BUILTIN_CPYSGNPD,
24285 IX86_BUILTIN_CPYSGNPS256,
24286 IX86_BUILTIN_CPYSGNPD256,
24288 IX86_BUILTIN_CVTUDQ2PS,
24290 IX86_BUILTIN_VEC_PERM_V2DF,
24291 IX86_BUILTIN_VEC_PERM_V4SF,
24292 IX86_BUILTIN_VEC_PERM_V2DI,
24293 IX86_BUILTIN_VEC_PERM_V4SI,
24294 IX86_BUILTIN_VEC_PERM_V8HI,
24295 IX86_BUILTIN_VEC_PERM_V16QI,
24296 IX86_BUILTIN_VEC_PERM_V2DI_U,
24297 IX86_BUILTIN_VEC_PERM_V4SI_U,
24298 IX86_BUILTIN_VEC_PERM_V8HI_U,
24299 IX86_BUILTIN_VEC_PERM_V16QI_U,
24300 IX86_BUILTIN_VEC_PERM_V4DF,
24301 IX86_BUILTIN_VEC_PERM_V8SF,
24303 /* FMA4 and XOP instructions. */
24304 IX86_BUILTIN_VFMADDSS,
24305 IX86_BUILTIN_VFMADDSD,
24306 IX86_BUILTIN_VFMADDPS,
24307 IX86_BUILTIN_VFMADDPD,
24308 IX86_BUILTIN_VFMADDPS256,
24309 IX86_BUILTIN_VFMADDPD256,
24310 IX86_BUILTIN_VFMADDSUBPS,
24311 IX86_BUILTIN_VFMADDSUBPD,
24312 IX86_BUILTIN_VFMADDSUBPS256,
24313 IX86_BUILTIN_VFMADDSUBPD256,
24315 IX86_BUILTIN_VPCMOV,
24316 IX86_BUILTIN_VPCMOV_V2DI,
24317 IX86_BUILTIN_VPCMOV_V4SI,
24318 IX86_BUILTIN_VPCMOV_V8HI,
24319 IX86_BUILTIN_VPCMOV_V16QI,
24320 IX86_BUILTIN_VPCMOV_V4SF,
24321 IX86_BUILTIN_VPCMOV_V2DF,
24322 IX86_BUILTIN_VPCMOV256,
24323 IX86_BUILTIN_VPCMOV_V4DI256,
24324 IX86_BUILTIN_VPCMOV_V8SI256,
24325 IX86_BUILTIN_VPCMOV_V16HI256,
24326 IX86_BUILTIN_VPCMOV_V32QI256,
24327 IX86_BUILTIN_VPCMOV_V8SF256,
24328 IX86_BUILTIN_VPCMOV_V4DF256,
24330 IX86_BUILTIN_VPPERM,
24332 IX86_BUILTIN_VPMACSSWW,
24333 IX86_BUILTIN_VPMACSWW,
24334 IX86_BUILTIN_VPMACSSWD,
24335 IX86_BUILTIN_VPMACSWD,
24336 IX86_BUILTIN_VPMACSSDD,
24337 IX86_BUILTIN_VPMACSDD,
24338 IX86_BUILTIN_VPMACSSDQL,
24339 IX86_BUILTIN_VPMACSSDQH,
24340 IX86_BUILTIN_VPMACSDQL,
24341 IX86_BUILTIN_VPMACSDQH,
24342 IX86_BUILTIN_VPMADCSSWD,
24343 IX86_BUILTIN_VPMADCSWD,
24345 IX86_BUILTIN_VPHADDBW,
24346 IX86_BUILTIN_VPHADDBD,
24347 IX86_BUILTIN_VPHADDBQ,
24348 IX86_BUILTIN_VPHADDWD,
24349 IX86_BUILTIN_VPHADDWQ,
24350 IX86_BUILTIN_VPHADDDQ,
24351 IX86_BUILTIN_VPHADDUBW,
24352 IX86_BUILTIN_VPHADDUBD,
24353 IX86_BUILTIN_VPHADDUBQ,
24354 IX86_BUILTIN_VPHADDUWD,
24355 IX86_BUILTIN_VPHADDUWQ,
24356 IX86_BUILTIN_VPHADDUDQ,
24357 IX86_BUILTIN_VPHSUBBW,
24358 IX86_BUILTIN_VPHSUBWD,
24359 IX86_BUILTIN_VPHSUBDQ,
24361 IX86_BUILTIN_VPROTB,
24362 IX86_BUILTIN_VPROTW,
24363 IX86_BUILTIN_VPROTD,
24364 IX86_BUILTIN_VPROTQ,
24365 IX86_BUILTIN_VPROTB_IMM,
24366 IX86_BUILTIN_VPROTW_IMM,
24367 IX86_BUILTIN_VPROTD_IMM,
24368 IX86_BUILTIN_VPROTQ_IMM,
24370 IX86_BUILTIN_VPSHLB,
24371 IX86_BUILTIN_VPSHLW,
24372 IX86_BUILTIN_VPSHLD,
24373 IX86_BUILTIN_VPSHLQ,
24374 IX86_BUILTIN_VPSHAB,
24375 IX86_BUILTIN_VPSHAW,
24376 IX86_BUILTIN_VPSHAD,
24377 IX86_BUILTIN_VPSHAQ,
24379 IX86_BUILTIN_VFRCZSS,
24380 IX86_BUILTIN_VFRCZSD,
24381 IX86_BUILTIN_VFRCZPS,
24382 IX86_BUILTIN_VFRCZPD,
24383 IX86_BUILTIN_VFRCZPS256,
24384 IX86_BUILTIN_VFRCZPD256,
24386 IX86_BUILTIN_VPCOMEQUB,
24387 IX86_BUILTIN_VPCOMNEUB,
24388 IX86_BUILTIN_VPCOMLTUB,
24389 IX86_BUILTIN_VPCOMLEUB,
24390 IX86_BUILTIN_VPCOMGTUB,
24391 IX86_BUILTIN_VPCOMGEUB,
24392 IX86_BUILTIN_VPCOMFALSEUB,
24393 IX86_BUILTIN_VPCOMTRUEUB,
24395 IX86_BUILTIN_VPCOMEQUW,
24396 IX86_BUILTIN_VPCOMNEUW,
24397 IX86_BUILTIN_VPCOMLTUW,
24398 IX86_BUILTIN_VPCOMLEUW,
24399 IX86_BUILTIN_VPCOMGTUW,
24400 IX86_BUILTIN_VPCOMGEUW,
24401 IX86_BUILTIN_VPCOMFALSEUW,
24402 IX86_BUILTIN_VPCOMTRUEUW,
24404 IX86_BUILTIN_VPCOMEQUD,
24405 IX86_BUILTIN_VPCOMNEUD,
24406 IX86_BUILTIN_VPCOMLTUD,
24407 IX86_BUILTIN_VPCOMLEUD,
24408 IX86_BUILTIN_VPCOMGTUD,
24409 IX86_BUILTIN_VPCOMGEUD,
24410 IX86_BUILTIN_VPCOMFALSEUD,
24411 IX86_BUILTIN_VPCOMTRUEUD,
24413 IX86_BUILTIN_VPCOMEQUQ,
24414 IX86_BUILTIN_VPCOMNEUQ,
24415 IX86_BUILTIN_VPCOMLTUQ,
24416 IX86_BUILTIN_VPCOMLEUQ,
24417 IX86_BUILTIN_VPCOMGTUQ,
24418 IX86_BUILTIN_VPCOMGEUQ,
24419 IX86_BUILTIN_VPCOMFALSEUQ,
24420 IX86_BUILTIN_VPCOMTRUEUQ,
24422 IX86_BUILTIN_VPCOMEQB,
24423 IX86_BUILTIN_VPCOMNEB,
24424 IX86_BUILTIN_VPCOMLTB,
24425 IX86_BUILTIN_VPCOMLEB,
24426 IX86_BUILTIN_VPCOMGTB,
24427 IX86_BUILTIN_VPCOMGEB,
24428 IX86_BUILTIN_VPCOMFALSEB,
24429 IX86_BUILTIN_VPCOMTRUEB,
24431 IX86_BUILTIN_VPCOMEQW,
24432 IX86_BUILTIN_VPCOMNEW,
24433 IX86_BUILTIN_VPCOMLTW,
24434 IX86_BUILTIN_VPCOMLEW,
24435 IX86_BUILTIN_VPCOMGTW,
24436 IX86_BUILTIN_VPCOMGEW,
24437 IX86_BUILTIN_VPCOMFALSEW,
24438 IX86_BUILTIN_VPCOMTRUEW,
24440 IX86_BUILTIN_VPCOMEQD,
24441 IX86_BUILTIN_VPCOMNED,
24442 IX86_BUILTIN_VPCOMLTD,
24443 IX86_BUILTIN_VPCOMLED,
24444 IX86_BUILTIN_VPCOMGTD,
24445 IX86_BUILTIN_VPCOMGED,
24446 IX86_BUILTIN_VPCOMFALSED,
24447 IX86_BUILTIN_VPCOMTRUED,
24449 IX86_BUILTIN_VPCOMEQQ,
24450 IX86_BUILTIN_VPCOMNEQ,
24451 IX86_BUILTIN_VPCOMLTQ,
24452 IX86_BUILTIN_VPCOMLEQ,
24453 IX86_BUILTIN_VPCOMGTQ,
24454 IX86_BUILTIN_VPCOMGEQ,
24455 IX86_BUILTIN_VPCOMFALSEQ,
24456 IX86_BUILTIN_VPCOMTRUEQ,
24458 /* LWP instructions. */
24459 IX86_BUILTIN_LLWPCB,
24460 IX86_BUILTIN_SLWPCB,
24461 IX86_BUILTIN_LWPVAL32,
24462 IX86_BUILTIN_LWPVAL64,
24463 IX86_BUILTIN_LWPINS32,
24464 IX86_BUILTIN_LWPINS64,
24468 /* BMI instructions. */
24469 IX86_BUILTIN_BEXTR32,
24470 IX86_BUILTIN_BEXTR64,
24473 /* TBM instructions. */
24474 IX86_BUILTIN_BEXTRI32,
24475 IX86_BUILTIN_BEXTRI64,
24478 /* FSGSBASE instructions. */
24479 IX86_BUILTIN_RDFSBASE32,
24480 IX86_BUILTIN_RDFSBASE64,
24481 IX86_BUILTIN_RDGSBASE32,
24482 IX86_BUILTIN_RDGSBASE64,
24483 IX86_BUILTIN_WRFSBASE32,
24484 IX86_BUILTIN_WRFSBASE64,
24485 IX86_BUILTIN_WRGSBASE32,
24486 IX86_BUILTIN_WRGSBASE64,
24488 /* RDRND instructions. */
24489 IX86_BUILTIN_RDRAND16_STEP,
24490 IX86_BUILTIN_RDRAND32_STEP,
24491 IX86_BUILTIN_RDRAND64_STEP,
24493 /* F16C instructions. */
24494 IX86_BUILTIN_CVTPH2PS,
24495 IX86_BUILTIN_CVTPH2PS256,
24496 IX86_BUILTIN_CVTPS2PH,
24497 IX86_BUILTIN_CVTPS2PH256,
24499 /* CFString built-in for Darwin. */
24500 IX86_BUILTIN_CFSTRING,
24502 IX86_BUILTIN_MAX
24503 };
24505 /* Table for the ix86 builtin decls. */
24506 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
24508 /* Table of all of the builtin functions that are possible with different ISAs
24509 but are waiting to be built until a function is declared to use that
24510 ISA. */
24511 struct builtin_isa {
24512 const char *name; /* function name */
24513 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
24514 int isa; /* isa_flags this builtin is defined for */
24515 bool const_p; /* true if the declaration is constant */
24516 bool set_and_not_built_p; /* true if recorded but the decl is not yet built */
24517 };
24519 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
24522 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
24523 of which isa_flags to use in the ix86_builtins_isa array. Stores the
24524 function decl in the ix86_builtins array. Returns the function decl or
24525 NULL_TREE if the builtin was not added.
24527 If the front end has a special hook for builtin functions, delay adding
24528 builtin functions that aren't in the current ISA until the ISA is changed
24529 with function specific optimization. Doing so can save about 300K for the
24530 default compiler. When the builtin is expanded, check at that time whether
24531 it is valid.
24533 If the front end doesn't have a special hook, record all builtins, even if
24534 they aren't in the current ISA, in case the user uses
24535 function specific options for a different ISA, so that we don't get scope
24536 errors if a builtin is added in the middle of a function scope. */
24538 static inline tree
24539 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
24540 enum ix86_builtins code)
24541 {
24542 tree decl = NULL_TREE;
24544 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
24545 {
24546 ix86_builtins_isa[(int) code].isa = mask;
24548 mask &= ~OPTION_MASK_ISA_64BIT;
24549 if (mask == 0
24550 || (mask & ix86_isa_flags) != 0
24551 || (lang_hooks.builtin_function
24552 == lang_hooks.builtin_function_ext_scope))
24554 {
24555 tree type = ix86_get_builtin_func_type (tcode);
24556 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
24557 NULL, NULL_TREE);
24558 ix86_builtins[(int) code] = decl;
24559 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
24560 }
24561 else
24562 {
24563 ix86_builtins[(int) code] = NULL_TREE;
24564 ix86_builtins_isa[(int) code].tcode = tcode;
24565 ix86_builtins_isa[(int) code].name = name;
24566 ix86_builtins_isa[(int) code].const_p = false;
24567 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
24568 }
24569 }
24571 return decl;
24572 }
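/* Illustrative sketch (not part of the original file): a typical
   registration call. The builtin name and enum value below are
   hypothetical; real registrations are driven from the bdesc_* tables
   further down, e.g.:

     def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_example",
                  V4SI_FTYPE_V4SI_V4SI, IX86_BUILTIN_EXAMPLE);

   If SSE2 is already in ix86_isa_flags the decl is built at once;
   otherwise it is parked in ix86_builtins_isa until ix86_add_new_builtins
   runs for an ISA set that includes SSE2.  */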
24574 /* Like def_builtin, but also marks the function decl "const". */
24576 static inline tree
24577 def_builtin_const (int mask, const char *name,
24578 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
24579 {
24580 tree decl = def_builtin (mask, name, tcode, code);
24581 if (decl)
24582 TREE_READONLY (decl) = 1;
24583 else
24584 ix86_builtins_isa[(int) code].const_p = true;
24586 return decl;
24587 }
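/* Illustrative sketch (not part of the original file): def_builtin_const
   suits side-effect-free builtins such as the arithmetic entries in
   bdesc_args below, e.g.:

     def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
                        V4SF_FTYPE_V4SF_V4SF, IX86_BUILTIN_ADDPS);

   Setting TREE_READONLY marks the decl "const", letting the middle end
   CSE repeated calls or delete calls whose results are unused.  */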
24589 /* Add any new builtin functions for a given ISA that may not have been
24590 declared. This saves a bit of space compared to adding all of the
24591 declarations to the tree, even if we didn't use them. */
24593 static void
24594 ix86_add_new_builtins (int isa)
24595 {
24596 int i;
24598 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
24599 {
24600 if ((ix86_builtins_isa[i].isa & isa) != 0
24601 && ix86_builtins_isa[i].set_and_not_built_p)
24602 {
24603 tree decl, type;
24605 /* Don't define the builtin again. */
24606 ix86_builtins_isa[i].set_and_not_built_p = false;
24608 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
24609 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
24610 type, i, BUILT_IN_MD, NULL,
24611 NULL_TREE);
24613 ix86_builtins[i] = decl;
24614 if (ix86_builtins_isa[i].const_p)
24615 TREE_READONLY (decl) = 1;
24616 }
24617 }
24618 }
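/* Illustrative sketch (not part of the original file): the expected
   call-site pattern, once option processing has enlarged the ISA set
   (e.g. from a target("...") attribute or pragma handler):

     ix86_add_new_builtins (ix86_isa_flags);

   Only entries whose set_and_not_built_p flag is still true are built,
   so calling this repeatedly is cheap.  */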
24620 /* Bits for builtin_description.flag. */
24622 /* Set when we don't support the comparison natively, and should
24623 swap_comparison in order to support it. */
24624 #define BUILTIN_DESC_SWAP_OPERANDS 1
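/* Illustrative note (not part of the original file): operand swapping
   appears below in two forms: an entry may set this bit in its flag
   field, and the ..._SWAP function-type codes serve the same purpose,
   e.g. __builtin_ia32_cmpgtps is expanded as an LT comparison with
   swapped operands because CMPPS provides the less-than predicate
   directly but not greater-than.  */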
24626 struct builtin_description
24627 {
24628 const unsigned int mask;
24629 const enum insn_code icode;
24630 const char *const name;
24631 const enum ix86_builtins code;
24632 const enum rtx_code comparison;
24633 const int flag;
24634 };
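/* Illustrative note (not part of the original file): reading one entry of
   the tables below, e.g. the first bdesc_comi row,

     { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq",
       IX86_BUILTIN_COMIEQSS, UNEQ, 0 }

   mask is the ISA that must be enabled, icode the insn pattern used to
   expand the builtin, name the user-visible identifier, code the key into
   ix86_builtins[], comparison the rtx code the expander materializes, and
   flag carries per-table extras: a BUILTIN_DESC_* bit, a CC mode, or an
   ix86_builtin_func_type, each cast to int.  */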
24636 static const struct builtin_description bdesc_comi[] =
24637 {
24638 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
24639 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
24640 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
24641 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
24642 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
24643 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
24644 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
24645 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
24646 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
24647 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
24648 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
24649 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
24650 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
24651 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
24652 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
24653 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
24654 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
24655 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
24656 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
24657 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
24658 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
24659 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
24660 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
24661 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
24662 };
24664 static const struct builtin_description bdesc_pcmpestr[] =
24665 {
24666 /* SSE4.2 */
24667 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
24668 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
24669 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
24670 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
24671 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
24672 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
24673 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
24674 };
24676 static const struct builtin_description bdesc_pcmpistr[] =
24677 {
24678 /* SSE4.2 */
24679 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
24680 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
24681 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
24682 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
24683 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
24684 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
24685 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
24686 };
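/* Illustrative sketch (not part of the original file): these bdesc_*
   arrays are intended to be walked once at builtin-initialization time,
   roughly:

     const struct builtin_description *d;
     size_t i;
     for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
       def_builtin_const (d->mask, d->name, INT_FTYPE_V4SF_V4SF, d->code);

   so every row becomes one def_builtin/def_builtin_const call with the
   function type chosen per table.  */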
24688 /* Special builtins with variable number of arguments. */
24689 static const struct builtin_description bdesc_special_args[] =
24690 {
24691 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
24692 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
24694 /* MMX */
24695 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
24697 /* 3DNow! */
24698 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
24700 /* SSE */
24701 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24702 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24703 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
24705 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
24706 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
24707 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
24708 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
24710 /* SSE or 3DNow!A */
24711 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24712 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
24714 /* SSE2 */
24715 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24716 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24717 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24718 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
24719 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24720 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
24721 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
24722 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
24723 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
24725 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
24726 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
24728 /* SSE3 */
24729 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
24731 /* SSE4.1 */
24732 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
24734 /* SSE4A */
24735 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24736 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24738 /* AVX */
24739 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
24740 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
24742 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
24743 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
24744 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
24745 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
24746 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
24748 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
24749 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
24750 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
24751 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
24752 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
24753 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
24754 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
24756 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
24757 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
24758 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
24760 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
24761 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
24762 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
24763 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
24764 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
24765 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
24766 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
24767 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },
24769 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
24770 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
24771 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
24772 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
24773 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
24774 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
24776 /* FSGSBASE */
24777 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
24778 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
24779 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
24780 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
24781 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
24782 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
24783 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
24784 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
24785 };
24787 /* Builtins with variable number of arguments. */
24788 static const struct builtin_description bdesc_args[] =
24789 {
24790 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
24791 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
24792 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
24793 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
24794 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
24795 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
24796 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
24798 /* MMX */
24799 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24800 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24801 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24802 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24803 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24804 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24806 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24807 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24808 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24809 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24810 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24811 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24812 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24813 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24815 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24816 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24818 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24819 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24820 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24821 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24823 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24824 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24825 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24826 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24827 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24828 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24830 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24831 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24832 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24833 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24834 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24835 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24837 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
24838 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
24839 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
24841 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
24843 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24844 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24845 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
24846 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24847 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24848 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
24850 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24851 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24852 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
24853 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24854 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24855 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
24857 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24858 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24859 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24860 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24862 /* 3DNow! */
24863 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
24864 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
24865 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24866 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24868 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24869 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24870 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24871 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24872 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24873 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24874 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24875 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24876 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24877 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24878 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24879 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24880 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24881 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24882 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24884 /* 3DNow!A */
24885 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
24886 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
24887 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
24888 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24889 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24890 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24892 /* SSE */
24893 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
24894 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24895 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24896 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24897 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24898 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24899 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
24900 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
24901 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
24902 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
24903 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
24904 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
24906 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
24908 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24909 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24910 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24911 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24912 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24913 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24914 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24915 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24917 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
24918 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
24919 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
24920 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24921 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24922 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24923 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
24924 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
24925 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
24926 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24927 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24928 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24929 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
24930 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
24931 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
24932 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24933 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
24934 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
24935 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
24936 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24937 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24938 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24940 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24941 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24942 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24943 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24945 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24946 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24947 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24948 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24950 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24952 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24953 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24954 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24955 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24956 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24958 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
24959 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
24960 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
24962 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
24964 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24965 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24966 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24968 /* SSE MMX or 3DNow!A */
24969 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24970 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24971 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24973 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24974 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24975 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24976 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24978 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
24979 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
24981 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
24983 /* SSE2 */
24984 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
24986 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
24987 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
24988 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
24989 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
24990 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
24991 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
24992 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
24993 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
24994 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
24995 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
24996 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
24997 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
24999 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
25000 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
25001 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
25002 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
25003 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
25004 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
25006 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
25007 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
25008 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
25009 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
25010 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
25012 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
25014 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
25015 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
25016 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
25017 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
25019 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
25020 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
25021 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
25023 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25024 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25025 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25026 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25027 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25028 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25029 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25030 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25032 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
25033 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
25034 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
25035 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25036 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25037 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25038 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
25039 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
25040 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
25041 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25042 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25043 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25044 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
25045 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
25046 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
25047 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25048 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
25049 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
25050 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
25051 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
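/* A note on the ..._V2DF_V2DF_SWAP rows above: SSE2 has no CMPGT/CMPGE
   encodings, so the expander swaps the two operands and reuses the LT/LE
   (or UNGE/UNGT) predicate instead; the _SWAP suffix is what requests the
   swap.  A minimal sketch of the identity being relied on, assuming the
   standard <emmintrin.h> wrappers:

     #include <emmintrin.h>

     int
     gt_is_swapped_lt (__m128d a, __m128d b)
     {
       __m128d gt = _mm_cmpgt_pd (a, b);
       __m128d lt = _mm_cmplt_pd (b, a);
       return _mm_movemask_pd (gt) == _mm_movemask_pd (lt);
     }

   gt_is_swapped_lt should return 1 for any inputs, NaNs included.  */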
25053 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25054 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25055 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25056 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25058 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25059 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25060 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25061 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25063 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25065 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25066 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25067 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25069 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
25071 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25072 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25073 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25074 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25075 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25076 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25077 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25078 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25080 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25081 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25082 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25083 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25084 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25085 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25086 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25087 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25089 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25090 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25092 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25093 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25094 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25095 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25097 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25098 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25100 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25101 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25102 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25103 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25104 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25105 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25107 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25108 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25109 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25110 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25112 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25113 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25114 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25115 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25116 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25117 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25118 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25119 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25121 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
25122 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
25123 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
25125 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25126 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
25128 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
25129 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
25131 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
25133 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
25134 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
25135 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
25136 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
25138 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
25139 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
25140 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
25141 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
25142 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
25143 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
25144 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
25146 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
25147 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
25148 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
25149 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
25150 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
25151 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
25152 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
25154 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
25155 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
25156 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
25157 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
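/* A note on the shift rows above: the _COUNT suffixes mark the last
   operand as a shift count.  The SI_COUNT rows take an integer count; the
   V8HI/V4SI/V2DI_COUNT rows take the count from the low quadword of an
   XMM register.  The INT_CONVERT rows (pslldqi128/psrldqi128) shift the
   whole 128-bit value and, in this era of GCC, count in bits, which is why
   <emmintrin.h> multiplies the byte offset by 8.  A minimal sketch:

     #include <emmintrin.h>

     __m128i
     shift_sketch (__m128i x)
     {
       __m128i per_lane  = _mm_slli_epi32 (x, 5);
       __m128i all_bytes = _mm_slli_si128 (x, 2);
       return _mm_or_si128 (per_lane, all_bytes);
     }

   Here _mm_slli_epi32 maps onto the pslldi128 row, and _mm_slli_si128
   should expand to __builtin_ia32_pslldqi128 (x, 2 * 8) per the bit-count
   convention above.  */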
25159 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
25160 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
25161 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
25163 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
25165 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
25166 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
25168 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
25171 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
25172 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
25175 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
25176 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
25178 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25179 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25180 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25181 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25182 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25183 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25186 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
25187 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
25188 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
25189 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
25190 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
25191 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
25193 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25194 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25195 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25196 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
25197 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25198 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25199 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25200 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25201 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25202 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
25203 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25204 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25205 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
25206 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
25207 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25208 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25209 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25210 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
25211 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25212 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
25213 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25214 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25215 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25216 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
25219 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
25220 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
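/* Like the pslldqi128/psrldqi128 rows earlier, the palignr rows use
   ..._INT_CONVERT because the builtin takes its offset in bits;
   <tmmintrin.h>'s _mm_alignr_epi8 scales its byte offset by 8.  A minimal
   sketch:

     #include <tmmintrin.h>

     __m128i
     alignr_sketch (__m128i a, __m128i b)
     {
       return _mm_alignr_epi8 (a, b, 4);
     }

   which should expand to __builtin_ia32_palignr128 (a, b, 4 * 8).  */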
25223 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25224 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25225 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
25226 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
25227 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25228 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25229 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25230 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
25231 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
25232 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
25234 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
25235 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
25236 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
25237 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
25238 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
25239 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
25240 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
25241 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
25242 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
25243 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
25244 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
25245 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
25246 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
25248 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
25249 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25250 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25251 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25252 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25253 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25254 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25255 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25256 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25257 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25258 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
25259 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25262 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
25263 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
25264 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25265 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25267 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_floorpd", IX86_BUILTIN_FLOORPD, (enum rtx_code) ROUND_FLOOR, (int) V2DF_FTYPE_V2DF_ROUND },
25268 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_ceilpd", IX86_BUILTIN_CEILPD, (enum rtx_code) ROUND_CEIL, (int) V2DF_FTYPE_V2DF_ROUND },
25269 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_truncpd", IX86_BUILTIN_TRUNCPD, (enum rtx_code) ROUND_TRUNC, (int) V2DF_FTYPE_V2DF_ROUND },
25270 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_rintpd", IX86_BUILTIN_RINTPD, (enum rtx_code) ROUND_MXCSR, (int) V2DF_FTYPE_V2DF_ROUND },
25272 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_floorps", IX86_BUILTIN_FLOORPS, (enum rtx_code) ROUND_FLOOR, (int) V4SF_FTYPE_V4SF_ROUND },
25273 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_ceilps", IX86_BUILTIN_CEILPS, (enum rtx_code) ROUND_CEIL, (int) V4SF_FTYPE_V4SF_ROUND },
25274 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_truncps", IX86_BUILTIN_TRUNCPS, (enum rtx_code) ROUND_TRUNC, (int) V4SF_FTYPE_V4SF_ROUND },
25275 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_rintps", IX86_BUILTIN_RINTPS, (enum rtx_code) ROUND_MXCSR, (int) V4SF_FTYPE_V4SF_ROUND },
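/* A note on the floor/ceil/trunc/rint rows above: they are not separate
   instructions.  Each row reuses CODE_FOR_sse4_1_roundpd or roundps and
   carries the rounding selector (ROUND_FLOOR, ROUND_CEIL, ROUND_TRUNC,
   ROUND_MXCSR) in the comparison-code column; the expander turns it into
   the insn's immediate operand.  The user-level equivalence, assuming
   <smmintrin.h>:

     #include <smmintrin.h>

     __m128d
     floor_two_ways (__m128d x)
     {
       __m128d a = _mm_floor_pd (x);
       __m128d b = _mm_round_pd (x, _MM_FROUND_FLOOR);
       return _mm_sub_pd (a, b);
     }

   Both calls should reach the same roundpd insn with the same immediate.  */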
25277 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
25278 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
25279 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
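/* The three ptest rows share one insn; the comparison-code column selects
   which flag the result reads: EQ reads ZF (testz), LTU reads CF (testc),
   and GTU the "neither flag" condition (testnzc).  A minimal usage sketch:

     #include <smmintrin.h>

     int
     all_masked_bits_zero (__m128i mask, __m128i val)
     {
       return _mm_testz_si128 (mask, val);
     }

   _mm_testz_si128 lowers to __builtin_ia32_ptestz128, i.e. the EQ row.  */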
25282 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25283 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
25284 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
25285 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
25286 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
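/* The crc32 builtins accumulate the CRC-32C (Castagnoli) polynomial one
   element at a time, matching the rows' UINT_FTYPE_... signatures.  A
   minimal sketch built only from the byte variant above (compile with
   -msse4.2; wider strides would use the hi/si/di rows):

     static unsigned int
     crc32c_buffer (unsigned int crc, const unsigned char *p, unsigned long n)
     {
       while (n--)
         crc = __builtin_ia32_crc32qi (crc, *p++);
       return crc;
     }
*/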
25289 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
25290 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
25291 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
25292 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25295 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
25296 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
25298 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25299 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25300 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25301 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25304 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
25307 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25308 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25309 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25310 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25311 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25312 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25313 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25314 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25315 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25316 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25317 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25318 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25319 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25320 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25321 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25322 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25323 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25324 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25325 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25326 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25327 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25328 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25329 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25330 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25331 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25332 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25334 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
25335 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
25336 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
25337 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
25339 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
25340 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25341 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
25342 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
25343 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25344 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
25345 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25346 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25347 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25348 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25349 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25350 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
25351 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
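/* Unlike the fixed-predicate SSE2 cmp rows earlier in this table, the AVX
   cmp rows take the predicate as a trailing immediate (the _INT in the
   signature), covering all 32 VEX comparison predicates.  A minimal
   sketch, assuming <immintrin.h> and -mavx:

     #include <immintrin.h>

     __m128d
     cmp_ge_ordered_quiet (__m128d a, __m128d b)
     {
       return _mm_cmp_pd (a, b, _CMP_GE_OQ);
     }

   _mm_cmp_pd lowers to __builtin_ia32_cmppd, i.e. the avx_cmpv2df3 row.  */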
25352 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
25353 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
25354 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
25355 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
25356 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
25357 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
25358 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
25359 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
25360 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
25361 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
25362 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
25363 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
25364 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25365 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
25366 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
25367 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
25368 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
25369 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
25370 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
25371 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
25372 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
25374 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25375 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25376 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
25378 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
25379 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25380 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25381 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25382 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25384 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25386 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
25387 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
25389 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_floorpd256", IX86_BUILTIN_FLOORPD256, (enum rtx_code) ROUND_FLOOR, (int) V4DF_FTYPE_V4DF_ROUND },
25390 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_ceilpd256", IX86_BUILTIN_CEILPD256, (enum rtx_code) ROUND_CEIL, (int) V4DF_FTYPE_V4DF_ROUND },
25391 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_truncpd256", IX86_BUILTIN_TRUNCPD256, (enum rtx_code) ROUND_TRUNC, (int) V4DF_FTYPE_V4DF_ROUND },
25392 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_rintpd256", IX86_BUILTIN_RINTPD256, (enum rtx_code) ROUND_MXCSR, (int) V4DF_FTYPE_V4DF_ROUND },
25394 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_floorps256", IX86_BUILTIN_FLOORPS256, (enum rtx_code) ROUND_FLOOR, (int) V8SF_FTYPE_V8SF_ROUND },
25395 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_ceilps256", IX86_BUILTIN_CEILPS256, (enum rtx_code) ROUND_CEIL, (int) V8SF_FTYPE_V8SF_ROUND },
25396 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_truncps256", IX86_BUILTIN_TRUNCPS256, (enum rtx_code) ROUND_TRUNC, (int) V8SF_FTYPE_V8SF_ROUND },
25397 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_rintps256", IX86_BUILTIN_RINTPS256, (enum rtx_code) ROUND_MXCSR, (int) V8SF_FTYPE_V8SF_ROUND },
25399 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25400 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25401 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25402 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25404 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
25405 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
25406 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
25407 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
25408 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
25409 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
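/* These six rows implement the AVX cast intrinsics.  The si256_si /
   ps256_ps / pd256_pd direction widens a 128-bit value into the low lane
   of a 256-bit register (upper lane undefined); the vec_extract_lo
   direction takes the low 128 bits back out.  A minimal round trip,
   assuming <immintrin.h>:

     #include <immintrin.h>

     __m128d
     low_lane_round_trip (__m128d x)
     {
       __m256d wide = _mm256_castpd128_pd256 (x);
       return _mm256_castpd256_pd128 (wide);
     }

   Neither cast should cost an instruction; they only re-type the low lane.  */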
25411 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
25412 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
25413 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
25414 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
25415 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
25416 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
25417 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
25418 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
25419 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
25420 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
25421 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
25422 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
25423 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
25424 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
25425 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
25427 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
25428 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
25430 { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25431 { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25433 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
25436 { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
25437 { OPTION_MASK_ISA_BMI | OPTION_MASK_ISA_64BIT, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
25438 { OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
25441 { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
25442 { OPTION_MASK_ISA_TBM | OPTION_MASK_ISA_64BIT, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
25445 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
25446 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
25447 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
25448 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
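/* The F16C rows convert between single precision and half-precision
   storage; vcvtps2ph additionally takes a rounding-mode immediate (hence
   the trailing _INT).  A minimal sketch, assuming <immintrin.h> and -mf16c:

     #include <immintrin.h>

     __m128i
     to_half_nearest (__m128 x)
     {
       return _mm_cvtps_ph (x, 0);
     }

   The low four 16-bit lanes of the result hold the half floats; immediate
   0 should select round-to-nearest-even.  */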
25451 /* FMA4 and XOP. */
25452 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
25453 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
25454 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
25455 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
25456 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
25457 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
25458 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
25459 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
25460 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
25461 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
25462 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
25463 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
25464 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
25465 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
25466 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
25467 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
25468 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
25469 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
25470 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
25471 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
25472 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
25473 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
25474 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
25475 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
25476 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
25477 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
25478 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
25479 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
25480 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
25481 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
25482 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
25483 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
25484 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
25485 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
25486 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
25487 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
25488 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
25489 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
25490 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
25491 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
25492 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
25493 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
25494 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
25495 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
25496 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
25497 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
25498 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
25499 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
25500 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
25501 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
25502 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
25503 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
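/* These MULTI_ARG_* names are only short aliases for members of the same
   FTYPE enumeration used by bdesc_args, kept narrow so each table row below
   fits on one line.  The digit counts operands and the suffix names the
   element types: e.g. MULTI_ARG_3_SF is V4SF_FTYPE_V4SF_V4SF_V4SF, so a
   row using it describes a builtin with this C-level shape:

     __m128 __builtin_ia32_vfmaddss (__m128, __m128, __m128);

   The _CMP and _TF forms carry an extra comparison selector, the _IMM
   forms an integer immediate, and the 1_x_y forms widen element type x
   to y.  */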
25505 static const struct builtin_description bdesc_multi_arg[] =
25506 {
25507 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
25508 "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
25509 UNKNOWN, (int)MULTI_ARG_3_SF },
25510 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
25511 "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
25512 UNKNOWN, (int)MULTI_ARG_3_DF },
25514 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
25515 "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
25516 UNKNOWN, (int)MULTI_ARG_3_SF },
25517 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
25518 "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
25519 UNKNOWN, (int)MULTI_ARG_3_DF },
25520 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
25521 "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
25522 UNKNOWN, (int)MULTI_ARG_3_SF2 },
25523 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
25524 "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
25525 UNKNOWN, (int)MULTI_ARG_3_DF2 },
25527 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
25528 "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
25529 UNKNOWN, (int)MULTI_ARG_3_SF },
25530 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
25531 "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
25532 UNKNOWN, (int)MULTI_ARG_3_DF },
25533 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
25534 "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
25535 UNKNOWN, (int)MULTI_ARG_3_SF2 },
25536 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
25537 "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
25538 UNKNOWN, (int)MULTI_ARG_3_DF2 },
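/* The packed fmadd/fmaddsub rows above are tagged with
   OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, so a single builtin can
   serve either instruction set, while the scalar vfmaddss/vfmaddsd rows
   remain FMA4-only.  A minimal sketch of the packed form, assuming
   <x86intrin.h> and -mfma4:

     #include <x86intrin.h>

     __m128
     fused_madd (__m128 a, __m128 b, __m128 c)
     {
       return _mm_macc_ps (a, b, c);
     }

   _mm_macc_ps lowers to __builtin_ia32_vfmaddps, i.e. a * b + c with a
   single rounding.  */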
25540 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
25541 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
25542 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
25543 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
25544 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
25545 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
25546 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
25548 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
25549 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
25550 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
25551 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
25552 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
25553 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
25554 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
25556 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
25558 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
25559 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
25560 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25561 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25562 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
25563 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
25564 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25565 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25566 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25567 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25568 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25569 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25571 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25572 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
25573 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
25574 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
25575 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
25576 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
25577 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
25578 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
25579 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25580 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
25581 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
25582 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
25583 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25584 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
25585 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
25586 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
25588 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
25589 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
25590 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
25591 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
25592 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
25593 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
25595 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25596 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
25597 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
25598 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25599 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
25600 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25601 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25602 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
25603 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
25604 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25605 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
25606 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25607 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25608 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25609 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25611 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
25612 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25613 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25614 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
25615 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
25616 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
25617 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
25619 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
25620 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25621 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25622 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
25623 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
25624 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
25625 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
25627 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
25628 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25629 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25630 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
25631 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
25632 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
25633 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
25635 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25636 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25637 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25638 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
25639 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
25640 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
25641 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
25643 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
25644 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25645 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25646 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
25647 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
25648 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
25649 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
25651 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
25652 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25653 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25654 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
25655 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
25656 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
25657 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
25659 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
25660 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25661 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25662 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
25663 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
25664 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
25665 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
25667 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25668 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25669 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25670 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
25671 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
25672 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
25673 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
25675 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25676 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25677 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25678 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25679 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25680 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25681 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25682 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25684 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25685 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25686 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25687 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25688 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25689 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25690 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25691 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25693 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
25694 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
25695 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
25696 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
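/* An illustrative, minimal sketch of how a MULTI_ARG_3 entry above reaches
   user code: xopintrin.h wraps __builtin_ia32_vpcmov in _mm_cmov_si128, a
   per-bit select.  Assumes compilation with -mxop.  */
#if 0
#include <x86intrin.h>

static __m128i
select_bits (__m128i a, __m128i b, __m128i mask)
{
  /* Result is (a & mask) | (b & ~mask).  */
  return _mm_cmov_si128 (a, b, mask);
}
#endif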
25700 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
25701 in the current target ISA to allow the user to compile particular modules
25702 with different target specific options that differ from the command line options. */
25705 ix86_init_mmx_sse_builtins (void)
25707 const struct builtin_description * d;
25708 enum ix86_builtin_func_type ftype;
25711 /* Add all special builtins with a variable number of operands. */
25712 for (i = 0, d = bdesc_special_args;
25713 i < ARRAY_SIZE (bdesc_special_args);
25719 ftype = (enum ix86_builtin_func_type) d->flag;
25720 def_builtin (d->mask, d->name, ftype, d->code);
25723 /* Add all builtins with a variable number of operands. */
25724 for (i = 0, d = bdesc_args;
25725 i < ARRAY_SIZE (bdesc_args);
25731 ftype = (enum ix86_builtin_func_type) d->flag;
25732 def_builtin_const (d->mask, d->name, ftype, d->code);
25735 /* pcmpestr[im] insns. */
25736 for (i = 0, d = bdesc_pcmpestr;
25737 i < ARRAY_SIZE (bdesc_pcmpestr);
25740 if (d->code == IX86_BUILTIN_PCMPESTRM128)
25741 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
25743 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
25744 def_builtin_const (d->mask, d->name, ftype, d->code);
25747 /* pcmpistr[im] insns. */
25748 for (i = 0, d = bdesc_pcmpistr;
25749 i < ARRAY_SIZE (bdesc_pcmpistr);
25752 if (d->code == IX86_BUILTIN_PCMPISTRM128)
25753 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
25755 ftype = INT_FTYPE_V16QI_V16QI_INT;
25756 def_builtin_const (d->mask, d->name, ftype, d->code);
25759 /* comi/ucomi insns. */
25760 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
25762 if (d->mask == OPTION_MASK_ISA_SSE2)
25763 ftype = INT_FTYPE_V2DF_V2DF;
25765 ftype = INT_FTYPE_V4SF_V4SF;
25766 def_builtin_const (d->mask, d->name, ftype, d->code);
25770 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
25771 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
25772 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
25773 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
25775 /* SSE or 3DNow!A */
25776 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25777 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
25778 IX86_BUILTIN_MASKMOVQ);
25781 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
25782 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
25784 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
25785 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
25786 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
25787 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
25790 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
25791 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
25792 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
25793 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
25796 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
25797 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
25798 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
25799 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
25800 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
25801 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
25802 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
25803 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
25804 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
25805 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
25806 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
25807 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
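/* An illustrative sketch of the AES builtins just defined, which back the
   wmmintrin.h intrinsics; assumes -maes.  */
#if 0
#include <wmmintrin.h>

static __m128i
aes_round (__m128i state, __m128i round_key)
{
  /* One AES encryption round; expands via __builtin_ia32_aesenc128.  */
  return _mm_aesenc_si128 (state, round_key);
}
#endif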
25810 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
25811 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
25814 def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand16_step",
25815 INT_FTYPE_PUSHORT, IX86_BUILTIN_RDRAND16_STEP);
25816 def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand32_step",
25817 INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDRAND32_STEP);
25818 def_builtin (OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT,
25819 "__builtin_ia32_rdrand64_step", INT_FTYPE_PULONGLONG,
25820 IX86_BUILTIN_RDRAND64_STEP);
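/* An illustrative sketch of the rdrand step builtins above: each returns
   nonzero on success and stores the random value through its pointer
   argument.  Assumes -mrdrnd.  */
#if 0
static unsigned int
get_random (void)
{
  unsigned int r;
  /* The hardware can transiently fail; retry until it succeeds.  */
  while (!__builtin_ia32_rdrand32_step (&r))
    ;
  return r;
}
#endif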
25822 /* MMX access to the vec_init patterns. */
25823 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
25824 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
25826 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
25827 V4HI_FTYPE_HI_HI_HI_HI,
25828 IX86_BUILTIN_VEC_INIT_V4HI);
25830 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
25831 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
25832 IX86_BUILTIN_VEC_INIT_V8QI);
25834 /* Access to the vec_extract patterns. */
25835 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
25836 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
25837 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
25838 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
25839 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
25840 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
25841 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
25842 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
25843 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
25844 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
25846 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25847 "__builtin_ia32_vec_ext_v4hi",
25848 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
25850 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
25851 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
25853 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
25854 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
25856 /* Access to the vec_set patterns. */
25857 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
25858 "__builtin_ia32_vec_set_v2di",
25859 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
25861 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
25862 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
25864 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
25865 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
25867 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
25868 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
25870 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25871 "__builtin_ia32_vec_set_v4hi",
25872 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
25874 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
25875 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
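/* An illustrative sketch of the element-access builtins above: emmintrin.h
   implements _mm_extract_epi16 with __builtin_ia32_vec_ext_v8hi, and the
   lane selector must be a compile-time constant.  Assumes -msse2.  */
#if 0
#include <emmintrin.h>

static int
third_lane (__m128i v)
{
  return _mm_extract_epi16 (v, 2);  /* lane index is an immediate */
}
#endif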
25877 /* Add all FMA4 and XOP multi-argument builtins. */
25878 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
25883 ftype = (enum ix86_builtin_func_type) d->flag;
25884 def_builtin_const (d->mask, d->name, ftype, d->code);
25888 /* Internal method for ix86_init_builtins. */
25891 ix86_init_builtins_va_builtins_abi (void)
25893 tree ms_va_ref, sysv_va_ref;
25894 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
25895 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
25896 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
25897 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
25901 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
25902 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
25903 ms_va_ref = build_reference_type (ms_va_list_type_node);
25905 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
25908 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25909 fnvoid_va_start_ms =
25910 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25911 fnvoid_va_end_sysv =
25912 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
25913 fnvoid_va_start_sysv =
25914 build_varargs_function_type_list (void_type_node, sysv_va_ref,
25916 fnvoid_va_copy_ms =
25917 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
25919 fnvoid_va_copy_sysv =
25920 build_function_type_list (void_type_node, sysv_va_ref,
25921 sysv_va_ref, NULL_TREE);
25923 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
25924 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
25925 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
25926 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
25927 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
25928 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
25929 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
25930 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25931 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
25932 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25933 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
25934 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
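/* An illustrative, minimal sketch of the ms_abi varargs builtins registered
   above; assumes an x86-64 target.  __builtin_va_arg works on a
   __builtin_ms_va_list because the callee's ABI drives the expansion.  */
#if 0
static int __attribute__ ((ms_abi))
sum_ints (int n, ...)
{
  __builtin_ms_va_list ap;
  int i, s = 0;

  __builtin_ms_va_start (ap, n);
  for (i = 0; i < n; i++)
    s += __builtin_va_arg (ap, int);
  __builtin_ms_va_end (ap);
  return s;
}
#endif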
25938 ix86_init_builtin_types (void)
25940 tree float128_type_node, float80_type_node;
25942 /* The __float80 type. */
25943 float80_type_node = long_double_type_node;
25944 if (TYPE_MODE (float80_type_node) != XFmode)
25946 /* long double is not XFmode here; build a distinct 80-bit type for __float80. */
25947 float80_type_node = make_node (REAL_TYPE);
25949 TYPE_PRECISION (float80_type_node) = 80;
25950 layout_type (float80_type_node);
25952 lang_hooks.types.register_builtin_type (float80_type_node, "__float80");
25954 /* The __float128 type. */
25955 float128_type_node = make_node (REAL_TYPE);
25956 TYPE_PRECISION (float128_type_node) = 128;
25957 layout_type (float128_type_node);
25958 lang_hooks.types.register_builtin_type (float128_type_node, "__float128");
25960 /* This macro is built by i386-builtin-types.awk. */
25961 DEFINE_BUILTIN_PRIMITIVE_TYPES;
25965 ix86_init_builtins (void)
25969 ix86_init_builtin_types ();
25971 /* TFmode support builtins. */
25972 def_builtin_const (0, "__builtin_infq",
25973 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
25974 def_builtin_const (0, "__builtin_huge_valq",
25975 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
25977 /* We will expand them to normal calls if SSE2 isn't available, since
25978 they are used by libgcc. */
25979 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
25980 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
25981 BUILT_IN_MD, "__fabstf2", NULL_TREE);
25982 TREE_READONLY (t) = 1;
25983 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
25985 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
25986 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
25987 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
25988 TREE_READONLY (t) = 1;
25989 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
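/* An illustrative sketch of the TFmode builtins just registered; without
   SSE2 they fall back to the libgcc routines named above (__fabstf2,
   __copysigntf3).  */
#if 0
static __float128
signed_magnitude (__float128 x, __float128 sign_source)
{
  /* |x| carrying the sign of sign_source.  */
  return __builtin_copysignq (__builtin_fabsq (x), sign_source);
}
#endif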
25991 ix86_init_mmx_sse_builtins ();
25994 ix86_init_builtins_va_builtins_abi ();
25996 #ifdef SUBTARGET_INIT_BUILTINS
25997 SUBTARGET_INIT_BUILTINS;
26001 /* Return the ix86 builtin for CODE. */
26004 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
26006 if (code >= IX86_BUILTIN_MAX)
26007 return error_mark_node;
26009 return ix86_builtins[code];
26012 /* Errors in the source file can cause expand_expr to return const0_rtx
26013 where we expect a vector. To avoid crashing, use one of the vector
26014 clear instructions. */
26016 safe_vector_operand (rtx x, enum machine_mode mode)
26018 if (x == const0_rtx)
26019 x = CONST0_RTX (mode);
26023 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
26026 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
26029 tree arg0 = CALL_EXPR_ARG (exp, 0);
26030 tree arg1 = CALL_EXPR_ARG (exp, 1);
26031 rtx op0 = expand_normal (arg0);
26032 rtx op1 = expand_normal (arg1);
26033 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26034 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
26035 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
26037 if (VECTOR_MODE_P (mode0))
26038 op0 = safe_vector_operand (op0, mode0);
26039 if (VECTOR_MODE_P (mode1))
26040 op1 = safe_vector_operand (op1, mode1);
26042 if (optimize || !target
26043 || GET_MODE (target) != tmode
26044 || !insn_data[icode].operand[0].predicate (target, tmode))
26045 target = gen_reg_rtx (tmode);
26047 if (GET_MODE (op1) == SImode && mode1 == TImode)
26049 rtx x = gen_reg_rtx (V4SImode);
26050 emit_insn (gen_sse2_loadd (x, op1));
26051 op1 = gen_lowpart (TImode, x);
26054 if (!insn_data[icode].operand[1].predicate (op0, mode0))
26055 op0 = copy_to_mode_reg (mode0, op0);
26056 if (!insn_data[icode].operand[2].predicate (op1, mode1))
26057 op1 = copy_to_mode_reg (mode1, op1);
26059 pat = GEN_FCN (icode) (target, op0, op1);
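/* An illustrative consumer of the binop path above: emmintrin.h defines
   _mm_add_epi16 in terms of __builtin_ia32_paddw128, which this expander
   turns into a single paddw insn.  Assumes -msse2.  */
#if 0
#include <emmintrin.h>

static __m128i
add_8x16 (__m128i a, __m128i b)
{
  return _mm_add_epi16 (a, b);  /* expands via __builtin_ia32_paddw128 */
}
#endif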
26068 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
26071 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
26072 enum ix86_builtin_func_type m_type,
26073 enum rtx_code sub_code)
26078 bool comparison_p = false;
26080 bool last_arg_constant = false;
26081 int num_memory = 0;
26084 enum machine_mode mode;
26087 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26091 case MULTI_ARG_4_DF2_DI_I:
26092 case MULTI_ARG_4_DF2_DI_I1:
26093 case MULTI_ARG_4_SF2_SI_I:
26094 case MULTI_ARG_4_SF2_SI_I1:
26096 last_arg_constant = true;
26099 case MULTI_ARG_3_SF:
26100 case MULTI_ARG_3_DF:
26101 case MULTI_ARG_3_SF2:
26102 case MULTI_ARG_3_DF2:
26103 case MULTI_ARG_3_DI:
26104 case MULTI_ARG_3_SI:
26105 case MULTI_ARG_3_SI_DI:
26106 case MULTI_ARG_3_HI:
26107 case MULTI_ARG_3_HI_SI:
26108 case MULTI_ARG_3_QI:
26109 case MULTI_ARG_3_DI2:
26110 case MULTI_ARG_3_SI2:
26111 case MULTI_ARG_3_HI2:
26112 case MULTI_ARG_3_QI2:
26116 case MULTI_ARG_2_SF:
26117 case MULTI_ARG_2_DF:
26118 case MULTI_ARG_2_DI:
26119 case MULTI_ARG_2_SI:
26120 case MULTI_ARG_2_HI:
26121 case MULTI_ARG_2_QI:
26125 case MULTI_ARG_2_DI_IMM:
26126 case MULTI_ARG_2_SI_IMM:
26127 case MULTI_ARG_2_HI_IMM:
26128 case MULTI_ARG_2_QI_IMM:
26130 last_arg_constant = true;
26133 case MULTI_ARG_1_SF:
26134 case MULTI_ARG_1_DF:
26135 case MULTI_ARG_1_SF2:
26136 case MULTI_ARG_1_DF2:
26137 case MULTI_ARG_1_DI:
26138 case MULTI_ARG_1_SI:
26139 case MULTI_ARG_1_HI:
26140 case MULTI_ARG_1_QI:
26141 case MULTI_ARG_1_SI_DI:
26142 case MULTI_ARG_1_HI_DI:
26143 case MULTI_ARG_1_HI_SI:
26144 case MULTI_ARG_1_QI_DI:
26145 case MULTI_ARG_1_QI_SI:
26146 case MULTI_ARG_1_QI_HI:
26150 case MULTI_ARG_2_DI_CMP:
26151 case MULTI_ARG_2_SI_CMP:
26152 case MULTI_ARG_2_HI_CMP:
26153 case MULTI_ARG_2_QI_CMP:
26155 comparison_p = true;
26158 case MULTI_ARG_2_SF_TF:
26159 case MULTI_ARG_2_DF_TF:
26160 case MULTI_ARG_2_DI_TF:
26161 case MULTI_ARG_2_SI_TF:
26162 case MULTI_ARG_2_HI_TF:
26163 case MULTI_ARG_2_QI_TF:
26169 gcc_unreachable ();
26172 if (optimize || !target
26173 || GET_MODE (target) != tmode
26174 || !insn_data[icode].operand[0].predicate (target, tmode))
26175 target = gen_reg_rtx (tmode);
26177 gcc_assert (nargs <= 4);
26179 for (i = 0; i < nargs; i++)
26181 tree arg = CALL_EXPR_ARG (exp, i);
26182 rtx op = expand_normal (arg);
26183 int adjust = (comparison_p) ? 1 : 0;
26184 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
26186 if (last_arg_constant && i == nargs-1)
26188 if (!CONST_INT_P (op))
26190 error ("last argument must be an immediate");
26191 return gen_reg_rtx (tmode);
26196 if (VECTOR_MODE_P (mode))
26197 op = safe_vector_operand (op, mode);
26199 /* If we aren't optimizing, only allow one memory operand to be generated. */
26201 if (memory_operand (op, mode))
26204 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
26207 || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
26209 op = force_reg (mode, op);
26213 args[i].mode = mode;
26219 pat = GEN_FCN (icode) (target, args[0].op);
26224 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
26225 GEN_INT ((int)sub_code));
26226 else if (! comparison_p)
26227 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
26230 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
26234 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
26239 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
26243 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
26247 gcc_unreachable ();
26257 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
26258 insns with vec_merge. */
26261 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
26265 tree arg0 = CALL_EXPR_ARG (exp, 0);
26266 rtx op1, op0 = expand_normal (arg0);
26267 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26268 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
26270 if (optimize || !target
26271 || GET_MODE (target) != tmode
26272 || !insn_data[icode].operand[0].predicate (target, tmode))
26273 target = gen_reg_rtx (tmode);
26275 if (VECTOR_MODE_P (mode0))
26276 op0 = safe_vector_operand (op0, mode0);
26278 if ((optimize && !register_operand (op0, mode0))
26279 || !insn_data[icode].operand[1].predicate (op0, mode0))
26280 op0 = copy_to_mode_reg (mode0, op0);
26283 if (!insn_data[icode].operand[2].predicate (op1, mode0))
26284 op1 = copy_to_mode_reg (mode0, op1);
26286 pat = GEN_FCN (icode) (target, op0, op1);
26293 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
26296 ix86_expand_sse_compare (const struct builtin_description *d,
26297 tree exp, rtx target, bool swap)
26300 tree arg0 = CALL_EXPR_ARG (exp, 0);
26301 tree arg1 = CALL_EXPR_ARG (exp, 1);
26302 rtx op0 = expand_normal (arg0);
26303 rtx op1 = expand_normal (arg1);
26305 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
26306 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
26307 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
26308 enum rtx_code comparison = d->comparison;
26310 if (VECTOR_MODE_P (mode0))
26311 op0 = safe_vector_operand (op0, mode0);
26312 if (VECTOR_MODE_P (mode1))
26313 op1 = safe_vector_operand (op1, mode1);
26315 /* Swap operands if we have a comparison that isn't available in hardware. */
26319 rtx tmp = gen_reg_rtx (mode1);
26320 emit_move_insn (tmp, op1);
26325 if (optimize || !target
26326 || GET_MODE (target) != tmode
26327 || !insn_data[d->icode].operand[0].predicate (target, tmode))
26328 target = gen_reg_rtx (tmode);
26330 if ((optimize && !register_operand (op0, mode0))
26331 || !insn_data[d->icode].operand[1].predicate (op0, mode0))
26332 op0 = copy_to_mode_reg (mode0, op0);
26333 if ((optimize && !register_operand (op1, mode1))
26334 || !insn_data[d->icode].operand[2].predicate (op1, mode1))
26335 op1 = copy_to_mode_reg (mode1, op1);
26337 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
26338 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
26345 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
26348 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
26352 tree arg0 = CALL_EXPR_ARG (exp, 0);
26353 tree arg1 = CALL_EXPR_ARG (exp, 1);
26354 rtx op0 = expand_normal (arg0);
26355 rtx op1 = expand_normal (arg1);
26356 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
26357 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
26358 enum rtx_code comparison = d->comparison;
26360 if (VECTOR_MODE_P (mode0))
26361 op0 = safe_vector_operand (op0, mode0);
26362 if (VECTOR_MODE_P (mode1))
26363 op1 = safe_vector_operand (op1, mode1);
26365 /* Swap operands if we have a comparison that isn't available in hardware. */
26367 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
26374 target = gen_reg_rtx (SImode);
26375 emit_move_insn (target, const0_rtx);
26376 target = gen_rtx_SUBREG (QImode, target, 0);
26378 if ((optimize && !register_operand (op0, mode0))
26379 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26380 op0 = copy_to_mode_reg (mode0, op0);
26381 if ((optimize && !register_operand (op1, mode1))
26382 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
26383 op1 = copy_to_mode_reg (mode1, op1);
26385 pat = GEN_FCN (d->icode) (op0, op1);
26389 emit_insn (gen_rtx_SET (VOIDmode,
26390 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26391 gen_rtx_fmt_ee (comparison, QImode,
26395 return SUBREG_REG (target);
26398 /* Subroutine of ix86_expand_args_builtin to take care of round insns. */
26401 ix86_expand_sse_round (const struct builtin_description *d, tree exp,
26405 tree arg0 = CALL_EXPR_ARG (exp, 0);
26406 rtx op1, op0 = expand_normal (arg0);
26407 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
26408 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
26410 if (optimize || target == 0
26411 || GET_MODE (target) != tmode
26412 || !insn_data[d->icode].operand[0].predicate (target, tmode))
26413 target = gen_reg_rtx (tmode);
26415 if (VECTOR_MODE_P (mode0))
26416 op0 = safe_vector_operand (op0, mode0);
26418 if ((optimize && !register_operand (op0, mode0))
26419 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26420 op0 = copy_to_mode_reg (mode0, op0);
26422 op1 = GEN_INT (d->comparison);
26424 pat = GEN_FCN (d->icode) (target, op0, op1);
26431 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
26434 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
26438 tree arg0 = CALL_EXPR_ARG (exp, 0);
26439 tree arg1 = CALL_EXPR_ARG (exp, 1);
26440 rtx op0 = expand_normal (arg0);
26441 rtx op1 = expand_normal (arg1);
26442 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
26443 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
26444 enum rtx_code comparison = d->comparison;
26446 if (VECTOR_MODE_P (mode0))
26447 op0 = safe_vector_operand (op0, mode0);
26448 if (VECTOR_MODE_P (mode1))
26449 op1 = safe_vector_operand (op1, mode1);
26451 target = gen_reg_rtx (SImode);
26452 emit_move_insn (target, const0_rtx);
26453 target = gen_rtx_SUBREG (QImode, target, 0);
26455 if ((optimize && !register_operand (op0, mode0))
26456 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26457 op0 = copy_to_mode_reg (mode0, op0);
26458 if ((optimize && !register_operand (op1, mode1))
26459 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
26460 op1 = copy_to_mode_reg (mode1, op1);
26462 pat = GEN_FCN (d->icode) (op0, op1);
26466 emit_insn (gen_rtx_SET (VOIDmode,
26467 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26468 gen_rtx_fmt_ee (comparison, QImode,
26472 return SUBREG_REG (target);
26475 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
26478 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
26479 tree exp, rtx target)
26482 tree arg0 = CALL_EXPR_ARG (exp, 0);
26483 tree arg1 = CALL_EXPR_ARG (exp, 1);
26484 tree arg2 = CALL_EXPR_ARG (exp, 2);
26485 tree arg3 = CALL_EXPR_ARG (exp, 3);
26486 tree arg4 = CALL_EXPR_ARG (exp, 4);
26487 rtx scratch0, scratch1;
26488 rtx op0 = expand_normal (arg0);
26489 rtx op1 = expand_normal (arg1);
26490 rtx op2 = expand_normal (arg2);
26491 rtx op3 = expand_normal (arg3);
26492 rtx op4 = expand_normal (arg4);
26493 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
26495 tmode0 = insn_data[d->icode].operand[0].mode;
26496 tmode1 = insn_data[d->icode].operand[1].mode;
26497 modev2 = insn_data[d->icode].operand[2].mode;
26498 modei3 = insn_data[d->icode].operand[3].mode;
26499 modev4 = insn_data[d->icode].operand[4].mode;
26500 modei5 = insn_data[d->icode].operand[5].mode;
26501 modeimm = insn_data[d->icode].operand[6].mode;
26503 if (VECTOR_MODE_P (modev2))
26504 op0 = safe_vector_operand (op0, modev2);
26505 if (VECTOR_MODE_P (modev4))
26506 op2 = safe_vector_operand (op2, modev4);
26508 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26509 op0 = copy_to_mode_reg (modev2, op0);
26510 if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
26511 op1 = copy_to_mode_reg (modei3, op1);
26512 if ((optimize && !register_operand (op2, modev4))
26513 || !insn_data[d->icode].operand[4].predicate (op2, modev4))
26514 op2 = copy_to_mode_reg (modev4, op2);
26515 if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
26516 op3 = copy_to_mode_reg (modei5, op3);
26518 if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
26520 error ("the fifth argument must be a 8-bit immediate");
26524 if (d->code == IX86_BUILTIN_PCMPESTRI128)
26526 if (optimize || !target
26527 || GET_MODE (target) != tmode0
26528 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26529 target = gen_reg_rtx (tmode0);
26531 scratch1 = gen_reg_rtx (tmode1);
26533 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
26535 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
26537 if (optimize || !target
26538 || GET_MODE (target) != tmode1
26539 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26540 target = gen_reg_rtx (tmode1);
26542 scratch0 = gen_reg_rtx (tmode0);
26544 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
26548 gcc_assert (d->flag);
26550 scratch0 = gen_reg_rtx (tmode0);
26551 scratch1 = gen_reg_rtx (tmode1);
26553 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
26563 target = gen_reg_rtx (SImode);
26564 emit_move_insn (target, const0_rtx);
26565 target = gen_rtx_SUBREG (QImode, target, 0);
26568 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26569 gen_rtx_fmt_ee (EQ, QImode,
26570 gen_rtx_REG ((enum machine_mode) d->flag,
26573 return SUBREG_REG (target);
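/* An illustrative sketch of the explicit-length string compare expanded
   above; nmmintrin.h's _mm_cmpestri takes the 8-bit mode constant that this
   function checks.  Assumes -msse4.2.  */
#if 0
#include <nmmintrin.h>

static int
find_any_byte (__m128i needle, int nlen, __m128i hay, int hlen)
{
  /* Index of the first match in HAY, or 16 if none.  */
  return _mm_cmpestri (needle, nlen, hay, hlen,
                       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
}
#endif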
26580 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
26583 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
26584 tree exp, rtx target)
26587 tree arg0 = CALL_EXPR_ARG (exp, 0);
26588 tree arg1 = CALL_EXPR_ARG (exp, 1);
26589 tree arg2 = CALL_EXPR_ARG (exp, 2);
26590 rtx scratch0, scratch1;
26591 rtx op0 = expand_normal (arg0);
26592 rtx op1 = expand_normal (arg1);
26593 rtx op2 = expand_normal (arg2);
26594 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
26596 tmode0 = insn_data[d->icode].operand[0].mode;
26597 tmode1 = insn_data[d->icode].operand[1].mode;
26598 modev2 = insn_data[d->icode].operand[2].mode;
26599 modev3 = insn_data[d->icode].operand[3].mode;
26600 modeimm = insn_data[d->icode].operand[4].mode;
26602 if (VECTOR_MODE_P (modev2))
26603 op0 = safe_vector_operand (op0, modev2);
26604 if (VECTOR_MODE_P (modev3))
26605 op1 = safe_vector_operand (op1, modev3);
26607 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26608 op0 = copy_to_mode_reg (modev2, op0);
26609 if ((optimize && !register_operand (op1, modev3))
26610 || !insn_data[d->icode].operand[3].predicate (op1, modev3))
26611 op1 = copy_to_mode_reg (modev3, op1);
26613 if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
26615 error ("the third argument must be a 8-bit immediate");
26619 if (d->code == IX86_BUILTIN_PCMPISTRI128)
26621 if (optimize || !target
26622 || GET_MODE (target) != tmode0
26623 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26624 target = gen_reg_rtx (tmode0);
26626 scratch1 = gen_reg_rtx (tmode1);
26628 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
26630 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
26632 if (optimize || !target
26633 || GET_MODE (target) != tmode1
26634 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26635 target = gen_reg_rtx (tmode1);
26637 scratch0 = gen_reg_rtx (tmode0);
26639 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
26643 gcc_assert (d->flag);
26645 scratch0 = gen_reg_rtx (tmode0);
26646 scratch1 = gen_reg_rtx (tmode1);
26648 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
26658 target = gen_reg_rtx (SImode);
26659 emit_move_insn (target, const0_rtx);
26660 target = gen_rtx_SUBREG (QImode, target, 0);
26663 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26664 gen_rtx_fmt_ee (EQ, QImode,
26665 gen_rtx_REG ((enum machine_mode) d->flag,
26668 return SUBREG_REG (target);
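/* The implicit-length variant, for contrast: _mm_cmpistri stops at the
   first zero byte of either operand, so no length operands are required.
   An illustrative sketch, assuming -msse4.2.  */
#if 0
#include <nmmintrin.h>

static int
first_decimal_digit (__m128i text)
{
  /* Range compare: index of the first byte of TEXT in '0'..'9'.  */
  const __m128i digits = _mm_setr_epi8 ('0', '9', 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0);
  return _mm_cmpistri (digits, text, _SIDD_UBYTE_OPS | _SIDD_CMP_RANGES);
}
#endif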
26674 /* Subroutine of ix86_expand_builtin to take care of insns with
26675 a variable number of operands. */
26678 ix86_expand_args_builtin (const struct builtin_description *d,
26679 tree exp, rtx target)
26681 rtx pat, real_target;
26682 unsigned int i, nargs;
26683 unsigned int nargs_constant = 0;
26684 int num_memory = 0;
26688 enum machine_mode mode;
26690 bool last_arg_count = false;
26691 enum insn_code icode = d->icode;
26692 const struct insn_data_d *insn_p = &insn_data[icode];
26693 enum machine_mode tmode = insn_p->operand[0].mode;
26694 enum machine_mode rmode = VOIDmode;
26696 enum rtx_code comparison = d->comparison;
26698 switch ((enum ix86_builtin_func_type) d->flag)
26700 case V2DF_FTYPE_V2DF_ROUND:
26701 case V4DF_FTYPE_V4DF_ROUND:
26702 case V4SF_FTYPE_V4SF_ROUND:
26703 case V8SF_FTYPE_V8SF_ROUND:
26704 return ix86_expand_sse_round (d, exp, target);
26705 case INT_FTYPE_V8SF_V8SF_PTEST:
26706 case INT_FTYPE_V4DI_V4DI_PTEST:
26707 case INT_FTYPE_V4DF_V4DF_PTEST:
26708 case INT_FTYPE_V4SF_V4SF_PTEST:
26709 case INT_FTYPE_V2DI_V2DI_PTEST:
26710 case INT_FTYPE_V2DF_V2DF_PTEST:
26711 return ix86_expand_sse_ptest (d, exp, target);
26712 case FLOAT128_FTYPE_FLOAT128:
26713 case FLOAT_FTYPE_FLOAT:
26714 case INT_FTYPE_INT:
26715 case UINT64_FTYPE_INT:
26716 case UINT16_FTYPE_UINT16:
26717 case INT64_FTYPE_INT64:
26718 case INT64_FTYPE_V4SF:
26719 case INT64_FTYPE_V2DF:
26720 case INT_FTYPE_V16QI:
26721 case INT_FTYPE_V8QI:
26722 case INT_FTYPE_V8SF:
26723 case INT_FTYPE_V4DF:
26724 case INT_FTYPE_V4SF:
26725 case INT_FTYPE_V2DF:
26726 case V16QI_FTYPE_V16QI:
26727 case V8SI_FTYPE_V8SF:
26728 case V8SI_FTYPE_V4SI:
26729 case V8HI_FTYPE_V8HI:
26730 case V8HI_FTYPE_V16QI:
26731 case V8QI_FTYPE_V8QI:
26732 case V8SF_FTYPE_V8SF:
26733 case V8SF_FTYPE_V8SI:
26734 case V8SF_FTYPE_V4SF:
26735 case V8SF_FTYPE_V8HI:
26736 case V4SI_FTYPE_V4SI:
26737 case V4SI_FTYPE_V16QI:
26738 case V4SI_FTYPE_V4SF:
26739 case V4SI_FTYPE_V8SI:
26740 case V4SI_FTYPE_V8HI:
26741 case V4SI_FTYPE_V4DF:
26742 case V4SI_FTYPE_V2DF:
26743 case V4HI_FTYPE_V4HI:
26744 case V4DF_FTYPE_V4DF:
26745 case V4DF_FTYPE_V4SI:
26746 case V4DF_FTYPE_V4SF:
26747 case V4DF_FTYPE_V2DF:
26748 case V4SF_FTYPE_V4SF:
26749 case V4SF_FTYPE_V4SI:
26750 case V4SF_FTYPE_V8SF:
26751 case V4SF_FTYPE_V4DF:
26752 case V4SF_FTYPE_V8HI:
26753 case V4SF_FTYPE_V2DF:
26754 case V2DI_FTYPE_V2DI:
26755 case V2DI_FTYPE_V16QI:
26756 case V2DI_FTYPE_V8HI:
26757 case V2DI_FTYPE_V4SI:
26758 case V2DF_FTYPE_V2DF:
26759 case V2DF_FTYPE_V4SI:
26760 case V2DF_FTYPE_V4DF:
26761 case V2DF_FTYPE_V4SF:
26762 case V2DF_FTYPE_V2SI:
26763 case V2SI_FTYPE_V2SI:
26764 case V2SI_FTYPE_V4SF:
26765 case V2SI_FTYPE_V2SF:
26766 case V2SI_FTYPE_V2DF:
26767 case V2SF_FTYPE_V2SF:
26768 case V2SF_FTYPE_V2SI:
26771 case V4SF_FTYPE_V4SF_VEC_MERGE:
26772 case V2DF_FTYPE_V2DF_VEC_MERGE:
26773 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
26774 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
26775 case V16QI_FTYPE_V16QI_V16QI:
26776 case V16QI_FTYPE_V8HI_V8HI:
26777 case V8QI_FTYPE_V8QI_V8QI:
26778 case V8QI_FTYPE_V4HI_V4HI:
26779 case V8HI_FTYPE_V8HI_V8HI:
26780 case V8HI_FTYPE_V16QI_V16QI:
26781 case V8HI_FTYPE_V4SI_V4SI:
26782 case V8SF_FTYPE_V8SF_V8SF:
26783 case V8SF_FTYPE_V8SF_V8SI:
26784 case V4SI_FTYPE_V4SI_V4SI:
26785 case V4SI_FTYPE_V8HI_V8HI:
26786 case V4SI_FTYPE_V4SF_V4SF:
26787 case V4SI_FTYPE_V2DF_V2DF:
26788 case V4HI_FTYPE_V4HI_V4HI:
26789 case V4HI_FTYPE_V8QI_V8QI:
26790 case V4HI_FTYPE_V2SI_V2SI:
26791 case V4DF_FTYPE_V4DF_V4DF:
26792 case V4DF_FTYPE_V4DF_V4DI:
26793 case V4SF_FTYPE_V4SF_V4SF:
26794 case V4SF_FTYPE_V4SF_V4SI:
26795 case V4SF_FTYPE_V4SF_V2SI:
26796 case V4SF_FTYPE_V4SF_V2DF:
26797 case V4SF_FTYPE_V4SF_DI:
26798 case V4SF_FTYPE_V4SF_SI:
26799 case V2DI_FTYPE_V2DI_V2DI:
26800 case V2DI_FTYPE_V16QI_V16QI:
26801 case V2DI_FTYPE_V4SI_V4SI:
26802 case V2DI_FTYPE_V2DI_V16QI:
26803 case V2DI_FTYPE_V2DF_V2DF:
26804 case V2SI_FTYPE_V2SI_V2SI:
26805 case V2SI_FTYPE_V4HI_V4HI:
26806 case V2SI_FTYPE_V2SF_V2SF:
26807 case V2DF_FTYPE_V2DF_V2DF:
26808 case V2DF_FTYPE_V2DF_V4SF:
26809 case V2DF_FTYPE_V2DF_V2DI:
26810 case V2DF_FTYPE_V2DF_DI:
26811 case V2DF_FTYPE_V2DF_SI:
26812 case V2SF_FTYPE_V2SF_V2SF:
26813 case V1DI_FTYPE_V1DI_V1DI:
26814 case V1DI_FTYPE_V8QI_V8QI:
26815 case V1DI_FTYPE_V2SI_V2SI:
26816 if (comparison == UNKNOWN)
26817 return ix86_expand_binop_builtin (icode, exp, target);
26820 case V4SF_FTYPE_V4SF_V4SF_SWAP:
26821 case V2DF_FTYPE_V2DF_V2DF_SWAP:
26822 gcc_assert (comparison != UNKNOWN);
26826 case V8HI_FTYPE_V8HI_V8HI_COUNT:
26827 case V8HI_FTYPE_V8HI_SI_COUNT:
26828 case V4SI_FTYPE_V4SI_V4SI_COUNT:
26829 case V4SI_FTYPE_V4SI_SI_COUNT:
26830 case V4HI_FTYPE_V4HI_V4HI_COUNT:
26831 case V4HI_FTYPE_V4HI_SI_COUNT:
26832 case V2DI_FTYPE_V2DI_V2DI_COUNT:
26833 case V2DI_FTYPE_V2DI_SI_COUNT:
26834 case V2SI_FTYPE_V2SI_V2SI_COUNT:
26835 case V2SI_FTYPE_V2SI_SI_COUNT:
26836 case V1DI_FTYPE_V1DI_V1DI_COUNT:
26837 case V1DI_FTYPE_V1DI_SI_COUNT:
26839 last_arg_count = true;
26841 case UINT64_FTYPE_UINT64_UINT64:
26842 case UINT_FTYPE_UINT_UINT:
26843 case UINT_FTYPE_UINT_USHORT:
26844 case UINT_FTYPE_UINT_UCHAR:
26845 case UINT16_FTYPE_UINT16_INT:
26846 case UINT8_FTYPE_UINT8_INT:
26849 case V2DI_FTYPE_V2DI_INT_CONVERT:
26852 nargs_constant = 1;
26854 case V8HI_FTYPE_V8HI_INT:
26855 case V8HI_FTYPE_V8SF_INT:
26856 case V8HI_FTYPE_V4SF_INT:
26857 case V8SF_FTYPE_V8SF_INT:
26858 case V4SI_FTYPE_V4SI_INT:
26859 case V4SI_FTYPE_V8SI_INT:
26860 case V4HI_FTYPE_V4HI_INT:
26861 case V4DF_FTYPE_V4DF_INT:
26862 case V4SF_FTYPE_V4SF_INT:
26863 case V4SF_FTYPE_V8SF_INT:
26864 case V2DI_FTYPE_V2DI_INT:
26865 case V2DF_FTYPE_V2DF_INT:
26866 case V2DF_FTYPE_V4DF_INT:
26868 nargs_constant = 1;
26870 case V16QI_FTYPE_V16QI_V16QI_V16QI:
26871 case V8SF_FTYPE_V8SF_V8SF_V8SF:
26872 case V4DF_FTYPE_V4DF_V4DF_V4DF:
26873 case V4SF_FTYPE_V4SF_V4SF_V4SF:
26874 case V2DF_FTYPE_V2DF_V2DF_V2DF:
26877 case V16QI_FTYPE_V16QI_V16QI_INT:
26878 case V8HI_FTYPE_V8HI_V8HI_INT:
26879 case V8SI_FTYPE_V8SI_V8SI_INT:
26880 case V8SI_FTYPE_V8SI_V4SI_INT:
26881 case V8SF_FTYPE_V8SF_V8SF_INT:
26882 case V8SF_FTYPE_V8SF_V4SF_INT:
26883 case V4SI_FTYPE_V4SI_V4SI_INT:
26884 case V4DF_FTYPE_V4DF_V4DF_INT:
26885 case V4DF_FTYPE_V4DF_V2DF_INT:
26886 case V4SF_FTYPE_V4SF_V4SF_INT:
26887 case V2DI_FTYPE_V2DI_V2DI_INT:
26888 case V2DF_FTYPE_V2DF_V2DF_INT:
26890 nargs_constant = 1;
26892 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
26895 nargs_constant = 1;
26897 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
26900 nargs_constant = 1;
26902 case V2DI_FTYPE_V2DI_UINT_UINT:
26904 nargs_constant = 2;
26906 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
26907 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
26908 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
26909 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
26911 nargs_constant = 1;
26913 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
26915 nargs_constant = 2;
26918 gcc_unreachable ();
26921 gcc_assert (nargs <= ARRAY_SIZE (args));
26923 if (comparison != UNKNOWN)
26925 gcc_assert (nargs == 2);
26926 return ix86_expand_sse_compare (d, exp, target, swap);
26929 if (rmode == VOIDmode || rmode == tmode)
26933 || GET_MODE (target) != tmode
26934 || !insn_p->operand[0].predicate (target, tmode))
26935 target = gen_reg_rtx (tmode);
26936 real_target = target;
26940 target = gen_reg_rtx (rmode);
26941 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
26944 for (i = 0; i < nargs; i++)
26946 tree arg = CALL_EXPR_ARG (exp, i);
26947 rtx op = expand_normal (arg);
26948 enum machine_mode mode = insn_p->operand[i + 1].mode;
26949 bool match = insn_p->operand[i + 1].predicate (op, mode);
26951 if (last_arg_count && (i + 1) == nargs)
26953 /* SIMD shift insns take either an 8-bit immediate or a
26954 register as the count, but the builtin functions take an int as
26955 the count. If the count doesn't match, put it in a register. */
26958 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
26959 if (!insn_p->operand[i + 1].predicate (op, mode))
26960 op = copy_to_reg (op);
26963 else if ((nargs - i) <= nargs_constant)
26968 case CODE_FOR_sse4_1_roundpd:
26969 case CODE_FOR_sse4_1_roundps:
26970 case CODE_FOR_sse4_1_roundsd:
26971 case CODE_FOR_sse4_1_roundss:
26972 case CODE_FOR_sse4_1_blendps:
26973 case CODE_FOR_avx_blendpd256:
26974 case CODE_FOR_avx_vpermilv4df:
26975 case CODE_FOR_avx_roundpd256:
26976 case CODE_FOR_avx_roundps256:
26977 error ("the last argument must be a 4-bit immediate");
26980 case CODE_FOR_sse4_1_blendpd:
26981 case CODE_FOR_avx_vpermilv2df:
26982 case CODE_FOR_xop_vpermil2v2df3:
26983 case CODE_FOR_xop_vpermil2v4sf3:
26984 case CODE_FOR_xop_vpermil2v4df3:
26985 case CODE_FOR_xop_vpermil2v8sf3:
26986 error ("the last argument must be a 2-bit immediate");
26989 case CODE_FOR_avx_vextractf128v4df:
26990 case CODE_FOR_avx_vextractf128v8sf:
26991 case CODE_FOR_avx_vextractf128v8si:
26992 case CODE_FOR_avx_vinsertf128v4df:
26993 case CODE_FOR_avx_vinsertf128v8sf:
26994 case CODE_FOR_avx_vinsertf128v8si:
26995 error ("the last argument must be a 1-bit immediate");
26998 case CODE_FOR_avx_vmcmpv2df3:
26999 case CODE_FOR_avx_vmcmpv4sf3:
27000 case CODE_FOR_avx_cmpv2df3:
27001 case CODE_FOR_avx_cmpv4sf3:
27002 case CODE_FOR_avx_cmpv4df3:
27003 case CODE_FOR_avx_cmpv8sf3:
27004 error ("the last argument must be a 5-bit immediate");
27008 switch (nargs_constant)
27011 if ((nargs - i) == nargs_constant)
27013 error ("the next to last argument must be an 8-bit immediate");
27017 error ("the last argument must be an 8-bit immediate");
27020 gcc_unreachable ();
27027 if (VECTOR_MODE_P (mode))
27028 op = safe_vector_operand (op, mode);
27030 /* If we aren't optimizing, only allow one memory operand to be generated. */
27032 if (memory_operand (op, mode))
27035 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
27037 if (optimize || !match || num_memory > 1)
27038 op = copy_to_mode_reg (mode, op);
27042 op = copy_to_reg (op);
27043 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
27048 args[i].mode = mode;
27054 pat = GEN_FCN (icode) (real_target, args[0].op);
27057 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
27060 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
27064 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
27065 args[2].op, args[3].op);
27068 gcc_unreachable ();
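/* The immediate checks above in action: the rounding-control operand of
   the sse4_1 round patterns must be a 4-bit compile-time constant, so only
   the call below is accepted.  An illustrative sketch; assumes -msse4.1.  */
#if 0
#include <smmintrin.h>

static __m128
round_nearest (__m128 x)
{
  return _mm_round_ps (x, _MM_FROUND_TO_NEAREST_INT);
}

/* Passing a runtime value instead, e.g. _mm_round_ps (x, m) with int m,
   is rejected with "the last argument must be a 4-bit immediate".  */
#endif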
27078 /* Subroutine of ix86_expand_builtin to take care of special insns
27079 with a variable number of operands. */
27082 ix86_expand_special_args_builtin (const struct builtin_description *d,
27083 tree exp, rtx target)
27087 unsigned int i, nargs, arg_adjust, memory;
27091 enum machine_mode mode;
27093 enum insn_code icode = d->icode;
27094 bool last_arg_constant = false;
27095 const struct insn_data_d *insn_p = &insn_data[icode];
27096 enum machine_mode tmode = insn_p->operand[0].mode;
27097 enum { load, store } klass;
27099 switch ((enum ix86_builtin_func_type) d->flag)
27101 case VOID_FTYPE_VOID:
27102 if (icode == CODE_FOR_avx_vzeroupper)
27103 target = GEN_INT (vzeroupper_intrinsic);
27104 emit_insn (GEN_FCN (icode) (target));
27106 case VOID_FTYPE_UINT64:
27107 case VOID_FTYPE_UNSIGNED:
27113 case UINT64_FTYPE_VOID:
27114 case UNSIGNED_FTYPE_VOID:
27119 case UINT64_FTYPE_PUNSIGNED:
27120 case V2DI_FTYPE_PV2DI:
27121 case V32QI_FTYPE_PCCHAR:
27122 case V16QI_FTYPE_PCCHAR:
27123 case V8SF_FTYPE_PCV4SF:
27124 case V8SF_FTYPE_PCFLOAT:
27125 case V4SF_FTYPE_PCFLOAT:
27126 case V4DF_FTYPE_PCV2DF:
27127 case V4DF_FTYPE_PCDOUBLE:
27128 case V2DF_FTYPE_PCDOUBLE:
27129 case VOID_FTYPE_PVOID:
27134 case VOID_FTYPE_PV2SF_V4SF:
27135 case VOID_FTYPE_PV4DI_V4DI:
27136 case VOID_FTYPE_PV2DI_V2DI:
27137 case VOID_FTYPE_PCHAR_V32QI:
27138 case VOID_FTYPE_PCHAR_V16QI:
27139 case VOID_FTYPE_PFLOAT_V8SF:
27140 case VOID_FTYPE_PFLOAT_V4SF:
27141 case VOID_FTYPE_PDOUBLE_V4DF:
27142 case VOID_FTYPE_PDOUBLE_V2DF:
27143 case VOID_FTYPE_PULONGLONG_ULONGLONG:
27144 case VOID_FTYPE_PINT_INT:
27147 /* Reserve memory operand for target. */
27148 memory = ARRAY_SIZE (args);
27150 case V4SF_FTYPE_V4SF_PCV2SF:
27151 case V2DF_FTYPE_V2DF_PCDOUBLE:
27156 case V8SF_FTYPE_PCV8SF_V8SI:
27157 case V4DF_FTYPE_PCV4DF_V4DI:
27158 case V4SF_FTYPE_PCV4SF_V4SI:
27159 case V2DF_FTYPE_PCV2DF_V2DI:
27164 case VOID_FTYPE_PV8SF_V8SI_V8SF:
27165 case VOID_FTYPE_PV4DF_V4DI_V4DF:
27166 case VOID_FTYPE_PV4SF_V4SI_V4SF:
27167 case VOID_FTYPE_PV2DF_V2DI_V2DF:
27170 /* Reserve memory operand for target. */
27171 memory = ARRAY_SIZE (args);
27173 case VOID_FTYPE_UINT_UINT_UINT:
27174 case VOID_FTYPE_UINT64_UINT_UINT:
27175 case UCHAR_FTYPE_UINT_UINT_UINT:
27176 case UCHAR_FTYPE_UINT64_UINT_UINT:
27179 memory = ARRAY_SIZE (args);
27180 last_arg_constant = true;
27183 gcc_unreachable ();
27186 gcc_assert (nargs <= ARRAY_SIZE (args));
27188 if (klass == store)
27190 arg = CALL_EXPR_ARG (exp, 0);
27191 op = expand_normal (arg);
27192 gcc_assert (target == 0);
27194 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
27196 target = force_reg (tmode, op);
27204 || GET_MODE (target) != tmode
27205 || !insn_p->operand[0].predicate (target, tmode))
27206 target = gen_reg_rtx (tmode);
27209 for (i = 0; i < nargs; i++)
27211 enum machine_mode mode = insn_p->operand[i + 1].mode;
27214 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
27215 op = expand_normal (arg);
27216 match = insn_p->operand[i + 1].predicate (op, mode);
27218 if (last_arg_constant && (i + 1) == nargs)
27222 if (icode == CODE_FOR_lwp_lwpvalsi3
27223 || icode == CODE_FOR_lwp_lwpinssi3
27224 || icode == CODE_FOR_lwp_lwpvaldi3
27225 || icode == CODE_FOR_lwp_lwpinsdi3)
27226 error ("the last argument must be a 32-bit immediate");
27228 error ("the last argument must be an 8-bit immediate");
27236 /* This must be the memory operand. */
27237 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
27238 gcc_assert (GET_MODE (op) == mode
27239 || GET_MODE (op) == VOIDmode);
27243 /* This must be a register. */
27244 if (VECTOR_MODE_P (mode))
27245 op = safe_vector_operand (op, mode);
27247 gcc_assert (GET_MODE (op) == mode
27248 || GET_MODE (op) == VOIDmode);
27249 op = copy_to_mode_reg (mode, op);
27254 args[i].mode = mode;
27260 pat = GEN_FCN (icode) (target);
27263 pat = GEN_FCN (icode) (target, args[0].op);
27266 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
27269 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
27272 gcc_unreachable ();
27278 return klass == store ? 0 : target;
27281 /* Return the integer constant in ARG. Constrain it to be in the range
27282 of the subparts of VEC_TYPE; issue an error if not. */
27285 get_element_number (tree vec_type, tree arg)
27287 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
27289 if (!host_integerp (arg, 1)
27290 || (elt = tree_low_cst (arg, 1), elt > max))
27292 error ("selector must be an integer constant in the range 0..%wi", max);
27299 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27300 ix86_expand_vector_init. We DO have language-level syntax for this, in
27301 the form of (type){ init-list }. Except that since we can't place emms
27302 instructions from inside the compiler, we can't allow the use of MMX
27303 registers unless the user explicitly asks for it. So we do *not* define
27304 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
27305 we have builtins invoked by mmintrin.h that give us license to emit
27306 these sorts of instructions. */
27309 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
27311 enum machine_mode tmode = TYPE_MODE (type);
27312 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
27313 int i, n_elt = GET_MODE_NUNITS (tmode);
27314 rtvec v = rtvec_alloc (n_elt);
27316 gcc_assert (VECTOR_MODE_P (tmode));
27317 gcc_assert (call_expr_nargs (exp) == n_elt);
27319 for (i = 0; i < n_elt; ++i)
27321 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
27322 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
27325 if (!target || !register_operand (target, tmode))
27326 target = gen_reg_rtx (tmode);
27328 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
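/* A usage sketch (assuming the mmintrin.h wrappers, for illustration):
   _mm_set_pi32 expands to the V2SI init builtin, so

     __m64 v = _mm_set_pi32 (hi, lo);

   arrives here as a two-argument call with n_elt == 2; each scalar
   argument is narrowed to the inner mode before the vector is built.  */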
27332 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27333 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
27334 had a language-level syntax for referencing vector elements. */
27337 ix86_expand_vec_ext_builtin (tree exp, rtx target)
27339 enum machine_mode tmode, mode0;
27344 arg0 = CALL_EXPR_ARG (exp, 0);
27345 arg1 = CALL_EXPR_ARG (exp, 1);
27347 op0 = expand_normal (arg0);
27348 elt = get_element_number (TREE_TYPE (arg0), arg1);
27350 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
27351 mode0 = TYPE_MODE (TREE_TYPE (arg0));
27352 gcc_assert (VECTOR_MODE_P (mode0));
27354 op0 = force_reg (mode0, op0);
27356 if (optimize || !target || !register_operand (target, tmode))
27357 target = gen_reg_rtx (tmode);
27359 ix86_expand_vector_extract (true, target, op0, elt);
27364 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27365 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
27366 a language-level syntax for referencing vector elements. */
27369 ix86_expand_vec_set_builtin (tree exp)
27371 enum machine_mode tmode, mode1;
27372 tree arg0, arg1, arg2;
27374 rtx op0, op1, target;
27376 arg0 = CALL_EXPR_ARG (exp, 0);
27377 arg1 = CALL_EXPR_ARG (exp, 1);
27378 arg2 = CALL_EXPR_ARG (exp, 2);
27380 tmode = TYPE_MODE (TREE_TYPE (arg0));
27381 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
27382 gcc_assert (VECTOR_MODE_P (tmode));
27384 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
27385 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
27386 elt = get_element_number (TREE_TYPE (arg0), arg2);
27388 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
27389 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
27391 op0 = force_reg (tmode, op0);
27392 op1 = force_reg (mode1, op1);
27394 /* OP0 is the source of these builtin functions and shouldn't be
27395 modified. Create a copy, use it and return it as target. */
27396 target = gen_reg_rtx (tmode);
27397 emit_move_insn (target, op0);
27398 ix86_expand_vector_set (true, target, op1, elt);
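/* A usage sketch (assuming the emmintrin.h wrapper, for illustration):
   _mm_insert_epi16 reaches this code via IX86_BUILTIN_VEC_SET_V8HI, so

     __m128i r = _mm_insert_epi16 (v, 42, 3);

   leaves V untouched and returns the copy created above with element 3
   replaced.  */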
27403 /* Expand an expression EXP that calls a built-in function,
27404 with result going to TARGET if that's convenient
27405 (and in mode MODE if that's convenient).
27406 SUBTARGET may be used as the target for computing one of EXP's operands.
27407 IGNORE is nonzero if the value is to be ignored. */
27410 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
27411 enum machine_mode mode ATTRIBUTE_UNUSED,
27412 int ignore ATTRIBUTE_UNUSED)
27414 const struct builtin_description *d;
27416 enum insn_code icode;
27417 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
27418 tree arg0, arg1, arg2;
27419 rtx op0, op1, op2, pat;
27420 enum machine_mode mode0, mode1, mode2;
27421 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
27423 /* Determine whether the builtin function is available under the current ISA.
27424 Originally the builtin was not created if it wasn't applicable to the
27425 current ISA based on the command line switches. With function specific
27426 options, we need to check in the context of the function making the call
27427 whether it is supported. */
27428 if (ix86_builtins_isa[fcode].isa
27429 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
27431 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
27432 NULL, NULL, false);
27435 error ("%qE needs unknown isa option", fndecl);
27438 gcc_assert (opts != NULL);
27439 error ("%qE needs isa option %s", fndecl, opts);
27447 case IX86_BUILTIN_MASKMOVQ:
27448 case IX86_BUILTIN_MASKMOVDQU:
27449 icode = (fcode == IX86_BUILTIN_MASKMOVQ
27450 ? CODE_FOR_mmx_maskmovq
27451 : CODE_FOR_sse2_maskmovdqu);
27452 /* Note the arg order is different from the operand order. */
27453 arg1 = CALL_EXPR_ARG (exp, 0);
27454 arg2 = CALL_EXPR_ARG (exp, 1);
27455 arg0 = CALL_EXPR_ARG (exp, 2);
27456 op0 = expand_normal (arg0);
27457 op1 = expand_normal (arg1);
27458 op2 = expand_normal (arg2);
27459 mode0 = insn_data[icode].operand[0].mode;
27460 mode1 = insn_data[icode].operand[1].mode;
27461 mode2 = insn_data[icode].operand[2].mode;
27463 op0 = force_reg (Pmode, op0);
27464 op0 = gen_rtx_MEM (mode1, op0);
27466 if (!insn_data[icode].operand[0].predicate (op0, mode0))
27467 op0 = copy_to_mode_reg (mode0, op0);
27468 if (!insn_data[icode].operand[1].predicate (op1, mode1))
27469 op1 = copy_to_mode_reg (mode1, op1);
27470 if (!insn_data[icode].operand[2].predicate (op2, mode2))
27471 op2 = copy_to_mode_reg (mode2, op2);
27472 pat = GEN_FCN (icode) (op0, op1, op2);
27478 case IX86_BUILTIN_LDMXCSR:
27479 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
27480 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27481 emit_move_insn (target, op0);
27482 emit_insn (gen_sse_ldmxcsr (target));
27485 case IX86_BUILTIN_STMXCSR:
27486 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27487 emit_insn (gen_sse_stmxcsr (target));
27488 return copy_to_mode_reg (SImode, target);
27490 case IX86_BUILTIN_CLFLUSH:
27491 arg0 = CALL_EXPR_ARG (exp, 0);
27492 op0 = expand_normal (arg0);
27493 icode = CODE_FOR_sse2_clflush;
27494 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27495 op0 = copy_to_mode_reg (Pmode, op0);
27497 emit_insn (gen_sse2_clflush (op0));
27500 case IX86_BUILTIN_MONITOR:
27501 arg0 = CALL_EXPR_ARG (exp, 0);
27502 arg1 = CALL_EXPR_ARG (exp, 1);
27503 arg2 = CALL_EXPR_ARG (exp, 2);
27504 op0 = expand_normal (arg0);
27505 op1 = expand_normal (arg1);
27506 op2 = expand_normal (arg2);
27508 op0 = copy_to_mode_reg (Pmode, op0);
27510 op1 = copy_to_mode_reg (SImode, op1);
27512 op2 = copy_to_mode_reg (SImode, op2);
27513 emit_insn (ix86_gen_monitor (op0, op1, op2));
27516 case IX86_BUILTIN_MWAIT:
27517 arg0 = CALL_EXPR_ARG (exp, 0);
27518 arg1 = CALL_EXPR_ARG (exp, 1);
27519 op0 = expand_normal (arg0);
27520 op1 = expand_normal (arg1);
27522 op0 = copy_to_mode_reg (SImode, op0);
27524 op1 = copy_to_mode_reg (SImode, op1);
27525 emit_insn (gen_sse3_mwait (op0, op1));
27528 case IX86_BUILTIN_VEC_INIT_V2SI:
27529 case IX86_BUILTIN_VEC_INIT_V4HI:
27530 case IX86_BUILTIN_VEC_INIT_V8QI:
27531 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
27533 case IX86_BUILTIN_VEC_EXT_V2DF:
27534 case IX86_BUILTIN_VEC_EXT_V2DI:
27535 case IX86_BUILTIN_VEC_EXT_V4SF:
27536 case IX86_BUILTIN_VEC_EXT_V4SI:
27537 case IX86_BUILTIN_VEC_EXT_V8HI:
27538 case IX86_BUILTIN_VEC_EXT_V2SI:
27539 case IX86_BUILTIN_VEC_EXT_V4HI:
27540 case IX86_BUILTIN_VEC_EXT_V16QI:
27541 return ix86_expand_vec_ext_builtin (exp, target);
27543 case IX86_BUILTIN_VEC_SET_V2DI:
27544 case IX86_BUILTIN_VEC_SET_V4SF:
27545 case IX86_BUILTIN_VEC_SET_V4SI:
27546 case IX86_BUILTIN_VEC_SET_V8HI:
27547 case IX86_BUILTIN_VEC_SET_V4HI:
27548 case IX86_BUILTIN_VEC_SET_V16QI:
27549 return ix86_expand_vec_set_builtin (exp);
27551 case IX86_BUILTIN_VEC_PERM_V2DF:
27552 case IX86_BUILTIN_VEC_PERM_V4SF:
27553 case IX86_BUILTIN_VEC_PERM_V2DI:
27554 case IX86_BUILTIN_VEC_PERM_V4SI:
27555 case IX86_BUILTIN_VEC_PERM_V8HI:
27556 case IX86_BUILTIN_VEC_PERM_V16QI:
27557 case IX86_BUILTIN_VEC_PERM_V2DI_U:
27558 case IX86_BUILTIN_VEC_PERM_V4SI_U:
27559 case IX86_BUILTIN_VEC_PERM_V8HI_U:
27560 case IX86_BUILTIN_VEC_PERM_V16QI_U:
27561 case IX86_BUILTIN_VEC_PERM_V4DF:
27562 case IX86_BUILTIN_VEC_PERM_V8SF:
27563 return ix86_expand_vec_perm_builtin (exp);
27565 case IX86_BUILTIN_INFQ:
27566 case IX86_BUILTIN_HUGE_VALQ:
27568 REAL_VALUE_TYPE inf;
27572 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
27574 tmp = validize_mem (force_const_mem (mode, tmp));
27577 target = gen_reg_rtx (mode);
27579 emit_move_insn (target, tmp);
27583 case IX86_BUILTIN_LLWPCB:
27584 arg0 = CALL_EXPR_ARG (exp, 0);
27585 op0 = expand_normal (arg0);
27586 icode = CODE_FOR_lwp_llwpcb;
27587 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27588 op0 = copy_to_mode_reg (Pmode, op0);
27589 emit_insn (gen_lwp_llwpcb (op0));
27592 case IX86_BUILTIN_SLWPCB:
27593 icode = CODE_FOR_lwp_slwpcb;
27595 || !insn_data[icode].operand[0].predicate (target, Pmode))
27596 target = gen_reg_rtx (Pmode);
27597 emit_insn (gen_lwp_slwpcb (target));
27600 case IX86_BUILTIN_BEXTRI32:
27601 case IX86_BUILTIN_BEXTRI64:
27602 arg0 = CALL_EXPR_ARG (exp, 0);
27603 arg1 = CALL_EXPR_ARG (exp, 1);
27604 op0 = expand_normal (arg0);
27605 op1 = expand_normal (arg1);
27606 icode = (fcode == IX86_BUILTIN_BEXTRI32
27607 ? CODE_FOR_tbm_bextri_si
27608 : CODE_FOR_tbm_bextri_di);
27609 if (!CONST_INT_P (op1))
27611 error ("last argument must be an immediate");
27616 unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
27617 unsigned char lsb_index = INTVAL (op1) & 0xFF;
27618 op1 = GEN_INT (length);
27619 op2 = GEN_INT (lsb_index);
27620 pat = GEN_FCN (icode) (target, op0, op1, op2);
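/* For illustration: per the split above, the TBM bextri immediate packs
   the starting bit index in bits [7:0] and the field length in bits
   [15:8], so e.g. an immediate of 0x0804 extracts an 8-bit field
   beginning at bit 4.  */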
27626 case IX86_BUILTIN_RDRAND16_STEP:
27627 icode = CODE_FOR_rdrandhi_1;
27631 case IX86_BUILTIN_RDRAND32_STEP:
27632 icode = CODE_FOR_rdrandsi_1;
27636 case IX86_BUILTIN_RDRAND64_STEP:
27637 icode = CODE_FOR_rdranddi_1;
27641 op0 = gen_reg_rtx (mode0);
27642 emit_insn (GEN_FCN (icode) (op0));
27644 op1 = gen_reg_rtx (SImode);
27645 emit_move_insn (op1, CONST1_RTX (SImode));
27647 /* Emit SImode conditional move. */
27648 if (mode0 == HImode)
27650 op2 = gen_reg_rtx (SImode);
27651 emit_insn (gen_zero_extendhisi2 (op2, op0));
27653 else if (mode0 == SImode)
27656 op2 = gen_rtx_SUBREG (SImode, op0, 0);
27658 pat = gen_rtx_GEU (VOIDmode, gen_rtx_REG (CCCmode, FLAGS_REG),
27660 emit_insn (gen_rtx_SET (VOIDmode, op1,
27661 gen_rtx_IF_THEN_ELSE (SImode, pat, op2, op1)));
27662 emit_move_insn (target, op1);
27664 arg0 = CALL_EXPR_ARG (exp, 0);
27665 op1 = expand_normal (arg0);
27666 if (!address_operand (op1, VOIDmode))
27667 op1 = copy_addr_to_reg (op1);
27668 emit_move_insn (gen_rtx_MEM (mode0, op1), op0);
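/* A usage sketch (assuming the builtin name wrapped by immintrin.h's
   _rdrand32_step, for illustration):

     unsigned int val;
     int ok = __builtin_ia32_rdrand32_step (&val);

   The expansion above stores the hardware random value through the
   pointer argument and yields 1 when the carry flag signals success;
   on failure rdrand zeroes its destination, so the conditional move
   makes the result 0.  */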
27675 for (i = 0, d = bdesc_special_args;
27676 i < ARRAY_SIZE (bdesc_special_args);
27678 if (d->code == fcode)
27679 return ix86_expand_special_args_builtin (d, exp, target);
27681 for (i = 0, d = bdesc_args;
27682 i < ARRAY_SIZE (bdesc_args);
27684 if (d->code == fcode)
27687 case IX86_BUILTIN_FABSQ:
27688 case IX86_BUILTIN_COPYSIGNQ:
27690 /* Emit a normal call if SSE2 isn't available. */
27691 return expand_call (exp, target, ignore);
27693 return ix86_expand_args_builtin (d, exp, target);
27696 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
27697 if (d->code == fcode)
27698 return ix86_expand_sse_comi (d, exp, target);
27700 for (i = 0, d = bdesc_pcmpestr;
27701 i < ARRAY_SIZE (bdesc_pcmpestr);
27703 if (d->code == fcode)
27704 return ix86_expand_sse_pcmpestr (d, exp, target);
27706 for (i = 0, d = bdesc_pcmpistr;
27707 i < ARRAY_SIZE (bdesc_pcmpistr);
27709 if (d->code == fcode)
27710 return ix86_expand_sse_pcmpistr (d, exp, target);
27712 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
27713 if (d->code == fcode)
27714 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
27715 (enum ix86_builtin_func_type)
27716 d->flag, d->comparison);
27718 gcc_unreachable ();
27721 /* Returns a function decl for a vectorized version of the builtin function
27722 with builtin function code FN and the result vector type TYPE, or NULL_TREE
27723 if it is not available. */
27726 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
27729 enum machine_mode in_mode, out_mode;
27731 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
27733 if (TREE_CODE (type_out) != VECTOR_TYPE
27734 || TREE_CODE (type_in) != VECTOR_TYPE
27735 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
27738 out_mode = TYPE_MODE (TREE_TYPE (type_out));
27739 out_n = TYPE_VECTOR_SUBPARTS (type_out);
27740 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27741 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27745 case BUILT_IN_SQRT:
27746 if (out_mode == DFmode && in_mode == DFmode)
27748 if (out_n == 2 && in_n == 2)
27749 return ix86_builtins[IX86_BUILTIN_SQRTPD];
27750 else if (out_n == 4 && in_n == 4)
27751 return ix86_builtins[IX86_BUILTIN_SQRTPD256];
27755 case BUILT_IN_SQRTF:
27756 if (out_mode == SFmode && in_mode == SFmode)
27758 if (out_n == 4 && in_n == 4)
27759 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
27760 else if (out_n == 8 && in_n == 8)
27761 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
27765 case BUILT_IN_LRINT:
27766 if (out_mode == SImode && out_n == 4
27767 && in_mode == DFmode && in_n == 2)
27768 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
27771 case BUILT_IN_LRINTF:
27772 if (out_mode == SImode && in_mode == SFmode)
27774 if (out_n == 4 && in_n == 4)
27775 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
27776 else if (out_n == 8 && in_n == 8)
27777 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
27781 case BUILT_IN_COPYSIGN:
27782 if (out_mode == DFmode && in_mode == DFmode)
27784 if (out_n == 2 && in_n == 2)
27785 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
27786 else if (out_n == 4 && in_n == 4)
27787 return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
27791 case BUILT_IN_COPYSIGNF:
27792 if (out_mode == SFmode && in_mode == SFmode)
27794 if (out_n == 4 && in_n == 4)
27795 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
27796 else if (out_n == 8 && in_n == 8)
27797 return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
27801 case BUILT_IN_FLOOR:
27802 /* The round insn does not trap on denormals. */
27803 if (flag_trapping_math || !TARGET_ROUND)
27806 if (out_mode == DFmode && in_mode == DFmode)
27808 if (out_n == 2 && in_n == 2)
27809 return ix86_builtins[IX86_BUILTIN_FLOORPD];
27810 else if (out_n == 4 && in_n == 4)
27811 return ix86_builtins[IX86_BUILTIN_FLOORPD256];
27815 case BUILT_IN_FLOORF:
27816 /* The round insn does not trap on denormals. */
27817 if (flag_trapping_math || !TARGET_ROUND)
27820 if (out_mode == SFmode && in_mode == SFmode)
27822 if (out_n == 4 && in_n == 4)
27823 return ix86_builtins[IX86_BUILTIN_FLOORPS];
27824 else if (out_n == 8 && in_n == 8)
27825 return ix86_builtins[IX86_BUILTIN_FLOORPS256];
27829 case BUILT_IN_CEIL:
27830 /* The round insn does not trap on denormals. */
27831 if (flag_trapping_math || !TARGET_ROUND)
27834 if (out_mode == DFmode && in_mode == DFmode)
27836 if (out_n == 2 && in_n == 2)
27837 return ix86_builtins[IX86_BUILTIN_CEILPD];
27838 else if (out_n == 4 && in_n == 4)
27839 return ix86_builtins[IX86_BUILTIN_CEILPD256];
27843 case BUILT_IN_CEILF:
27844 /* The round insn does not trap on denormals. */
27845 if (flag_trapping_math || !TARGET_ROUND)
27848 if (out_mode == SFmode && in_mode == SFmode)
27850 if (out_n == 4 && in_n == 4)
27851 return ix86_builtins[IX86_BUILTIN_CEILPS];
27852 else if (out_n == 8 && in_n == 8)
27853 return ix86_builtins[IX86_BUILTIN_CEILPS256];
27857 case BUILT_IN_TRUNC:
27858 /* The round insn does not trap on denormals. */
27859 if (flag_trapping_math || !TARGET_ROUND)
27862 if (out_mode == DFmode && in_mode == DFmode)
27864 if (out_n == 2 && in_n == 2)
27865 return ix86_builtins[IX86_BUILTIN_TRUNCPD];
27866 else if (out_n == 4 && in_n == 4)
27867 return ix86_builtins[IX86_BUILTIN_TRUNCPD256];
27871 case BUILT_IN_TRUNCF:
27872 /* The round insn does not trap on denormals. */
27873 if (flag_trapping_math || !TARGET_ROUND)
27876 if (out_mode == SFmode && in_mode == SFmode)
27878 if (out_n == 4 && in_n == 4)
27879 return ix86_builtins[IX86_BUILTIN_TRUNCPS];
27880 else if (out_n == 8 && in_n == 8)
27881 return ix86_builtins[IX86_BUILTIN_TRUNCPS256];
27885 case BUILT_IN_RINT:
27886 /* The round insn does not trap on denormals. */
27887 if (flag_trapping_math || !TARGET_ROUND)
27890 if (out_mode == DFmode && in_mode == DFmode)
27892 if (out_n == 2 && in_n == 2)
27893 return ix86_builtins[IX86_BUILTIN_RINTPD];
27894 else if (out_n == 4 && in_n == 4)
27895 return ix86_builtins[IX86_BUILTIN_RINTPD256];
27899 case BUILT_IN_RINTF:
27900 /* The round insn does not trap on denormals. */
27901 if (flag_trapping_math || !TARGET_ROUND)
27904 if (out_mode == SFmode && in_mode == SFmode)
27906 if (out_n == 4 && in_n == 4)
27907 return ix86_builtins[IX86_BUILTIN_RINTPS];
27908 else if (out_n == 8 && in_n == 8)
27909 return ix86_builtins[IX86_BUILTIN_RINTPS256];
27914 if (out_mode == DFmode && in_mode == DFmode)
27916 if (out_n == 2 && in_n == 2)
27917 return ix86_builtins[IX86_BUILTIN_VFMADDPD];
27918 if (out_n == 4 && in_n == 4)
27919 return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
27923 case BUILT_IN_FMAF:
27924 if (out_mode == SFmode && in_mode == SFmode)
27926 if (out_n == 4 && in_n == 4)
27927 return ix86_builtins[IX86_BUILTIN_VFMADDPS];
27928 if (out_n == 8 && in_n == 8)
27929 return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
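/* For illustration: given a loop such as

     for (i = 0; i < n; i++)
       out[i] = sqrt (in[i]);

   over doubles, the vectorizer asks for BUILT_IN_SQRT with V2DF types,
   and the table above hands back IX86_BUILTIN_SQRTPD (or the 256-bit
   variant when AVX vectors are in use).  */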
27937 /* Dispatch to a handler for a vectorization library. */
27938 if (ix86_veclib_handler)
27939 return ix86_veclib_handler ((enum built_in_function) fn, type_out,
27945 /* Handler for an SVML-style interface to
27946 a library with vectorized intrinsics. */
27949 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
27952 tree fntype, new_fndecl, args;
27955 enum machine_mode el_mode, in_mode;
27958 /* The SVML is suitable for unsafe math only. */
27959 if (!flag_unsafe_math_optimizations)
27962 el_mode = TYPE_MODE (TREE_TYPE (type_out));
27963 n = TYPE_VECTOR_SUBPARTS (type_out);
27964 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27965 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27966 if (el_mode != in_mode
27974 case BUILT_IN_LOG10:
27976 case BUILT_IN_TANH:
27978 case BUILT_IN_ATAN:
27979 case BUILT_IN_ATAN2:
27980 case BUILT_IN_ATANH:
27981 case BUILT_IN_CBRT:
27982 case BUILT_IN_SINH:
27984 case BUILT_IN_ASINH:
27985 case BUILT_IN_ASIN:
27986 case BUILT_IN_COSH:
27988 case BUILT_IN_ACOSH:
27989 case BUILT_IN_ACOS:
27990 if (el_mode != DFmode || n != 2)
27994 case BUILT_IN_EXPF:
27995 case BUILT_IN_LOGF:
27996 case BUILT_IN_LOG10F:
27997 case BUILT_IN_POWF:
27998 case BUILT_IN_TANHF:
27999 case BUILT_IN_TANF:
28000 case BUILT_IN_ATANF:
28001 case BUILT_IN_ATAN2F:
28002 case BUILT_IN_ATANHF:
28003 case BUILT_IN_CBRTF:
28004 case BUILT_IN_SINHF:
28005 case BUILT_IN_SINF:
28006 case BUILT_IN_ASINHF:
28007 case BUILT_IN_ASINF:
28008 case BUILT_IN_COSHF:
28009 case BUILT_IN_COSF:
28010 case BUILT_IN_ACOSHF:
28011 case BUILT_IN_ACOSF:
28012 if (el_mode != SFmode || n != 4)
28020 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
28022 if (fn == BUILT_IN_LOGF)
28023 strcpy (name, "vmlsLn4");
28024 else if (fn == BUILT_IN_LOG)
28025 strcpy (name, "vmldLn2");
28028 sprintf (name, "vmls%s", bname+10);
28029 name[strlen (name)-1] = '4';
28032 sprintf (name, "vmld%s2", bname+10);
28034 /* Convert to uppercase. */
28038 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
28039 args = TREE_CHAIN (args))
28043 fntype = build_function_type_list (type_out, type_in, NULL);
28045 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
28047 /* Build a function declaration for the vectorized function. */
28048 new_fndecl = build_decl (BUILTINS_LOCATION,
28049 FUNCTION_DECL, get_identifier (name), fntype);
28050 TREE_PUBLIC (new_fndecl) = 1;
28051 DECL_EXTERNAL (new_fndecl) = 1;
28052 DECL_IS_NOVOPS (new_fndecl) = 1;
28053 TREE_READONLY (new_fndecl) = 1;
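/* A worked example of the mangling above (for illustration): for
   BUILT_IN_SINF, BNAME is "__builtin_sinf" and BNAME+10 is "sinf"; the
   sprintf plus the trailing-character overwrite produce "vmlssin4",
   and the uppercase fixup then yields "vmlsSin4".  The double variant
   of BUILT_IN_SIN similarly becomes "vmldSin2".  */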
28058 /* Handler for an ACML-style interface to
28059 a library with vectorized intrinsics. */
28062 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
28064 char name[20] = "__vr.._";
28065 tree fntype, new_fndecl, args;
28068 enum machine_mode el_mode, in_mode;
28071 /* The ACML is 64-bit only and suitable for unsafe math only, as
28072 it does not correctly support parts of IEEE (such as denormals)
28073 with the required precision. */
28075 || !flag_unsafe_math_optimizations)
28078 el_mode = TYPE_MODE (TREE_TYPE (type_out));
28079 n = TYPE_VECTOR_SUBPARTS (type_out);
28080 in_mode = TYPE_MODE (TREE_TYPE (type_in));
28081 in_n = TYPE_VECTOR_SUBPARTS (type_in);
28082 if (el_mode != in_mode
28092 case BUILT_IN_LOG2:
28093 case BUILT_IN_LOG10:
28096 if (el_mode != DFmode
28101 case BUILT_IN_SINF:
28102 case BUILT_IN_COSF:
28103 case BUILT_IN_EXPF:
28104 case BUILT_IN_POWF:
28105 case BUILT_IN_LOGF:
28106 case BUILT_IN_LOG2F:
28107 case BUILT_IN_LOG10F:
28110 if (el_mode != SFmode
28119 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
28120 sprintf (name + 7, "%s", bname+10);
28123 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
28124 args = TREE_CHAIN (args))
28128 fntype = build_function_type_list (type_out, type_in, NULL);
28130 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
28132 /* Build a function declaration for the vectorized function. */
28133 new_fndecl = build_decl (BUILTINS_LOCATION,
28134 FUNCTION_DECL, get_identifier (name), fntype);
28135 TREE_PUBLIC (new_fndecl) = 1;
28136 DECL_EXTERNAL (new_fndecl) = 1;
28137 DECL_IS_NOVOPS (new_fndecl) = 1;
28138 TREE_READONLY (new_fndecl) = 1;
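/* A worked example (for illustration): the two dots in the "__vr.._"
   template are filled in (in code elided here) with the element kind
   and vector width, so BUILT_IN_SINF becomes "__vrs4_sinf" and
   BUILT_IN_SIN becomes "__vrd2_sin", matching the ACML vector math
   entry points.  */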
28144 /* Returns a decl of a function that implements the conversion of an integer
28145 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
28146 are the types involved when converting according to CODE.
28147 Return NULL_TREE if it is not available. */
28150 ix86_vectorize_builtin_conversion (unsigned int code,
28151 tree dest_type, tree src_type)
28159 switch (TYPE_MODE (src_type))
28162 switch (TYPE_MODE (dest_type))
28165 return (TYPE_UNSIGNED (src_type)
28166 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
28167 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
28169 return (TYPE_UNSIGNED (src_type)
28171 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
28177 switch (TYPE_MODE (dest_type))
28180 return (TYPE_UNSIGNED (src_type)
28182 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS256]);
28191 case FIX_TRUNC_EXPR:
28192 switch (TYPE_MODE (dest_type))
28195 switch (TYPE_MODE (src_type))
28198 return (TYPE_UNSIGNED (dest_type)
28200 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
28202 return (TYPE_UNSIGNED (dest_type)
28204 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
28211 switch (TYPE_MODE (src_type))
28214 return (TYPE_UNSIGNED (dest_type)
28216 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
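/* For illustration: vectorizing an int -> float conversion loop gives
   FLOAT_EXPR with a V4SI source and V4SF destination, which maps to
   IX86_BUILTIN_CVTDQ2PS above; the FIX_TRUNC_EXPR direction maps the
   float -> int case onto the truncating CVTTPS2DQ family instead.  */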
28233 /* Returns a decl for a target-specific builtin that implements
28234 the reciprocal of the function, or NULL_TREE if not available. */
28237 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
28238 bool sqrt ATTRIBUTE_UNUSED)
28240 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
28241 && flag_finite_math_only && !flag_trapping_math
28242 && flag_unsafe_math_optimizations))
28246 /* Machine dependent builtins. */
28249 /* Vectorized version of sqrt to rsqrt conversion. */
28250 case IX86_BUILTIN_SQRTPS_NR:
28251 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
28253 case IX86_BUILTIN_SQRTPS_NR256:
28254 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];
28260 /* Normal builtins. */
28263 /* Sqrt to rsqrt conversion. */
28264 case BUILT_IN_SQRTF:
28265 return ix86_builtins[IX86_BUILTIN_RSQRTF];
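/* For illustration (a sketch, assuming the usual -ffast-math flag set
   that satisfies the checks above): a computation such as

     float r = 1.0f / sqrtf (x);

   may be rewritten to use IX86_BUILTIN_RSQRTF, the RSQRTSS-based
   reciprocal square root, instead of a full square root followed by a
   division.  */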
28272 /* Helper for avx_vpermilps256_operand et al. This is also used by
28273 the expansion functions to turn the parallel back into a mask.
28274 The return value is 0 for no match and the imm8+1 for a match. */
28277 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
28279 unsigned i, nelt = GET_MODE_NUNITS (mode);
28281 unsigned char ipar[8];
28283 if (XVECLEN (par, 0) != (int) nelt)
28286 /* Validate that all of the elements are constants, and not totally
28287 out of range. Copy the data into an integral array to make the
28288 subsequent checks easier. */
28289 for (i = 0; i < nelt; ++i)
28291 rtx er = XVECEXP (par, 0, i);
28292 unsigned HOST_WIDE_INT ei;
28294 if (!CONST_INT_P (er))
28305 /* In the 256-bit DFmode case, we can only move elements within a 128-bit lane. */
28307 for (i = 0; i < 2; ++i)
28311 mask |= ipar[i] << i;
28313 for (i = 2; i < 4; ++i)
28317 mask |= (ipar[i] - 2) << i;
28322 /* In the 256-bit SFmode case, we have full freedom of movement
28323 within the low 128-bit lane, but the high 128-bit lane must
28324 mirror the exact same pattern. */
28325 for (i = 0; i < 4; ++i)
28326 if (ipar[i] + 4 != ipar[i + 4])
28333 /* In the 128-bit case, we have full freedom in the placement of
28334 the elements from the source operand. */
28335 for (i = 0; i < nelt; ++i)
28336 mask |= ipar[i] << (i * (nelt / 2));
28340 gcc_unreachable ();
28343 /* Make sure success has a non-zero value by adding one. */
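/* A worked example (for illustration): for a V4SF parallel selecting
   [2 3 0 1], the 128-bit branch above builds
   mask = 2 | (3 << 2) | (0 << 4) | (1 << 6) = 0x4e, and the function
   returns 0x4f so that zero can still mean "no match".  */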
28347 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
28348 the expansion functions to turn the parallel back into a mask.
28349 The return value is 0 for no match and the imm8+1 for a match. */
28352 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
28354 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
28356 unsigned char ipar[8];
28358 if (XVECLEN (par, 0) != (int) nelt)
28361 /* Validate that all of the elements are constants, and not totally
28362 out of range. Copy the data into an integral array to make the
28363 subsequent checks easier. */
28364 for (i = 0; i < nelt; ++i)
28366 rtx er = XVECEXP (par, 0, i);
28367 unsigned HOST_WIDE_INT ei;
28369 if (!CONST_INT_P (er))
28372 if (ei >= 2 * nelt)
28377 /* Validate that each half of the permute selects a contiguous run of elements. */
28378 for (i = 0; i < nelt2 - 1; ++i)
28379 if (ipar[i] + 1 != ipar[i + 1])
28381 for (i = nelt2; i < nelt - 1; ++i)
28382 if (ipar[i] + 1 != ipar[i + 1])
28385 /* Reconstruct the mask. */
28386 for (i = 0; i < 2; ++i)
28388 unsigned e = ipar[i * nelt2];
28392 mask |= e << (i * 4);
28395 /* Make sure success has a non-zero value by adding one. */
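/* A worked example (for illustration): for a V4DF parallel selecting
   [2 3 4 5] from the 8-element concatenation of the two sources, both
   halves are contiguous and lane-aligned; the low lane encodes as 1
   (upper half of operand 0) and the high lane as 2 (lower half of
   operand 1), giving imm8 0x21 and a return value of 0x22.  */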
28400 /* Store OPERAND to memory after reload is completed. This means
28401 that we can't easily use assign_stack_local. */
28403 ix86_force_to_memory (enum machine_mode mode, rtx operand)
28407 gcc_assert (reload_completed);
28408 if (ix86_using_red_zone ())
28410 result = gen_rtx_MEM (mode,
28411 gen_rtx_PLUS (Pmode,
28413 GEN_INT (-RED_ZONE_SIZE)));
28414 emit_move_insn (result, operand);
28416 else if (TARGET_64BIT)
28422 operand = gen_lowpart (DImode, operand);
28426 gen_rtx_SET (VOIDmode,
28427 gen_rtx_MEM (DImode,
28428 gen_rtx_PRE_DEC (DImode,
28429 stack_pointer_rtx)),
28433 gcc_unreachable ();
28435 result = gen_rtx_MEM (mode, stack_pointer_rtx);
28444 split_double_mode (mode, &operand, 1, operands, operands + 1);
28446 gen_rtx_SET (VOIDmode,
28447 gen_rtx_MEM (SImode,
28448 gen_rtx_PRE_DEC (Pmode,
28449 stack_pointer_rtx)),
28452 gen_rtx_SET (VOIDmode,
28453 gen_rtx_MEM (SImode,
28454 gen_rtx_PRE_DEC (Pmode,
28455 stack_pointer_rtx)),
28460 /* Store HImode values as SImode. */
28461 operand = gen_lowpart (SImode, operand);
28465 gen_rtx_SET (VOIDmode,
28466 gen_rtx_MEM (GET_MODE (operand),
28467 gen_rtx_PRE_DEC (SImode,
28468 stack_pointer_rtx)),
28472 gcc_unreachable ();
28474 result = gen_rtx_MEM (mode, stack_pointer_rtx);
28479 /* Free the operand from memory. */
28481 ix86_free_from_memory (enum machine_mode mode)
28483 if (!ix86_using_red_zone ())
28487 if (mode == DImode || TARGET_64BIT)
28491 /* Use LEA to deallocate stack space. In peephole2 it will be converted
28492 to a pop or add instruction if registers are available. */
28493 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
28494 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
28499 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
28501 Put float CONST_DOUBLE in the constant pool instead of fp regs.
28502 QImode must go into class Q_REGS.
28503 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
28504 movdf to do mem-to-mem moves through integer regs. */
28507 ix86_preferred_reload_class (rtx x, reg_class_t regclass)
28509 enum machine_mode mode = GET_MODE (x);
28511 /* We're only allowed to return a subclass of CLASS. Many of the
28512 following checks fail for NO_REGS, so eliminate that early. */
28513 if (regclass == NO_REGS)
28516 /* All classes can load zeros. */
28517 if (x == CONST0_RTX (mode))
28520 /* Force constants into memory if we are loading a (nonzero) constant into
28521 an MMX or SSE register. This is because there are no MMX/SSE instructions
28522 to load from a constant. */
28524 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
28527 /* Prefer SSE regs only if we can use them for math. */
28528 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
28529 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
28531 /* Floating-point constants need more complex checks. */
28532 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
28534 /* General regs can load everything. */
28535 if (reg_class_subset_p (regclass, GENERAL_REGS))
28538 /* Floats can load 0 and 1 plus some others. Note that we eliminated
28539 zero above. We only want to wind up preferring 80387 registers if
28540 we plan on doing computation with them. */
28542 && standard_80387_constant_p (x))
28544 /* Limit class to non-SSE. */
28545 if (regclass == FLOAT_SSE_REGS)
28547 if (regclass == FP_TOP_SSE_REGS)
28549 if (regclass == FP_SECOND_SSE_REGS)
28550 return FP_SECOND_REG;
28551 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
28558 /* Generally when we see PLUS here, it's the function invariant
28559 (plus soft-fp const_int), which can only be computed into general regs. */
28561 if (GET_CODE (x) == PLUS)
28562 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
28564 /* QImode constants are easy to load, but non-constant QImode data
28565 must go into Q_REGS. */
28566 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
28568 if (reg_class_subset_p (regclass, Q_REGS))
28570 if (reg_class_subset_p (Q_REGS, regclass))
28578 /* Discourage putting floating-point values in SSE registers unless
28579 SSE math is being used, and likewise for the 387 registers. */
28581 ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
28583 enum machine_mode mode = GET_MODE (x);
28585 /* Restrict the output reload class to the register bank that we are doing
28586 math on. If we would like not to return a subset of CLASS, reject this
28587 alternative: if reload cannot do this, it will still use its choice. */
28589 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
28590 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
28592 if (X87_FLOAT_MODE_P (mode))
28594 if (regclass == FP_TOP_SSE_REGS)
28596 else if (regclass == FP_SECOND_SSE_REGS)
28597 return FP_SECOND_REG;
28599 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
28606 ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
28607 enum machine_mode mode,
28608 secondary_reload_info *sri ATTRIBUTE_UNUSED)
28610 /* QImode spills from non-QI registers require
28611 an intermediate register on 32-bit targets. */
28613 && !in_p && mode == QImode
28614 && (rclass == GENERAL_REGS
28615 || rclass == LEGACY_REGS
28616 || rclass == INDEX_REGS))
28625 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
28626 regno = true_regnum (x);
28628 /* Return Q_REGS if the operand is in memory. */
28633 /* This condition handles the corner case where an expression involving
28634 pointers gets vectorized. We're trying to use the address of a
28635 stack slot as a vector initializer.
28637 (set (reg:V2DI 74 [ vect_cst_.2 ])
28638 (vec_duplicate:V2DI (reg/f:DI 20 frame)))
28640 Eventually frame gets turned into sp+offset like this:
28642 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28643 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28644 (const_int 392 [0x188]))))
28646 That later gets turned into:
28648 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28649 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28650 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))
28652 We'll have the following reload recorded:
28654 Reload 0: reload_in (DI) =
28655 (plus:DI (reg/f:DI 7 sp)
28656 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
28657 reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28658 SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
28659 reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
28660 reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28661 reload_reg_rtx: (reg:V2DI 22 xmm1)
28663 Which isn't going to work since SSE instructions can't handle scalar
28664 additions. Returning GENERAL_REGS forces the addition into an integer
28665 register, and reload can handle subsequent reloads without problems. */
28667 if (in_p && GET_CODE (x) == PLUS
28668 && SSE_CLASS_P (rclass)
28669 && SCALAR_INT_MODE_P (mode))
28670 return GENERAL_REGS;
28675 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
28678 ix86_class_likely_spilled_p (reg_class_t rclass)
28689 case SSE_FIRST_REG:
28691 case FP_SECOND_REG:
28701 /* If we are copying between general and FP registers, we need a memory
28702 location. The same is true for SSE and MMX registers.
28704 To optimize register_move_cost performance, allow inline variant.
28706 The macro can't work reliably when one of the CLASSES is a class containing
28707 registers from multiple units (SSE, MMX, integer). We avoid this by never
28708 combining those units in single alternative in the machine description.
28709 Ensure that this constraint holds to avoid unexpected surprises.
28711 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
28712 enforce these sanity checks. */
28715 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28716 enum machine_mode mode, int strict)
28718 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
28719 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
28720 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
28721 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
28722 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
28723 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
28725 gcc_assert (!strict);
28729 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
28732 /* ??? This is a lie. We do have moves between mmx/general, and for
28733 mmx/sse2. But by saying we need secondary memory we discourage the
28734 register allocator from using the mmx registers unless needed. */
28735 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
28738 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28740 /* SSE1 doesn't have any direct moves from other classes. */
28744 /* If the target says that inter-unit moves are more expensive
28745 than moving through memory, then don't generate them. */
28746 if (!TARGET_INTER_UNIT_MOVES)
28749 /* Between SSE and general, we have moves no larger than word size. */
28750 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
28758 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28759 enum machine_mode mode, int strict)
28761 return inline_secondary_memory_needed (class1, class2, mode, strict);
28764 /* Return true if the registers in CLASS cannot represent the change from
28765 modes FROM to TO. */
28768 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
28769 enum reg_class regclass)
28774 /* x87 registers can't do subreg at all, as all values are reformatted
28775 to extended precision. */
28776 if (MAYBE_FLOAT_CLASS_P (regclass))
28779 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
28781 /* Vector registers do not support QI or HImode loads. If we don't
28782 disallow a change to these modes, reload will assume it's ok to
28783 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
28784 the vec_dupv4hi pattern. */
28785 if (GET_MODE_SIZE (from) < 4)
28788 /* Vector registers do not support subreg with nonzero offsets, which
28789 are otherwise valid for integer registers. Since we can't see
28790 whether we have a nonzero offset from here, prohibit all
28791 nonparadoxical subregs changing size. */
28792 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
28799 /* Return the cost of moving data of mode M between a
28800 register and memory. A value of 2 is the default; this cost is
28801 relative to those in `REGISTER_MOVE_COST'.
28803 This function is used extensively by register_move_cost, which is used
28804 to build tables at startup. Make it inline in this case.
28805 When IN is 2, return the maximum of the in and out move costs.
28807 If moving between registers and memory is more expensive than
28808 between two registers, you should define this macro to express the relative cost.
28811 Also model the increased moving costs of QImode registers in non-Q_REGS classes. */
28815 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
28819 if (FLOAT_CLASS_P (regclass))
28837 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
28838 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
28840 if (SSE_CLASS_P (regclass))
28843 switch (GET_MODE_SIZE (mode))
28858 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
28859 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
28861 if (MMX_CLASS_P (regclass))
28864 switch (GET_MODE_SIZE (mode))
28876 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
28877 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
28879 switch (GET_MODE_SIZE (mode))
28882 if (Q_CLASS_P (regclass) || TARGET_64BIT)
28885 return ix86_cost->int_store[0];
28886 if (TARGET_PARTIAL_REG_DEPENDENCY
28887 && optimize_function_for_speed_p (cfun))
28888 cost = ix86_cost->movzbl_load;
28890 cost = ix86_cost->int_load[0];
28892 return MAX (cost, ix86_cost->int_store[0]);
28898 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
28900 return ix86_cost->movzbl_load;
28902 return ix86_cost->int_store[0] + 4;
28907 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
28908 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
28910 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
28911 if (mode == TFmode)
28914 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
28916 cost = ix86_cost->int_load[2];
28918 cost = ix86_cost->int_store[2];
28919 return (cost * (((int) GET_MODE_SIZE (mode)
28920 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
28925 ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
28928 return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
28932 /* Return the cost of moving data from a register in class CLASS1 to
28933 one in class CLASS2.
28935 It is not required that the cost always equal 2 when FROM is the same as TO;
28936 on some machines it is expensive to move between registers if they are not
28937 general registers. */
28940 ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
28941 reg_class_t class2_i)
28943 enum reg_class class1 = (enum reg_class) class1_i;
28944 enum reg_class class2 = (enum reg_class) class2_i;
28946 /* In case we require secondary memory, compute the cost of the store
28947 followed by the load. In order to avoid bad register allocation choices, we need
28948 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
28950 if (inline_secondary_memory_needed (class1, class2, mode, 0))
28954 cost += inline_memory_move_cost (mode, class1, 2);
28955 cost += inline_memory_move_cost (mode, class2, 2);
28957 /* In the case of copying from a general purpose register we may emit
28958 multiple stores followed by a single load, causing a memory size
28959 mismatch stall. Count this as an arbitrarily high cost of 20. */
28960 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
28963 /* In the case of FP/MMX moves, the registers actually overlap, and we
28964 have to switch modes in order to treat them differently. */
28965 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
28966 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
28972 /* Moves between SSE/MMX registers and the integer unit are expensive. */
28973 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
28974 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28976 /* ??? By keeping the returned value relatively high, we limit the number
28977 of moves between integer and MMX/SSE registers for all targets.
28978 Additionally, a high value prevents a problem with x86_modes_tieable_p(),
28979 where integer modes in MMX/SSE registers are not tieable
28980 because of missing QImode and HImode moves to, from or between
28981 MMX/SSE registers. */
28982 return MAX (8, ix86_cost->mmxsse_to_integer);
28984 if (MAYBE_FLOAT_CLASS_P (class1))
28985 return ix86_cost->fp_move;
28986 if (MAYBE_SSE_CLASS_P (class1))
28987 return ix86_cost->sse_move;
28988 if (MAYBE_MMX_CLASS_P (class1))
28989 return ix86_cost->mmx_move;
28993 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
28996 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
28998 /* Flags, and only flags, can hold CCmode values. */
28999 if (CC_REGNO_P (regno))
29000 return GET_MODE_CLASS (mode) == MODE_CC;
29001 if (GET_MODE_CLASS (mode) == MODE_CC
29002 || GET_MODE_CLASS (mode) == MODE_RANDOM
29003 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
29005 if (FP_REGNO_P (regno))
29006 return VALID_FP_MODE_P (mode);
29007 if (SSE_REGNO_P (regno))
29009 /* We implement the move patterns for all vector modes into and
29010 out of SSE registers, even when no operation instructions
29011 are available. OImode moves are available only when AVX is enabled. */
29013 return ((TARGET_AVX && mode == OImode)
29014 || VALID_AVX256_REG_MODE (mode)
29015 || VALID_SSE_REG_MODE (mode)
29016 || VALID_SSE2_REG_MODE (mode)
29017 || VALID_MMX_REG_MODE (mode)
29018 || VALID_MMX_REG_MODE_3DNOW (mode));
29020 if (MMX_REGNO_P (regno))
29022 /* We implement the move patterns for 3DNOW modes even in MMX mode,
29023 so if the register is available at all, then we can move data of
29024 the given mode into or out of it. */
29025 return (VALID_MMX_REG_MODE (mode)
29026 || VALID_MMX_REG_MODE_3DNOW (mode));
29029 if (mode == QImode)
29031 /* Take care for QImode values - they can be in non-QI regs,
29032 but then they do cause partial register stalls. */
29033 if (regno <= BX_REG || TARGET_64BIT)
29035 if (!TARGET_PARTIAL_REG_STALL)
29037 return reload_in_progress || reload_completed;
29039 /* We handle both integers and floats in the general purpose registers. */
29040 else if (VALID_INT_MODE_P (mode))
29042 else if (VALID_FP_MODE_P (mode))
29044 else if (VALID_DFP_MODE_P (mode))
29046 /* Lots of MMX code casts 8-byte vector modes to DImode. If we then go
29047 on to use that value in smaller contexts, this can easily force a
29048 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
29049 supporting DImode, allow it. */
29050 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
29056 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
29057 tieable integer mode. */
29060 ix86_tieable_integer_mode_p (enum machine_mode mode)
29069 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
29072 return TARGET_64BIT;
29079 /* Return true if MODE1 is accessible in a register that can hold MODE2
29080 without copying. That is, all register classes that can hold MODE2
29081 can also hold MODE1. */
29084 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
29086 if (mode1 == mode2)
29089 if (ix86_tieable_integer_mode_p (mode1)
29090 && ix86_tieable_integer_mode_p (mode2))
29093 /* MODE2 being XFmode implies fp stack or general regs, which means we
29094 can tie any smaller floating point modes to it. Note that we do not
29095 tie this with TFmode. */
29096 if (mode2 == XFmode)
29097 return mode1 == SFmode || mode1 == DFmode;
29099 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
29100 that we can tie it with SFmode. */
29101 if (mode2 == DFmode)
29102 return mode1 == SFmode;
29104 /* If MODE2 is only appropriate for an SSE register, then tie with
29105 any other mode acceptable to SSE registers. */
29106 if (GET_MODE_SIZE (mode2) == 16
29107 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
29108 return (GET_MODE_SIZE (mode1) == 16
29109 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
29111 /* If MODE2 is appropriate for an MMX register, then tie
29112 with any other mode acceptable to MMX registers. */
29113 if (GET_MODE_SIZE (mode2) == 8
29114 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
29115 return (GET_MODE_SIZE (mode1) == 8
29116 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
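/* For illustration: under these rules V4SF and V2DI are tieable (both
   are 16 bytes and valid for SSE registers), and SFmode can be tied to
   a DFmode or XFmode register, since every class that can hold the
   wider mode can also hold SFmode.  */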
29121 /* Compute a (partial) cost for rtx X. Return true if the complete
29122 cost has been computed, and false if subexpressions should be
29123 scanned. In either case, *TOTAL contains the cost result. */
29126 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
29128 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
29129 enum machine_mode mode = GET_MODE (x);
29130 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
29138 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
29140 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
29142 else if (flag_pic && SYMBOLIC_CONST (x)
29144 || (GET_CODE (x) != LABEL_REF
29145 && (GET_CODE (x) != SYMBOL_REF
29146 || !SYMBOL_REF_LOCAL_P (x)))))
29153 if (mode == VOIDmode)
29156 switch (standard_80387_constant_p (x))
29161 default: /* Other constants */
29166 /* Start with (MEM (SYMBOL_REF)), since that's where
29167 it'll probably end up. Add a penalty for size. */
29168 *total = (COSTS_N_INSNS (1)
29169 + (flag_pic != 0 && !TARGET_64BIT)
29170 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
29176 /* The zero extension is often completely free on x86_64, so make
29177 it as cheap as possible. */
29178 if (TARGET_64BIT && mode == DImode
29179 && GET_MODE (XEXP (x, 0)) == SImode)
29181 else if (TARGET_ZERO_EXTEND_WITH_AND)
29182 *total = cost->add;
29184 *total = cost->movzx;
29188 *total = cost->movsx;
29192 if (CONST_INT_P (XEXP (x, 1))
29193 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
29195 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
29198 *total = cost->add;
29201 if ((value == 2 || value == 3)
29202 && cost->lea <= cost->shift_const)
29204 *total = cost->lea;
29214 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
29216 if (CONST_INT_P (XEXP (x, 1)))
29218 if (INTVAL (XEXP (x, 1)) > 32)
29219 *total = cost->shift_const + COSTS_N_INSNS (2);
29221 *total = cost->shift_const * 2;
29225 if (GET_CODE (XEXP (x, 1)) == AND)
29226 *total = cost->shift_var * 2;
29228 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
29233 if (CONST_INT_P (XEXP (x, 1)))
29234 *total = cost->shift_const;
29236 *total = cost->shift_var;
29244 gcc_assert (FLOAT_MODE_P (mode));
29245 gcc_assert (TARGET_FMA || TARGET_FMA4);
29247 /* ??? SSE scalar/vector cost should be used here. */
29248 /* ??? Bald assumption that fma has the same cost as fmul. */
29249 *total = cost->fmul;
29250 *total += rtx_cost (XEXP (x, 1), FMA, speed);
29252 /* A negate in op0 or op2 is free: FMS, FNMA, FNMS. */
29254 if (GET_CODE (sub) == NEG)
29256 *total += rtx_cost (sub, FMA, speed);
29259 if (GET_CODE (sub) == NEG)
29261 *total += rtx_cost (sub, FMA, speed);
29266 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29268 /* ??? SSE scalar cost should be used here. */
29269 *total = cost->fmul;
29272 else if (X87_FLOAT_MODE_P (mode))
29274 *total = cost->fmul;
29277 else if (FLOAT_MODE_P (mode))
29279 /* ??? SSE vector cost should be used here. */
29280 *total = cost->fmul;
29285 rtx op0 = XEXP (x, 0);
29286 rtx op1 = XEXP (x, 1);
29288 if (CONST_INT_P (XEXP (x, 1)))
29290 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
29291 for (nbits = 0; value != 0; value &= value - 1)
29295 /* This is arbitrary. */
29298 /* Compute costs correctly for widening multiplication. */
29299 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
29300 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
29301 == GET_MODE_SIZE (mode))
29303 int is_mulwiden = 0;
29304 enum machine_mode inner_mode = GET_MODE (op0);
29306 if (GET_CODE (op0) == GET_CODE (op1))
29307 is_mulwiden = 1, op1 = XEXP (op1, 0);
29308 else if (CONST_INT_P (op1))
29310 if (GET_CODE (op0) == SIGN_EXTEND)
29311 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
29314 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
29318 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
29321 *total = (cost->mult_init[MODE_INDEX (mode)]
29322 + nbits * cost->mult_bit
29323 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
29332 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29333 /* ??? SSE cost should be used here. */
29334 *total = cost->fdiv;
29335 else if (X87_FLOAT_MODE_P (mode))
29336 *total = cost->fdiv;
29337 else if (FLOAT_MODE_P (mode))
29338 /* ??? SSE vector cost should be used here. */
29339 *total = cost->fdiv;
29341 *total = cost->divide[MODE_INDEX (mode)];
29345 if (GET_MODE_CLASS (mode) == MODE_INT
29346 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
29348 if (GET_CODE (XEXP (x, 0)) == PLUS
29349 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
29350 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
29351 && CONSTANT_P (XEXP (x, 1)))
29353 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
29354 if (val == 2 || val == 4 || val == 8)
29356 *total = cost->lea;
29357 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
29358 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
29359 outer_code, speed);
29360 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29364 else if (GET_CODE (XEXP (x, 0)) == MULT
29365 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
29367 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
29368 if (val == 2 || val == 4 || val == 8)
29370 *total = cost->lea;
29371 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
29372 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29376 else if (GET_CODE (XEXP (x, 0)) == PLUS)
29378 *total = cost->lea;
29379 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
29380 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
29381 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29388 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29390 /* ??? SSE cost should be used here. */
29391 *total = cost->fadd;
29394 else if (X87_FLOAT_MODE_P (mode))
29396 *total = cost->fadd;
29399 else if (FLOAT_MODE_P (mode))
29401 /* ??? SSE vector cost should be used here. */
29402 *total = cost->fadd;
29410 if (!TARGET_64BIT && mode == DImode)
29412 *total = (cost->add * 2
29413 + (rtx_cost (XEXP (x, 0), outer_code, speed)
29414 << (GET_MODE (XEXP (x, 0)) != DImode))
29415 + (rtx_cost (XEXP (x, 1), outer_code, speed)
29416 << (GET_MODE (XEXP (x, 1)) != DImode)));
29422 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29424 /* ??? SSE cost should be used here. */
29425 *total = cost->fchs;
29428 else if (X87_FLOAT_MODE_P (mode))
29430 *total = cost->fchs;
29433 else if (FLOAT_MODE_P (mode))
29435 /* ??? SSE vector cost should be used here. */
29436 *total = cost->fchs;
29442 if (!TARGET_64BIT && mode == DImode)
29443 *total = cost->add * 2;
29445 *total = cost->add;
29449 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
29450 && XEXP (XEXP (x, 0), 1) == const1_rtx
29451 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
29452 && XEXP (x, 1) == const0_rtx)
29454 /* This kind of construct is implemented using test[bwl].
29455 Treat it as if we had an AND. */
29456 *total = (cost->add
29457 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
29458 + rtx_cost (const1_rtx, outer_code, speed));
29464 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
29469 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29470 /* ??? SSE cost should be used here. */
29471 *total = cost->fabs;
29472 else if (X87_FLOAT_MODE_P (mode))
29473 *total = cost->fabs;
29474 else if (FLOAT_MODE_P (mode))
29475 /* ??? SSE vector cost should be used here. */
29476 *total = cost->fabs;
29480 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29481 /* ??? SSE cost should be used here. */
29482 *total = cost->fsqrt;
29483 else if (X87_FLOAT_MODE_P (mode))
29484 *total = cost->fsqrt;
29485 else if (FLOAT_MODE_P (mode))
29486 /* ??? SSE vector cost should be used here. */
29487 *total = cost->fsqrt;
29491 if (XINT (x, 1) == UNSPEC_TP)
29498 case VEC_DUPLICATE:
29499 /* ??? Assume all of these vector manipulation patterns are
29500 recognizable, in which case they all pretty much have the same cost. */
29502 *total = COSTS_N_INSNS (1);
29512 static int current_machopic_label_num;
29514 /* Given a symbol name and its associated stub, write out the
29515 definition of the stub. */
29518 machopic_output_stub (FILE *file, const char *symb, const char *stub)
29520 unsigned int length;
29521 char *binder_name, *symbol_name, lazy_ptr_name[32];
29522 int label = ++current_machopic_label_num;
29524 /* For 64-bit we shouldn't get here. */
29525 gcc_assert (!TARGET_64BIT);
29527 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
29528 symb = targetm.strip_name_encoding (symb);
29530 length = strlen (stub);
29531 binder_name = XALLOCAVEC (char, length + 32);
29532 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
29534 length = strlen (symb);
29535 symbol_name = XALLOCAVEC (char, length + 32);
29536 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
29538 sprintf (lazy_ptr_name, "L%d$lz", label);
29540 if (MACHOPIC_ATT_STUB)
29541 switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
29542 else if (MACHOPIC_PURE)
29544 if (TARGET_DEEP_BRANCH_PREDICTION)
29545 switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
29547 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
29550 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
29552 fprintf (file, "%s:\n", stub);
29553 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29555 if (MACHOPIC_ATT_STUB)
29557 fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
29559 else if (MACHOPIC_PURE)
29562 if (TARGET_DEEP_BRANCH_PREDICTION)
29564 /* 25-byte PIC stub using "CALL get_pc_thunk". */
29565 rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
29566 output_set_got (tmp, NULL_RTX); /* "CALL ___<cpu>.get_pc_thunk.cx". */
29567 fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n", label, lazy_ptr_name, label);
29571 /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %ecx". */
29572 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%ecx\n", label, label);
29573 fprintf (file, "\tmovl %s-LPC$%d(%%ecx),%%ecx\n", lazy_ptr_name, label);
29575 fprintf (file, "\tjmp\t*%%ecx\n");
29578 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
29580 /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
29581 it needs no stub-binding-helper. */
29582 if (MACHOPIC_ATT_STUB)
29585 fprintf (file, "%s:\n", binder_name);
29589 fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
29590 fprintf (file, "\tpushl\t%%ecx\n");
29593 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
29595 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
29597 /* N.B. Keep the correspondence of these
29598 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
29599 old-pic/new-pic/non-pic stubs; altering this will break
29600 compatibility with existing dylibs. */
29604 if (TARGET_DEEP_BRANCH_PREDICTION)
29605 /* 25-byte PIC stub using "CALL get_pc_thunk". */
29606 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
29608 /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %ecx". */
29609 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
29612 /* 16-byte -mdynamic-no-pic stub. */
29613 switch_to_section(darwin_sections[machopic_lazy_symbol_ptr3_section]);
29615 fprintf (file, "%s:\n", lazy_ptr_name);
29616 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29617 fprintf (file, ASM_LONG "%s\n", binder_name);
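/* Schematically (names illustrative, for a symbol _foo with label 1), the
   MACHOPIC_PURE variant with deep branch prediction emitted above is:

   L_foo$stub:
           .indirect_symbol _foo
           call    ___i686.get_pc_thunk.cx
   LPC$1:  movl    L1$lz-LPC$1(%ecx),%ecx
           jmp     *%ecx
   L_foo$stub_binder:
           lea     L1$lz-L_foo$stub_binder(%ecx),%ecx
           pushl   %ecx
           jmp     dyld_stub_binding_helper
   L1$lz:
           .indirect_symbol _foo
           .long   L_foo$stub_binder

   The lazy pointer initially holds the binder's address; dyld overwrites
   it with the resolved symbol address on the first call. */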
29619 #endif /* TARGET_MACHO */
29621 /* Order the registers for the register allocator. */
29624 x86_order_regs_for_local_alloc (void)
29629 /* First allocate the local general purpose registers. */
29630 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29631 if (GENERAL_REGNO_P (i) && call_used_regs[i])
29632 reg_alloc_order [pos++] = i;
29634 /* Global general purpose registers. */
29635 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29636 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
29637 reg_alloc_order [pos++] = i;
29639 /* x87 registers come first in case we are doing FP math using them. */
29641 if (!TARGET_SSE_MATH)
29642 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29643 reg_alloc_order [pos++] = i;
29645 /* SSE registers. */
29646 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
29647 reg_alloc_order [pos++] = i;
29648 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
29649 reg_alloc_order [pos++] = i;
29651 /* x87 registers. */
29652 if (TARGET_SSE_MATH)
29653 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29654 reg_alloc_order [pos++] = i;
29656 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
29657 reg_alloc_order [pos++] = i;
29659 /* Initialize the rest of the array, as we do not allocate some registers at all. */
29661 while (pos < FIRST_PSEUDO_REGISTER)
29662 reg_alloc_order [pos++] = 0;
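/* Illustratively, on 32-bit targets this puts the call-clobbered general
   registers (eax, edx, ecx among them) first, then the call-saved ones
   (ebx, esi, edi, ebp), with the x87, SSE and MMX registers ordered by
   the TARGET_SSE_MATH test above. */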
29665 /* Handle a "callee_pop_aggregate_return" attribute; arguments as
29666 in struct attribute_spec.handler. */
29668 ix86_handle_callee_pop_aggregate_return (tree *node, tree name,
29670 int flags ATTRIBUTE_UNUSED,
29671 bool *no_add_attrs)
29673 if (TREE_CODE (*node) != FUNCTION_TYPE
29674 && TREE_CODE (*node) != METHOD_TYPE
29675 && TREE_CODE (*node) != FIELD_DECL
29676 && TREE_CODE (*node) != TYPE_DECL)
29678 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29680 *no_add_attrs = true;
29685 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
29687 *no_add_attrs = true;
29690 if (is_attribute_p ("callee_pop_aggregate_return", name))
29694 cst = TREE_VALUE (args);
29695 if (TREE_CODE (cst) != INTEGER_CST)
29697 warning (OPT_Wattributes,
29698 "%qE attribute requires an integer constant argument",
29700 *no_add_attrs = true;
29702 else if (compare_tree_int (cst, 0) != 0
29703 && compare_tree_int (cst, 1) != 0)
29705 warning (OPT_Wattributes,
29706 "argument to %qE attribute is neither zero, nor one",
29708 *no_add_attrs = true;
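/* Usage sketch (hypothetical declaration):

   struct S ret_s (void) __attribute__ ((callee_pop_aggregate_return (1)));

   The argument, checked above to be the integer constant 0 or 1, says
   whether the callee pops the hidden pointer to the returned aggregate. */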
29717 /* Handle a "ms_abi" or "sysv_abi" attribute; arguments as in
29718 struct attribute_spec.handler. */
29720 ix86_handle_abi_attribute (tree *node, tree name,
29721 tree args ATTRIBUTE_UNUSED,
29722 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29724 if (TREE_CODE (*node) != FUNCTION_TYPE
29725 && TREE_CODE (*node) != METHOD_TYPE
29726 && TREE_CODE (*node) != FIELD_DECL
29727 && TREE_CODE (*node) != TYPE_DECL)
29729 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29731 *no_add_attrs = true;
29736 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
29738 *no_add_attrs = true;
29742 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
29743 if (is_attribute_p ("ms_abi", name))
29745 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
29747 error ("ms_abi and sysv_abi attributes are not compatible");
29752 else if (is_attribute_p ("sysv_abi", name))
29754 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
29756 error ("ms_abi and sysv_abi attributes are not compatible");
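/* Usage sketch (hypothetical declarations):

   int f (int) __attribute__ ((ms_abi));
   int g (int) __attribute__ ((sysv_abi));

   Only one of the two calling-convention attributes may appear on a
   given function type; the lookups above reject the combination. */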
29765 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
29766 struct attribute_spec.handler. */
29768 ix86_handle_struct_attribute (tree *node, tree name,
29769 tree args ATTRIBUTE_UNUSED,
29770 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29773 if (DECL_P (*node))
29775 if (TREE_CODE (*node) == TYPE_DECL)
29776 type = &TREE_TYPE (*node);
29781 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
29782 || TREE_CODE (*type) == UNION_TYPE)))
29784 warning (OPT_Wattributes, "%qE attribute ignored",
29786 *no_add_attrs = true;
29789 else if ((is_attribute_p ("ms_struct", name)
29790 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
29791 || ((is_attribute_p ("gcc_struct", name)
29792 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
29794 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
29796 *no_add_attrs = true;
29803 ix86_handle_fndecl_attribute (tree *node, tree name,
29804 tree args ATTRIBUTE_UNUSED,
29805 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29807 if (TREE_CODE (*node) != FUNCTION_DECL)
29809 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29811 *no_add_attrs = true;
29817 ix86_ms_bitfield_layout_p (const_tree record_type)
29819 return ((TARGET_MS_BITFIELD_LAYOUT
29820 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
29821 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
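/* For instance (illustrative), with

   struct __attribute__ ((ms_struct)) S { char c : 4; int i : 4; };

   the MS layout does not let c and i share a storage unit because their
   declared types differ, while the default GCC layout packs them; this
   predicate selects which convention a given RECORD_TYPE uses. */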
29824 /* Returns an expression indicating where the this parameter is
29825 located on entry to the FUNCTION. */
29828 x86_this_parameter (tree function)
29830 tree type = TREE_TYPE (function);
29831 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
29836 const int *parm_regs;
29838 if (ix86_function_type_abi (type) == MS_ABI)
29839 parm_regs = x86_64_ms_abi_int_parameter_registers;
29841 parm_regs = x86_64_int_parameter_registers;
29842 return gen_rtx_REG (DImode, parm_regs[aggr]);
29845 nregs = ix86_function_regparm (type, function);
29847 if (nregs > 0 && !stdarg_p (type))
29851 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
29852 regno = aggr ? DX_REG : CX_REG;
29853 else if (ix86_is_type_thiscall (type))
29857 return gen_rtx_MEM (SImode,
29858 plus_constant (stack_pointer_rtx, 4));
29867 return gen_rtx_MEM (SImode,
29868 plus_constant (stack_pointer_rtx, 4));
29871 return gen_rtx_REG (SImode, regno);
29874 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
29877 /* Determine whether x86_output_mi_thunk can succeed. */
29880 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
29881 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
29882 HOST_WIDE_INT vcall_offset, const_tree function)
29884 /* 64-bit can handle anything. */
29888 /* For 32-bit, everything's fine if we have one free register. */
29889 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
29892 /* Need a free register for vcall_offset. */
29896 /* Need a free register for GOT references. */
29897 if (flag_pic && !targetm.binds_local_p (function))
29900 /* Otherwise ok. */
29904 /* Output the assembler code for a thunk function. THUNK_DECL is the
29905 declaration for the thunk function itself, FUNCTION is the decl for
29906 the target function. DELTA is an immediate constant offset to be
29907 added to THIS. If VCALL_OFFSET is nonzero, the word at
29908 *(*this + vcall_offset) should be added to THIS. */
29911 x86_output_mi_thunk (FILE *file,
29912 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
29913 HOST_WIDE_INT vcall_offset, tree function)
29916 rtx this_param = x86_this_parameter (function);
29919 /* Make sure unwind info is emitted for the thunk if needed. */
29920 final_start_function (emit_barrier (), file, 1);
29922 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
29923 pull it in now and let DELTA benefit. */
29924 if (REG_P (this_param))
29925 this_reg = this_param;
29926 else if (vcall_offset)
29928 /* Put the this parameter into %eax. */
29929 xops[0] = this_param;
29930 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
29931 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29934 this_reg = NULL_RTX;
29936 /* Adjust the this parameter by a fixed constant. */
29939 xops[0] = GEN_INT (delta);
29940 xops[1] = this_reg ? this_reg : this_param;
29943 if (!x86_64_general_operand (xops[0], DImode))
29945 tmp = gen_rtx_REG (DImode, R10_REG);
29947 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
29949 xops[1] = this_param;
29951 if (x86_maybe_negate_const_int (&xops[0], DImode))
29952 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
29954 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
29956 else if (x86_maybe_negate_const_int (&xops[0], SImode))
29957 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
29959 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
29962 /* Adjust the this parameter by a value stored in the vtable. */
29966 tmp = gen_rtx_REG (DImode, R10_REG);
29969 int tmp_regno = CX_REG;
29970 if (lookup_attribute ("fastcall",
29971 TYPE_ATTRIBUTES (TREE_TYPE (function)))
29972 || ix86_is_type_thiscall (TREE_TYPE (function)))
29973 tmp_regno = AX_REG;
29974 tmp = gen_rtx_REG (SImode, tmp_regno);
29977 xops[0] = gen_rtx_MEM (Pmode, this_reg);
29979 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29981 /* Adjust the this parameter. */
29982 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
29983 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
29985 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
29986 xops[0] = GEN_INT (vcall_offset);
29988 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
29989 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
29991 xops[1] = this_reg;
29992 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
29995 /* If necessary, drop THIS back to its stack slot. */
29996 if (this_reg && this_reg != this_param)
29998 xops[0] = this_reg;
29999 xops[1] = this_param;
30000 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
30003 xops[0] = XEXP (DECL_RTL (function), 0);
30006 if (!flag_pic || targetm.binds_local_p (function)
30007 || DEFAULT_ABI == MS_ABI)
30008 output_asm_insn ("jmp\t%P0", xops);
30009 /* All thunks should be in the same object as their target,
30010 and thus binds_local_p should be true. */
30011 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
30012 gcc_unreachable ();
30015 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
30016 tmp = gen_rtx_CONST (Pmode, tmp);
30017 tmp = gen_rtx_MEM (QImode, tmp);
30019 output_asm_insn ("jmp\t%A0", xops);
30024 if (!flag_pic || targetm.binds_local_p (function))
30025 output_asm_insn ("jmp\t%P0", xops);
30030 rtx sym_ref = XEXP (DECL_RTL (function), 0);
30031 if (TARGET_MACHO_BRANCH_ISLANDS)
30032 sym_ref = (gen_rtx_SYMBOL_REF
30034 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
30035 tmp = gen_rtx_MEM (QImode, sym_ref);
30037 output_asm_insn ("jmp\t%0", xops);
30040 #endif /* TARGET_MACHO */
30042 tmp = gen_rtx_REG (SImode, CX_REG);
30043 output_set_got (tmp, NULL_RTX);
30046 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
30047 output_asm_insn ("jmp\t{*}%1", xops);
30050 final_end_function ();
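/* Flavor of the output (illustrative: 32-bit, this pointer on the stack,
   DELTA = 8, no vcall offset, locally bound target):

   addl    $8, 4(%esp)
   jmp     target_function

   i.e. the thunk adjusts the incoming this pointer in place and
   tail-jumps to FUNCTION. */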
30054 x86_file_start (void)
30056 default_file_start ();
30058 darwin_file_start ();
30060 if (X86_FILE_START_VERSION_DIRECTIVE)
30061 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
30062 if (X86_FILE_START_FLTUSED)
30063 fputs ("\t.global\t__fltused\n", asm_out_file);
30064 if (ix86_asm_dialect == ASM_INTEL)
30065 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
30069 x86_field_alignment (tree field, int computed)
30071 enum machine_mode mode;
30072 tree type = TREE_TYPE (field);
30074 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
30076 mode = TYPE_MODE (strip_array_types (type));
30077 if (mode == DFmode || mode == DCmode
30078 || GET_MODE_CLASS (mode) == MODE_INT
30079 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
30080 return MIN (32, computed);
30084 /* Output assembler code to FILE to increment profiler label # LABELNO
30085 for profiling a function entry. */
30087 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
30089 const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
30094 #ifndef NO_PROFILE_COUNTERS
30095 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
30098 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
30099 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
30101 fprintf (file, "\tcall\t%s\n", mcount_name);
30105 #ifndef NO_PROFILE_COUNTERS
30106 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
30109 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
30113 #ifndef NO_PROFILE_COUNTERS
30114 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
30117 fprintf (file, "\tcall\t%s\n", mcount_name);
30121 /* We don't have exact information about the insn sizes, but we may assume
30122 quite safely that we are informed about all 1 byte insns and memory
30123 address sizes. This is enough to eliminate unnecessary padding in 99% of cases. */
30127 min_insn_size (rtx insn)
30131 if (!INSN_P (insn) || !active_insn_p (insn))
30134 /* Discard alignments we've emitted, and jump instructions. */
30135 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
30136 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
30138 if (JUMP_TABLE_DATA_P (insn))
30141 /* Important case - calls are always 5 bytes.
30142 It is common to have many calls in a row. */
30144 && symbolic_reference_mentioned_p (PATTERN (insn))
30145 && !SIBLING_CALL_P (insn))
30147 len = get_attr_length (insn);
30151 /* For normal instructions we rely on get_attr_length being exact,
30152 with a few exceptions. */
30153 if (!JUMP_P (insn))
30155 enum attr_type type = get_attr_type (insn);
30160 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
30161 || asm_noperands (PATTERN (insn)) >= 0)
30168 /* Otherwise trust get_attr_length. */
30172 l = get_attr_length_address (insn);
30173 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
30182 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30184 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte window. */
30188 ix86_avoid_jump_mispredicts (void)
30190 rtx insn, start = get_insns ();
30191 int nbytes = 0, njumps = 0;
30194 /* Look for all minimal intervals of instructions containing 4 jumps.
30195 The intervals are bounded by START and INSN. NBYTES is the total
30196 size of the instructions in the interval, including INSN but not
30197 including START. When NBYTES is smaller than 16, it is possible
30198 that the end of START and INSN fall within the same 16-byte window.
30200 The smallest offset at which INSN can start in that window is the case
30201 where START ends at offset 0; INSN then starts at NBYTES - sizeof (INSN).
30202 So we emit a p2align to a 16-byte boundary with maxskip 15 - NBYTES + sizeof (INSN). */
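/* Worked example (illustrative): let START be a jump whose last byte
   falls at offset 0 of a 16-byte window, followed by three 2-byte jumps,
   the last being INSN; NBYTES = 6. INSN then starts at offset
   NBYTES - 2 = 4, so the window would contain four jumps. The padding
   emitted below, with maxskip 15 - 6 + 2 = 11, pushes INSN out of any
   such window. */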
30204 for (insn = start; insn; insn = NEXT_INSN (insn))
30208 if (LABEL_P (insn))
30210 int align = label_to_alignment (insn);
30211 int max_skip = label_to_max_skip (insn);
30215 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
30216 already in the current 16 byte page, because otherwise
30217 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
30218 bytes to reach 16 byte boundary. */
30220 || (align <= 3 && max_skip != (1 << align) - 1))
30223 fprintf (dump_file, "Label %i with max_skip %i\n",
30224 INSN_UID (insn), max_skip);
30227 while (nbytes + max_skip >= 16)
30229 start = NEXT_INSN (start);
30230 if ((JUMP_P (start)
30231 && GET_CODE (PATTERN (start)) != ADDR_VEC
30232 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
30234 njumps--, isjump = 1;
30237 nbytes -= min_insn_size (start);
30243 min_size = min_insn_size (insn);
30244 nbytes += min_size;
30246 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
30247 INSN_UID (insn), min_size);
30249 && GET_CODE (PATTERN (insn)) != ADDR_VEC
30250 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
30258 start = NEXT_INSN (start);
30259 if ((JUMP_P (start)
30260 && GET_CODE (PATTERN (start)) != ADDR_VEC
30261 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
30263 njumps--, isjump = 1;
30266 nbytes -= min_insn_size (start);
30268 gcc_assert (njumps >= 0);
30270 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
30271 INSN_UID (start), INSN_UID (insn), nbytes);
30273 if (njumps == 3 && isjump && nbytes < 16)
30275 int padsize = 15 - nbytes + min_insn_size (insn);
30278 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
30279 INSN_UID (insn), padsize);
30280 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
30286 /* AMD Athlon works faster
30287 when RET is not the destination of a conditional jump and is not
30288 directly preceded by another jump instruction. We avoid the penalty
30289 by inserting a NOP just before such RET instructions. */
30291 ix86_pad_returns (void)
30296 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30298 basic_block bb = e->src;
30299 rtx ret = BB_END (bb);
30301 bool replace = false;
30303 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
30304 || optimize_bb_for_size_p (bb))
30306 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
30307 if (active_insn_p (prev) || LABEL_P (prev))
30309 if (prev && LABEL_P (prev))
30314 FOR_EACH_EDGE (e, ei, bb->preds)
30315 if (EDGE_FREQUENCY (e) && e->src->index >= 0
30316 && !(e->flags & EDGE_FALLTHRU))
30321 prev = prev_active_insn (ret);
30323 && ((JUMP_P (prev) && any_condjump_p (prev))
30326 /* Empty functions get branch mispredict even when
30327 the jump destination is not visible to us. */
30328 if (!prev && !optimize_function_for_size_p (cfun))
30333 emit_jump_insn_before (gen_return_internal_long (), ret);
30339 /* Count the minimum number of instructions in BB. Return 4 if the
30340 number of instructions >= 4. */
30343 ix86_count_insn_bb (basic_block bb)
30346 int insn_count = 0;
30348 /* Count number of instructions in this block. Return 4 if the number
30349 of instructions >= 4. */
30350 FOR_BB_INSNS (bb, insn)
30352 /* This only happens in exit blocks. */
30354 && GET_CODE (PATTERN (insn)) == RETURN)
30357 if (NONDEBUG_INSN_P (insn)
30358 && GET_CODE (PATTERN (insn)) != USE
30359 && GET_CODE (PATTERN (insn)) != CLOBBER)
30362 if (insn_count >= 4)
30371 /* Count the minimum number of instructions executed along a code path
30372 ending in BB. Return 4 if the number of instructions >= 4. */
30375 ix86_count_insn (basic_block bb)
30379 int min_prev_count;
30381 /* Only bother counting instructions along paths with no
30382 more than 2 basic blocks between entry and exit. Given
30383 that BB has an edge to exit, determine if a predecessor
30384 of BB has an edge from entry. If so, compute the number
30385 of instructions in the predecessor block. If there
30386 happen to be multiple such blocks, compute the minimum. */
30387 min_prev_count = 4;
30388 FOR_EACH_EDGE (e, ei, bb->preds)
30391 edge_iterator prev_ei;
30393 if (e->src == ENTRY_BLOCK_PTR)
30395 min_prev_count = 0;
30398 FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
30400 if (prev_e->src == ENTRY_BLOCK_PTR)
30402 int count = ix86_count_insn_bb (e->src);
30403 if (count < min_prev_count)
30404 min_prev_count = count;
30410 if (min_prev_count < 4)
30411 min_prev_count += ix86_count_insn_bb (bb);
30413 return min_prev_count;
30416 /* Pad short function to 4 instructions. */
30419 ix86_pad_short_function (void)
30424 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30426 rtx ret = BB_END (e->src);
30427 if (JUMP_P (ret) && GET_CODE (PATTERN (ret)) == RETURN)
30429 int insn_count = ix86_count_insn (e->src);
30431 /* Pad short function. */
30432 if (insn_count < 4)
30436 /* Find epilogue. */
30439 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
30440 insn = PREV_INSN (insn);
30445 /* Two NOPs count as one instruction. */
30446 insn_count = 2 * (4 - insn_count);
30447 emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
30453 /* Implement machine-specific optimizations: padding of returns
30454 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
30458 /* We are freeing block_for_insn in the toplev to keep compatibility
30459 with old MDEP_REORGS that are not CFG based. Recompute it now. */
30460 compute_bb_for_insn ();
30462 if (optimize && optimize_function_for_speed_p (cfun))
30464 if (TARGET_PAD_SHORT_FUNCTION)
30465 ix86_pad_short_function ();
30466 else if (TARGET_PAD_RETURNS)
30467 ix86_pad_returns ();
30468 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30469 if (TARGET_FOUR_JUMP_LIMIT)
30470 ix86_avoid_jump_mispredicts ();
30474 /* Run the vzeroupper optimization if needed. */
30475 if (TARGET_VZEROUPPER)
30476 move_or_delete_vzeroupper ();
30479 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
30482 x86_extended_QIreg_mentioned_p (rtx insn)
30485 extract_insn_cached (insn);
30486 for (i = 0; i < recog_data.n_operands; i++)
30487 if (REG_P (recog_data.operand[i])
30488 && REGNO (recog_data.operand[i]) > BX_REG)
30493 /* Return nonzero when P points to a register encoded via a REX prefix.
30494 Called via for_each_rtx. */
30496 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
30498 unsigned int regno;
30501 regno = REGNO (*p);
30502 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
30505 /* Return true when INSN mentions register that must be encoded using REX
30508 x86_extended_reg_mentioned_p (rtx insn)
30510 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
30511 extended_reg_mentioned_1, NULL);
30514 /* If profitable, negate (without causing overflow) integer constant
30515 of mode MODE at location LOC. Return true in this case. */
30517 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
30521 if (!CONST_INT_P (*loc))
30527 /* DImode x86_64 constants must fit in 32 bits. */
30528 gcc_assert (x86_64_immediate_operand (*loc, mode));
30539 gcc_unreachable ();
30542 /* Avoid overflows. */
30543 if (mode_signbit_p (mode, *loc))
30546 val = INTVAL (*loc);
30548 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
30549 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
30550 if ((val < 0 && val != -128)
30553 *loc = GEN_INT (-val);
30560 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
30561 optabs would emit if we didn't have TFmode patterns. */
30564 x86_emit_floatuns (rtx operands[2])
30566 rtx neglab, donelab, i0, i1, f0, in, out;
30567 enum machine_mode mode, inmode;
30569 inmode = GET_MODE (operands[1]);
30570 gcc_assert (inmode == SImode || inmode == DImode);
30573 in = force_reg (inmode, operands[1]);
30574 mode = GET_MODE (out);
30575 neglab = gen_label_rtx ();
30576 donelab = gen_label_rtx ();
30577 f0 = gen_reg_rtx (mode);
30579 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
30581 expand_float (out, in, 0);
30583 emit_jump_insn (gen_jump (donelab));
30586 emit_label (neglab);
30588 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
30590 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
30592 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
30594 expand_float (f0, i0, 0);
30596 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
30598 emit_label (donelab);
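/* The negative path above is a round-to-odd halving (explanatory sketch):
   i0 = (in >> 1) | (in & 1) halves the value while folding the shifted-out
   bit back in as a sticky bit, so converting i0 and then doubling via
   f0 + f0 rounds only once. E.g. for in = 0x80000003, i0 = 0x40000001,
   and 2 * (float) i0 matches a direct, correctly rounded conversion
   of 2147483651. */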
30601 /* AVX does not support 32-byte integer vector operations,
30602 thus the longest vector we are faced with is V16QImode. */
30603 #define MAX_VECT_LEN 16
30605 struct expand_vec_perm_d
30607 rtx target, op0, op1;
30608 unsigned char perm[MAX_VECT_LEN];
30609 enum machine_mode vmode;
30610 unsigned char nelt;
30614 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
30615 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
30617 /* Get a vector mode of the same size as the original but with elements
30618 twice as wide. This is only guaranteed to apply to integral vectors. */
30620 static inline enum machine_mode
30621 get_mode_wider_vector (enum machine_mode o)
30623 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
30624 enum machine_mode n = GET_MODE_WIDER_MODE (o);
30625 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
30626 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
30630 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30631 with all elements equal to VAR. Return true if successful. */
30634 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
30635 rtx target, rtx val)
30658 /* First attempt to recognize VAL as-is. */
30659 dup = gen_rtx_VEC_DUPLICATE (mode, val);
30660 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
30661 if (recog_memoized (insn) < 0)
30664 /* If that fails, force VAL into a register. */
30667 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
30668 seq = get_insns ();
30671 emit_insn_before (seq, insn);
30673 ok = recog_memoized (insn) >= 0;
30682 if (TARGET_SSE || TARGET_3DNOW_A)
30686 val = gen_lowpart (SImode, val);
30687 x = gen_rtx_TRUNCATE (HImode, val);
30688 x = gen_rtx_VEC_DUPLICATE (mode, x);
30689 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30702 struct expand_vec_perm_d dperm;
30706 memset (&dperm, 0, sizeof (dperm));
30707 dperm.target = target;
30708 dperm.vmode = mode;
30709 dperm.nelt = GET_MODE_NUNITS (mode);
30710 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
30712 /* Extend to SImode using a paradoxical SUBREG. */
30713 tmp1 = gen_reg_rtx (SImode);
30714 emit_move_insn (tmp1, gen_lowpart (SImode, val));
30716 /* Insert the SImode value as low element of a V4SImode vector. */
30717 tmp2 = gen_lowpart (V4SImode, dperm.op0);
30718 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
30720 ok = (expand_vec_perm_1 (&dperm)
30721 || expand_vec_perm_broadcast_1 (&dperm));
30733 /* Replicate the value once into the next wider mode and recurse. */
30735 enum machine_mode smode, wsmode, wvmode;
30738 smode = GET_MODE_INNER (mode);
30739 wvmode = get_mode_wider_vector (mode);
30740 wsmode = GET_MODE_INNER (wvmode);
30742 val = convert_modes (wsmode, smode, val, true);
30743 x = expand_simple_binop (wsmode, ASHIFT, val,
30744 GEN_INT (GET_MODE_BITSIZE (smode)),
30745 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30746 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
30748 x = gen_lowpart (wvmode, target);
30749 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
30757 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
30758 rtx x = gen_reg_rtx (hvmode);
30760 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
30763 x = gen_rtx_VEC_CONCAT (mode, x, x);
30764 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30773 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30774 whose ONE_VAR element is VAR, and other elements are zero. Return true if successful. */
30778 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
30779 rtx target, rtx var, int one_var)
30781 enum machine_mode vsimode;
30784 bool use_vector_set = false;
30789 /* For SSE4.1, we normally use vector set. But if the second
30790 element is zero and inter-unit moves are OK, we use movq instead. */
30792 use_vector_set = (TARGET_64BIT
30794 && !(TARGET_INTER_UNIT_MOVES
30800 use_vector_set = TARGET_SSE4_1;
30803 use_vector_set = TARGET_SSE2;
30806 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
30813 use_vector_set = TARGET_AVX;
30816 /* Use ix86_expand_vector_set in 64bit mode only. */
30817 use_vector_set = TARGET_AVX && TARGET_64BIT;
30823 if (use_vector_set)
30825 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
30826 var = force_reg (GET_MODE_INNER (mode), var);
30827 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30843 var = force_reg (GET_MODE_INNER (mode), var);
30844 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
30845 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30850 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
30851 new_target = gen_reg_rtx (mode);
30853 new_target = target;
30854 var = force_reg (GET_MODE_INNER (mode), var);
30855 x = gen_rtx_VEC_DUPLICATE (mode, var);
30856 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
30857 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
30860 /* We need to shuffle the value to the correct position, so
30861 create a new pseudo to store the intermediate result. */
30863 /* With SSE2, we can use the integer shuffle insns. */
30864 if (mode != V4SFmode && TARGET_SSE2)
30866 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
30868 GEN_INT (one_var == 1 ? 0 : 1),
30869 GEN_INT (one_var == 2 ? 0 : 1),
30870 GEN_INT (one_var == 3 ? 0 : 1)));
30871 if (target != new_target)
30872 emit_move_insn (target, new_target);
30876 /* Otherwise convert the intermediate result to V4SFmode and
30877 use the SSE1 shuffle instructions. */
30878 if (mode != V4SFmode)
30880 tmp = gen_reg_rtx (V4SFmode);
30881 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
30886 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
30888 GEN_INT (one_var == 1 ? 0 : 1),
30889 GEN_INT (one_var == 2 ? 0+4 : 1+4),
30890 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
30892 if (mode != V4SFmode)
30893 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
30894 else if (tmp != target)
30895 emit_move_insn (target, tmp);
30897 else if (target != new_target)
30898 emit_move_insn (target, new_target);
30903 vsimode = V4SImode;
30909 vsimode = V2SImode;
30915 /* Zero extend the variable element to SImode and recurse. */
30916 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
30918 x = gen_reg_rtx (vsimode);
30919 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
30921 gcc_unreachable ();
30923 emit_move_insn (target, gen_lowpart (mode, x));
30931 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30932 consisting of the values in VALS. It is known that all elements
30933 except ONE_VAR are constants. Return true if successful. */
30936 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
30937 rtx target, rtx vals, int one_var)
30939 rtx var = XVECEXP (vals, 0, one_var);
30940 enum machine_mode wmode;
30943 const_vec = copy_rtx (vals);
30944 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
30945 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
30953 /* For the two element vectors, it's just as easy to use
30954 the general case. */
30958 /* Use ix86_expand_vector_set in 64bit mode only. */
30981 /* There's no way to set one QImode entry easily. Combine
30982 the variable value with its adjacent constant value, and
30983 promote to an HImode set. */
30984 x = XVECEXP (vals, 0, one_var ^ 1);
30987 var = convert_modes (HImode, QImode, var, true);
30988 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
30989 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30990 x = GEN_INT (INTVAL (x) & 0xff);
30994 var = convert_modes (HImode, QImode, var, true);
30995 x = gen_int_mode (INTVAL (x) << 8, HImode);
30997 if (x != const0_rtx)
30998 var = expand_simple_binop (HImode, IOR, var, x, var,
30999 1, OPTAB_LIB_WIDEN);
31001 x = gen_reg_rtx (wmode);
31002 emit_move_insn (x, gen_lowpart (wmode, const_vec));
31003 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
31005 emit_move_insn (target, gen_lowpart (mode, x));
31012 emit_move_insn (target, const_vec);
31013 ix86_expand_vector_set (mmx_ok, target, var, one_var);
31017 /* A subroutine of ix86_expand_vector_init_general. Use vector
31018 concatenate to handle the most general case: all values variable,
31019 and none identical. */
31022 ix86_expand_vector_init_concat (enum machine_mode mode,
31023 rtx target, rtx *ops, int n)
31025 enum machine_mode cmode, hmode = VOIDmode;
31026 rtx first[8], second[4];
31066 gcc_unreachable ();
31069 if (!register_operand (ops[1], cmode))
31070 ops[1] = force_reg (cmode, ops[1]);
31071 if (!register_operand (ops[0], cmode))
31072 ops[0] = force_reg (cmode, ops[0]);
31073 emit_insn (gen_rtx_SET (VOIDmode, target,
31074 gen_rtx_VEC_CONCAT (mode, ops[0],
31094 gcc_unreachable ();
31110 gcc_unreachable ();
31115 /* FIXME: We process inputs backward to help RA. PR 36222. */
31118 for (; i > 0; i -= 2, j--)
31120 first[j] = gen_reg_rtx (cmode);
31121 v = gen_rtvec (2, ops[i - 1], ops[i]);
31122 ix86_expand_vector_init (false, first[j],
31123 gen_rtx_PARALLEL (cmode, v));
31129 gcc_assert (hmode != VOIDmode);
31130 for (i = j = 0; i < n; i += 2, j++)
31132 second[j] = gen_reg_rtx (hmode);
31133 ix86_expand_vector_init_concat (hmode, second [j],
31137 ix86_expand_vector_init_concat (mode, target, second, n);
31140 ix86_expand_vector_init_concat (mode, target, first, n);
31144 gcc_unreachable ();
31148 /* A subroutine of ix86_expand_vector_init_general. Use vector
31149 interleave to handle the most general case: all values variable,
31150 and none identical. */
31153 ix86_expand_vector_init_interleave (enum machine_mode mode,
31154 rtx target, rtx *ops, int n)
31156 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
31159 rtx (*gen_load_even) (rtx, rtx, rtx);
31160 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
31161 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
31166 gen_load_even = gen_vec_setv8hi;
31167 gen_interleave_first_low = gen_vec_interleave_lowv4si;
31168 gen_interleave_second_low = gen_vec_interleave_lowv2di;
31169 inner_mode = HImode;
31170 first_imode = V4SImode;
31171 second_imode = V2DImode;
31172 third_imode = VOIDmode;
31175 gen_load_even = gen_vec_setv16qi;
31176 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
31177 gen_interleave_second_low = gen_vec_interleave_lowv4si;
31178 inner_mode = QImode;
31179 first_imode = V8HImode;
31180 second_imode = V4SImode;
31181 third_imode = V2DImode;
31184 gcc_unreachable ();
31187 for (i = 0; i < n; i++)
31189 /* Extend the odd element to SImode using a paradoxical SUBREG. */
31190 op0 = gen_reg_rtx (SImode);
31191 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
31193 /* Insert the SImode value as low element of V4SImode vector. */
31194 op1 = gen_reg_rtx (V4SImode);
31195 op0 = gen_rtx_VEC_MERGE (V4SImode,
31196 gen_rtx_VEC_DUPLICATE (V4SImode,
31198 CONST0_RTX (V4SImode),
31200 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
31202 /* Cast the V4SImode vector back to a vector in the original mode. */
31203 op0 = gen_reg_rtx (mode);
31204 emit_move_insn (op0, gen_lowpart (mode, op1));
31206 /* Load even elements into the second position. */
31207 emit_insn (gen_load_even (op0,
31208 force_reg (inner_mode,
31212 /* Cast vector to FIRST_IMODE vector. */
31213 ops[i] = gen_reg_rtx (first_imode);
31214 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
31217 /* Interleave low FIRST_IMODE vectors. */
31218 for (i = j = 0; i < n; i += 2, j++)
31220 op0 = gen_reg_rtx (first_imode);
31221 emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));
31223 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
31224 ops[j] = gen_reg_rtx (second_imode);
31225 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
31228 /* Interleave low SECOND_IMODE vectors. */
31229 switch (second_imode)
31232 for (i = j = 0; i < n / 2; i += 2, j++)
31234 op0 = gen_reg_rtx (second_imode);
31235 emit_insn (gen_interleave_second_low (op0, ops[i],
31238 /* Cast the SECOND_IMODE vector to the THIRD_IMODE vector. */
31240 ops[j] = gen_reg_rtx (third_imode);
31241 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
31243 second_imode = V2DImode;
31244 gen_interleave_second_low = gen_vec_interleave_lowv2di;
31248 op0 = gen_reg_rtx (second_imode);
31249 emit_insn (gen_interleave_second_low (op0, ops[0],
31252 /* Cast the SECOND_IMODE vector back to a vector in the original mode. */
31254 emit_insn (gen_rtx_SET (VOIDmode, target,
31255 gen_lowpart (mode, op0)));
31259 gcc_unreachable ();
31263 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
31264 all values variable, and none identical. */
31267 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
31268 rtx target, rtx vals)
31270 rtx ops[32], op0, op1;
31271 enum machine_mode half_mode = VOIDmode;
31278 if (!mmx_ok && !TARGET_SSE)
31290 n = GET_MODE_NUNITS (mode);
31291 for (i = 0; i < n; i++)
31292 ops[i] = XVECEXP (vals, 0, i);
31293 ix86_expand_vector_init_concat (mode, target, ops, n);
31297 half_mode = V16QImode;
31301 half_mode = V8HImode;
31305 n = GET_MODE_NUNITS (mode);
31306 for (i = 0; i < n; i++)
31307 ops[i] = XVECEXP (vals, 0, i);
31308 op0 = gen_reg_rtx (half_mode);
31309 op1 = gen_reg_rtx (half_mode);
31310 ix86_expand_vector_init_interleave (half_mode, op0, ops,
31312 ix86_expand_vector_init_interleave (half_mode, op1,
31313 &ops [n >> 1], n >> 2);
31314 emit_insn (gen_rtx_SET (VOIDmode, target,
31315 gen_rtx_VEC_CONCAT (mode, op0, op1)));
31319 if (!TARGET_SSE4_1)
31327 /* Don't use ix86_expand_vector_init_interleave if we can't
31328 move from GPR to SSE register directly. */
31329 if (!TARGET_INTER_UNIT_MOVES)
31332 n = GET_MODE_NUNITS (mode);
31333 for (i = 0; i < n; i++)
31334 ops[i] = XVECEXP (vals, 0, i);
31335 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
31343 gcc_unreachable ();
31347 int i, j, n_elts, n_words, n_elt_per_word;
31348 enum machine_mode inner_mode;
31349 rtx words[4], shift;
31351 inner_mode = GET_MODE_INNER (mode);
31352 n_elts = GET_MODE_NUNITS (mode);
31353 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
31354 n_elt_per_word = n_elts / n_words;
31355 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
31357 for (i = 0; i < n_words; ++i)
31359 rtx word = NULL_RTX;
31361 for (j = 0; j < n_elt_per_word; ++j)
31363 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
31364 elt = convert_modes (word_mode, inner_mode, elt, true);
31370 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
31371 word, 1, OPTAB_LIB_WIDEN);
31372 word = expand_simple_binop (word_mode, IOR, word, elt,
31373 word, 1, OPTAB_LIB_WIDEN);
31381 emit_move_insn (target, gen_lowpart (mode, words[0]));
31382 else if (n_words == 2)
31384 rtx tmp = gen_reg_rtx (mode);
31385 emit_clobber (tmp);
31386 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
31387 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
31388 emit_move_insn (target, tmp);
31390 else if (n_words == 4)
31392 rtx tmp = gen_reg_rtx (V4SImode);
31393 gcc_assert (word_mode == SImode);
31394 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
31395 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
31396 emit_move_insn (target, gen_lowpart (mode, tmp));
31399 gcc_unreachable ();
31403 /* Initialize vector TARGET via VALS. Suppress the use of MMX
31404 instructions unless MMX_OK is true. */
31407 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
31409 enum machine_mode mode = GET_MODE (target);
31410 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31411 int n_elts = GET_MODE_NUNITS (mode);
31412 int n_var = 0, one_var = -1;
31413 bool all_same = true, all_const_zero = true;
31417 for (i = 0; i < n_elts; ++i)
31419 x = XVECEXP (vals, 0, i);
31420 if (!(CONST_INT_P (x)
31421 || GET_CODE (x) == CONST_DOUBLE
31422 || GET_CODE (x) == CONST_FIXED))
31423 n_var++, one_var = i;
31424 else if (x != CONST0_RTX (inner_mode))
31425 all_const_zero = false;
31426 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
31430 /* Constants are best loaded from the constant pool. */
31433 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
31437 /* If all values are identical, broadcast the value. */
31439 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
31440 XVECEXP (vals, 0, 0)))
31443 /* Values where only one field is non-constant are best loaded from
31444 the pool and overwritten via move later. */
31448 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
31449 XVECEXP (vals, 0, one_var),
31453 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
31457 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
31461 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
31463 enum machine_mode mode = GET_MODE (target);
31464 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31465 enum machine_mode half_mode;
31466 bool use_vec_merge = false;
31468 static rtx (*gen_extract[6][2]) (rtx, rtx)
31470 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
31471 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
31472 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
31473 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
31474 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
31475 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
31477 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
31479 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
31480 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
31481 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
31482 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
31483 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
31484 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
31494 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
31495 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
31497 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
31499 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
31500 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31506 use_vec_merge = TARGET_SSE4_1;
31514 /* For the two element vectors, we implement a VEC_CONCAT with
31515 the extraction of the other element. */
31517 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
31518 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
31521 op0 = val, op1 = tmp;
31523 op0 = tmp, op1 = val;
31525 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
31526 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31531 use_vec_merge = TARGET_SSE4_1;
31538 use_vec_merge = true;
31542 /* tmp = target = A B C D */
31543 tmp = copy_to_reg (target);
31544 /* target = A A B B */
31545 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
31546 /* target = X A B B */
31547 ix86_expand_vector_set (false, target, val, 0);
31548 /* target = A X C D */
31549 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31550 const1_rtx, const0_rtx,
31551 GEN_INT (2+4), GEN_INT (3+4)));
31555 /* tmp = target = A B C D */
31556 tmp = copy_to_reg (target);
31557 /* tmp = X B C D */
31558 ix86_expand_vector_set (false, tmp, val, 0);
31559 /* target = A B X D */
31560 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31561 const0_rtx, const1_rtx,
31562 GEN_INT (0+4), GEN_INT (3+4)));
31566 /* tmp = target = A B C D */
31567 tmp = copy_to_reg (target);
31568 /* tmp = X B C D */
31569 ix86_expand_vector_set (false, tmp, val, 0);
31570 /* target = A B X D */
31571 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31572 const0_rtx, const1_rtx,
31573 GEN_INT (2+4), GEN_INT (0+4)));
31577 gcc_unreachable ();
31582 use_vec_merge = TARGET_SSE4_1;
31586 /* Element 0 handled by vec_merge below. */
31589 use_vec_merge = true;
31595 /* With SSE2, use integer shuffles to swap element 0 and ELT,
31596 store into element 0, then shuffle them back. */
31600 order[0] = GEN_INT (elt);
31601 order[1] = const1_rtx;
31602 order[2] = const2_rtx;
31603 order[3] = GEN_INT (3);
31604 order[elt] = const0_rtx;
31606 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31607 order[1], order[2], order[3]));
31609 ix86_expand_vector_set (false, target, val, 0);
31611 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31612 order[1], order[2], order[3]));
31616 /* For SSE1, we have to reuse the V4SF code. */
31617 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
31618 gen_lowpart (SFmode, val), elt);
31623 use_vec_merge = TARGET_SSE2;
31626 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31630 use_vec_merge = TARGET_SSE4_1;
31637 half_mode = V16QImode;
31643 half_mode = V8HImode;
31649 half_mode = V4SImode;
31655 half_mode = V2DImode;
31661 half_mode = V4SFmode;
31667 half_mode = V2DFmode;
31673 /* Compute offset. */
31677 gcc_assert (i <= 1);
31679 /* Extract the half. */
31680 tmp = gen_reg_rtx (half_mode);
31681 emit_insn (gen_extract[j][i] (tmp, target));
31683 /* Put val in tmp at elt. */
31684 ix86_expand_vector_set (false, tmp, val, elt);
31687 emit_insn (gen_insert[j][i] (target, target, tmp));
31696 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
31697 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
31698 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31702 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31704 emit_move_insn (mem, target);
31706 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31707 emit_move_insn (tmp, val);
31709 emit_move_insn (target, mem);
31714 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
31716 enum machine_mode mode = GET_MODE (vec);
31717 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31718 bool use_vec_extr = false;
31731 use_vec_extr = true;
31735 use_vec_extr = TARGET_SSE4_1;
31747 tmp = gen_reg_rtx (mode);
31748 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
31749 GEN_INT (elt), GEN_INT (elt),
31750 GEN_INT (elt+4), GEN_INT (elt+4)));
31754 tmp = gen_reg_rtx (mode);
31755 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
31759 gcc_unreachable ();
31762 use_vec_extr = true;
31767 use_vec_extr = TARGET_SSE4_1;
31781 tmp = gen_reg_rtx (mode);
31782 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
31783 GEN_INT (elt), GEN_INT (elt),
31784 GEN_INT (elt), GEN_INT (elt)));
31788 tmp = gen_reg_rtx (mode);
31789 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
31793 gcc_unreachable ();
31796 use_vec_extr = true;
31801 /* For SSE1, we have to reuse the V4SF code. */
31802 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
31803 gen_lowpart (V4SFmode, vec), elt);
31809 use_vec_extr = TARGET_SSE2;
31812 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31816 use_vec_extr = TARGET_SSE4_1;
31820 /* ??? Could extract the appropriate HImode element and shift. */
31827 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
31828 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
31830 /* Let the rtl optimizers know about the zero extension performed. */
31831 if (inner_mode == QImode || inner_mode == HImode)
31833 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
31834 target = gen_lowpart (SImode, target);
31837 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31841 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31843 emit_move_insn (mem, vec);
31845 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31846 emit_move_insn (target, tmp);
31850 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
31851 pattern to reduce; DEST is the destination; IN is the input vector. */
31854 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
31856 rtx tmp1, tmp2, tmp3;
31858 tmp1 = gen_reg_rtx (V4SFmode);
31859 tmp2 = gen_reg_rtx (V4SFmode);
31860 tmp3 = gen_reg_rtx (V4SFmode);
31862 emit_insn (gen_sse_movhlps (tmp1, in, in));
31863 emit_insn (fn (tmp2, tmp1, in));
31865 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
31866 const1_rtx, const1_rtx,
31867 GEN_INT (1+4), GEN_INT (1+4)));
31868 emit_insn (fn (dest, tmp2, tmp3));
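/* Data-flow sketch (illustrative, with FN = addition) for in = [a b c d]:

   tmp1 = movhlps (in, in)             = [c d c d]
   tmp2 = tmp1 + in                    = [a+c b+d . .]
   tmp3 = shufps (tmp2, tmp2, 1,1,1,1) = [b+d b+d b+d b+d]
   dest = tmp2 + tmp3

   so element 0 of DEST holds (a+c) + (b+d), the full reduction. */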
31871 /* Target hook for scalar_mode_supported_p. */
31873 ix86_scalar_mode_supported_p (enum machine_mode mode)
31875 if (DECIMAL_FLOAT_MODE_P (mode))
31876 return default_decimal_float_supported_p ();
31877 else if (mode == TFmode)
31880 return default_scalar_mode_supported_p (mode);
31883 /* Implements target hook vector_mode_supported_p. */
31885 ix86_vector_mode_supported_p (enum machine_mode mode)
31887 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
31889 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
31891 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
31893 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
31895 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
31900 /* Target hook for c_mode_for_suffix. */
31901 static enum machine_mode
31902 ix86_c_mode_for_suffix (char suffix)
31912 /* Worker function for TARGET_MD_ASM_CLOBBERS.
31914 We do this in the new i386 backend to maintain source compatibility
31915 with the old cc0-based compiler. */
31918 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
31919 tree inputs ATTRIBUTE_UNUSED,
31922 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
31924 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
31929 /* Implements the target hook targetm.asm.encode_section_info. This
31930 is not used by NetWare. */
31932 static void ATTRIBUTE_UNUSED
31933 ix86_encode_section_info (tree decl, rtx rtl, int first)
31935 default_encode_section_info (decl, rtl, first);
31937 if (TREE_CODE (decl) == VAR_DECL
31938 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
31939 && ix86_in_large_data_p (decl))
31940 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
31943 /* Worker function for REVERSE_CONDITION. */
31946 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
31948 return (mode != CCFPmode && mode != CCFPUmode
31949 ? reverse_condition (code)
31950 : reverse_condition_maybe_unordered (code));
31953 /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */
31957 output_387_reg_move (rtx insn, rtx *operands)
31959 if (REG_P (operands[0]))
31961 if (REG_P (operands[1])
31962 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31964 if (REGNO (operands[0]) == FIRST_STACK_REG)
31965 return output_387_ffreep (operands, 0);
31966 return "fstp\t%y0";
31968 if (STACK_TOP_P (operands[0]))
31969 return "fld%Z1\t%y1";
31972 else if (MEM_P (operands[0]))
31974 gcc_assert (REG_P (operands[1]));
31975 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31976 return "fstp%Z0\t%y0";
31979 /* There is no non-popping store to memory for XFmode.
31980 So if we need one, follow the store with a load. */
31981 if (GET_MODE (operands[0]) == XFmode)
31982 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
31984 return "fst%Z0\t%y0";
31991 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
31992 the FP status register is set. */
31995 ix86_emit_fp_unordered_jump (rtx label)
31997 rtx reg = gen_reg_rtx (HImode);
32000 emit_insn (gen_x86_fnstsw_1 (reg));
32002 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
32004 emit_insn (gen_x86_sahf_1 (reg));
32006 temp = gen_rtx_REG (CCmode, FLAGS_REG);
32007 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
32011 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
32013 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
32014 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
32017 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
32018 gen_rtx_LABEL_REF (VOIDmode, label),
32020 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
32022 emit_jump_insn (temp);
32023 predict_jump (REG_BR_PROB_BASE * 10 / 100);
32026 /* Output code to perform a log1p XFmode calculation. */
32028 void ix86_emit_i387_log1p (rtx op0, rtx op1)
32030 rtx label1 = gen_label_rtx ();
32031 rtx label2 = gen_label_rtx ();
32033 rtx tmp = gen_reg_rtx (XFmode);
32034 rtx tmp2 = gen_reg_rtx (XFmode);
32037 emit_insn (gen_absxf2 (tmp, op1));
32038 test = gen_rtx_GE (VOIDmode, tmp,
32039 CONST_DOUBLE_FROM_REAL_VALUE (
32040 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
32042 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
32044 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
32045 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
32046 emit_jump (label2);
32048 emit_label (label1);
32049 emit_move_insn (tmp, CONST1_RTX (XFmode));
32050 emit_insn (gen_addxf3 (tmp, op1, tmp));
32051 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
32052 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
32054 emit_label (label2);
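/* Explanatory note: the threshold above is 1 - sqrt(2)/2 ~= 0.2928932,
   the bound within which fyl2xp1 is specified to be accurate. For |op1|
   below it, op0 = ln(2) * log2 (1 + op1) is computed directly with
   fyl2xp1; otherwise 1 is added first and fyl2x is used instead. */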
32057 /* Output code to perform a Newton-Raphson approximation of a single precision
32058 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
32060 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
32062 rtx x0, x1, e0, e1;
32064 x0 = gen_reg_rtx (mode);
32065 e0 = gen_reg_rtx (mode);
32066 e1 = gen_reg_rtx (mode);
32067 x1 = gen_reg_rtx (mode);
32069 /* a / b = a * ((rcp(b) + rcp(b)) - (b * rcp(b) * rcp (b))) */
32071 /* x0 = rcp(b) estimate */
32072 emit_insn (gen_rtx_SET (VOIDmode, x0,
32073 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
32076 emit_insn (gen_rtx_SET (VOIDmode, e0,
32077 gen_rtx_MULT (mode, x0, b)));
32080 emit_insn (gen_rtx_SET (VOIDmode, e0,
32081 gen_rtx_MULT (mode, x0, e0)));
32084 emit_insn (gen_rtx_SET (VOIDmode, e1,
32085 gen_rtx_PLUS (mode, x0, x0)));
32088 emit_insn (gen_rtx_SET (VOIDmode, x1,
32089 gen_rtx_MINUS (mode, e1, e0)));
32092 emit_insn (gen_rtx_SET (VOIDmode, res,
32093 gen_rtx_MULT (mode, a, x1)));
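/* Algebra sketch: the sequence above is one Newton-Raphson step for the
   reciprocal. With x0 ~= 1/b from rcpss (roughly 12 bits of precision),

   x1 = x0 * (2 - b * x0) = (x0 + x0) - (b * x0 * x0)

   roughly doubles the number of correct bits, and the quotient is then
   formed as res = a * x1. */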
32096 /* Output code to perform a Newton-Raphson approximation of a
32097 single precision floating point [reciprocal] square root. */
32099 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
32102 rtx x0, e0, e1, e2, e3, mthree, mhalf;
32105 x0 = gen_reg_rtx (mode);
32106 e0 = gen_reg_rtx (mode);
32107 e1 = gen_reg_rtx (mode);
32108 e2 = gen_reg_rtx (mode);
32109 e3 = gen_reg_rtx (mode);
32111 real_from_integer (&r, VOIDmode, -3, -1, 0);
32112 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
32114 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
32115 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
32117 if (VECTOR_MODE_P (mode))
32119 mthree = ix86_build_const_vector (mode, true, mthree);
32120 mhalf = ix86_build_const_vector (mode, true, mhalf);
32123 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
32124 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
32126 /* x0 = rsqrt(a) estimate */
32127 emit_insn (gen_rtx_SET (VOIDmode, x0,
32128 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
32131 /* If a == 0.0, filter out the infinite rsqrt estimate to prevent NaN for sqrt(0.0). */
32136 zero = gen_reg_rtx (mode);
32137 mask = gen_reg_rtx (mode);
32139 zero = force_reg (mode, CONST0_RTX(mode));
32140 emit_insn (gen_rtx_SET (VOIDmode, mask,
32141 gen_rtx_NE (mode, zero, a)));
32143 emit_insn (gen_rtx_SET (VOIDmode, x0,
32144 gen_rtx_AND (mode, x0, mask)));
32148 emit_insn (gen_rtx_SET (VOIDmode, e0,
32149 gen_rtx_MULT (mode, x0, a)));
32151 emit_insn (gen_rtx_SET (VOIDmode, e1,
32152 gen_rtx_MULT (mode, e0, x0)));
32155 mthree = force_reg (mode, mthree);
32156 emit_insn (gen_rtx_SET (VOIDmode, e2,
32157 gen_rtx_PLUS (mode, e1, mthree)));
32159 mhalf = force_reg (mode, mhalf);
32161 /* e3 = -.5 * x0 */
32162 emit_insn (gen_rtx_SET (VOIDmode, e3,
32163 gen_rtx_MULT (mode, x0, mhalf)));
32165 /* e3 = -.5 * e0 */
32166 emit_insn (gen_rtx_SET (VOIDmode, e3,
32167 gen_rtx_MULT (mode, e0, mhalf)));
32168 /* ret = e2 * e3 */
32169 emit_insn (gen_rtx_SET (VOIDmode, res,
32170 gen_rtx_MULT (mode, e2, e3)));
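/* Algebra sketch: one Newton-Raphson step for the reciprocal square root.
   With x0 ~= 1/sqrt(a) from rsqrtss,

   rsqrt(a) ~= -0.5 * x0 * (a * x0 * x0 - 3)
   sqrt(a)  ~= a * rsqrt(a) = -0.5 * (a * x0) * (a * x0 * x0 - 3)

   which is exactly e2 * e3 with e2 = a*x0*x0 - 3 and e3 = -0.5 * x0
   (reciprocal case) or e3 = -0.5 * e0, e0 = a * x0 (sqrt case). */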
32173 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
32175 static void ATTRIBUTE_UNUSED
32176 i386_solaris_elf_named_section (const char *name, unsigned int flags,
32179 /* With Binutils 2.15, the "@unwind" marker must be specified on
32180 every occurrence of the ".eh_frame" section, not just the first one. */
32183 && strcmp (name, ".eh_frame") == 0)
32185 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
32186 flags & SECTION_WRITE ? "aw" : "a");
32189 default_elf_asm_named_section (name, flags, decl);
32192 /* Return the mangling of TYPE if it is an extended fundamental type. */
32194 static const char *
32195 ix86_mangle_type (const_tree type)
32197 type = TYPE_MAIN_VARIANT (type);
32199 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32200 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32203 switch (TYPE_MODE (type))
32206 /* __float128 is "g". */
32209 /* "long double" or __float80 is "e". */
32216 /* For 32-bit code we can save PIC register setup by using
32217 __stack_chk_fail_local hidden function instead of calling
__stack_chk_fail directly.  64-bit code doesn't need to set up any PIC
register, so it is better to call __stack_chk_fail directly.  */
32222 ix86_stack_protect_fail (void)
32224 return TARGET_64BIT
32225 ? default_external_stack_protect_fail ()
32226 : default_hidden_stack_protect_fail ();
32229 /* Select a format to encode pointers in exception handling data. CODE
32230 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
32231 true if the symbol may be affected by dynamic relocations.
32233 ??? All x86 object file formats are capable of representing this.
32234 After all, the relocation needed is the same as for the call insn.
32235 Whether or not a particular assembler allows us to enter such, I
32236 guess we'll have to see. */
32238 asm_preferred_eh_data_format (int code, int global)
32242 int type = DW_EH_PE_sdata8;
32244 || ix86_cmodel == CM_SMALL_PIC
32245 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
32246 type = DW_EH_PE_sdata4;
32247 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
32249 if (ix86_cmodel == CM_SMALL
32250 || (ix86_cmodel == CM_MEDIUM && code))
32251 return DW_EH_PE_udata4;
32252 return DW_EH_PE_absptr;
/* Expand copysign from SIGN to the positive value ABS_VALUE
   storing in RESULT.  If MASK is non-null, it shall be a mask to mask out
   the sign bit.  */
32259 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
32261 enum machine_mode mode = GET_MODE (sign);
32262 rtx sgn = gen_reg_rtx (mode);
32263 if (mask == NULL_RTX)
32265 enum machine_mode vmode;
32267 if (mode == SFmode)
32269 else if (mode == DFmode)
32274 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
32275 if (!VECTOR_MODE_P (mode))
32277 /* We need to generate a scalar mode mask in this case. */
32278 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
32279 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
32280 mask = gen_reg_rtx (mode);
32281 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
32285 mask = gen_rtx_NOT (mode, mask);
32286 emit_insn (gen_rtx_SET (VOIDmode, sgn,
32287 gen_rtx_AND (mode, mask, sign)));
32288 emit_insn (gen_rtx_SET (VOIDmode, result,
32289 gen_rtx_IOR (mode, abs_value, sgn)));
/* Expand fabs (OP0) and return a new rtx that holds the result.  The
   mask for masking out the sign-bit is stored in *SMASK, if that is
   non-null.  */
32296 ix86_expand_sse_fabs (rtx op0, rtx *smask)
32298 enum machine_mode vmode, mode = GET_MODE (op0);
32301 xa = gen_reg_rtx (mode);
32302 if (mode == SFmode)
32304 else if (mode == DFmode)
32308 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
32309 if (!VECTOR_MODE_P (mode))
32311 /* We need to generate a scalar mode mask in this case. */
32312 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
32313 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
32314 mask = gen_reg_rtx (mode);
32315 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
32317 emit_insn (gen_rtx_SET (VOIDmode, xa,
32318 gen_rtx_AND (mode, op0, mask)));
32326 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
32327 swapping the operands if SWAP_OPERANDS is true. The expanded
32328 code is a forward jump to a newly created label in case the
32329 comparison is true. The generated label rtx is returned. */
32331 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
32332 bool swap_operands)
32343 label = gen_label_rtx ();
32344 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
32345 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32346 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
32347 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
32348 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
32349 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
32350 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
32351 JUMP_LABEL (tmp) = label;
32356 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
32357 using comparison code CODE. Operands are swapped for the comparison if
32358 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
32360 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
32361 bool swap_operands)
32363 rtx (*insn)(rtx, rtx, rtx, rtx);
32364 enum machine_mode mode = GET_MODE (op0);
32365 rtx mask = gen_reg_rtx (mode);
32374 insn = mode == DFmode ? gen_setcc_df_sse : gen_setcc_sf_sse;
32376 emit_insn (insn (mask, op0, op1,
32377 gen_rtx_fmt_ee (code, mode, op0, op1)));
32381 /* Generate and return a rtx of mode MODE for 2**n where n is the number
32382 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
32384 ix86_gen_TWO52 (enum machine_mode mode)
32386 REAL_VALUE_TYPE TWO52r;
32389 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
32390 TWO52 = const_double_from_real_value (TWO52r, mode);
32391 TWO52 = force_reg (mode, TWO52);
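/* Editor's note: 2**52 is the smallest double for which adjacent
   representable values differ by exactly 1.0, so for |x| < 2**52 the
   expression x + 2**52 - 2**52 rounds x to an integer in the current
   rounding mode.  For example (round-to-nearest-even assumed):
     3.7 + 2**52 = 4503599627370499.7 -> rounds to 4503599627370500.0
     4503599627370500.0 - 2**52 = 4.0
   For SFmode the analogous constant is 2**23.  */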
/* Expand SSE sequence for computing lround from OP1 storing
   into OP0.  */
32399 ix86_expand_lround (rtx op0, rtx op1)
/* C code for the stuff we're doing below:
       tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
       op0 = (long)tmp
   */
32405 enum machine_mode mode = GET_MODE (op1);
32406 const struct real_format *fmt;
32407 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32410 /* load nextafter (0.5, 0.0) */
32411 fmt = REAL_MODE_FORMAT (mode);
32412 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32413 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
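/* Editor's note: pred_half is 0.5 - 2**(-p-1), the largest
   representable value strictly below 0.5, i.e. nextafter (0.5, 0.0);
   for DFmode p = 53, so pred_half = 0.5 - 2**-54.  Adding this instead
   of exactly 0.5 keeps inputs just below a halfway point, such as
   0.49999999999999994, from having the addition round up and so being
   lround'ed to 1 instead of 0.  */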
32415 /* adj = copysign (0.5, op1) */
32416 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
32417 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
32419 /* adj = op1 + adj */
32420 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
32422 /* op0 = (imode)adj */
32423 expand_fix (op0, adj, 0);
/* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
   into OPERAND0.  */
32429 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
/* C code for the stuff we're doing below (for do_floor):
       xi = (long)op1;
       xi -= (double)xi > op1 ? 1 : 0;
       return xi;
   */
32436 enum machine_mode fmode = GET_MODE (op1);
32437 enum machine_mode imode = GET_MODE (op0);
32438 rtx ireg, freg, label, tmp;
32440 /* reg = (long)op1 */
32441 ireg = gen_reg_rtx (imode);
32442 expand_fix (ireg, op1, 0);
32444 /* freg = (double)reg */
32445 freg = gen_reg_rtx (fmode);
32446 expand_float (freg, ireg, 0);
32448 /* ireg = (freg > op1) ? ireg - 1 : ireg */
32449 label = ix86_expand_sse_compare_and_jump (UNLE,
32450 freg, op1, !do_floor);
32451 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
32452 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
32453 emit_move_insn (ireg, tmp);
32455 emit_label (label);
32456 LABEL_NUSES (label) = 1;
32458 emit_move_insn (op0, ireg);
32461 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
32462 result in OPERAND0. */
32464 ix86_expand_rint (rtx operand0, rtx operand1)
/* C code for the stuff we're doing below:
       xa = fabs (operand1);
       if (!isless (xa, 2**52))
         return operand1;
       xa = xa + 2**52 - 2**52;
       return copysign (xa, operand1);
   */
32473 enum machine_mode mode = GET_MODE (operand0);
32474 rtx res, xa, label, TWO52, mask;
32476 res = gen_reg_rtx (mode);
32477 emit_move_insn (res, operand1);
32479 /* xa = abs (operand1) */
32480 xa = ix86_expand_sse_fabs (res, &mask);
32482 /* if (!isless (xa, TWO52)) goto label; */
32483 TWO52 = ix86_gen_TWO52 (mode);
32484 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32486 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32487 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32489 ix86_sse_copysign_to_positive (res, xa, res, mask);
32491 emit_label (label);
32492 LABEL_NUSES (label) = 1;
32494 emit_move_insn (operand0, res);
/* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
   into OPERAND0.  */
32500 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
/* C code for the stuff we expand below.
       double xa = fabs (x), x2;
       if (!isless (xa, TWO52))
         return x;
       xa = xa + TWO52 - TWO52;
       x2 = copysign (xa, x);
     Compensate.  Floor:
       if (x2 > x)
         x2 -= 1;
     Compensate.  Ceil:
       if (x2 < x)
         x2 += 1;
       return x2;
   */
32516 enum machine_mode mode = GET_MODE (operand0);
32517 rtx xa, TWO52, tmp, label, one, res, mask;
32519 TWO52 = ix86_gen_TWO52 (mode);
32521 /* Temporary for holding the result, initialized to the input
32522 operand to ease control flow. */
32523 res = gen_reg_rtx (mode);
32524 emit_move_insn (res, operand1);
32526 /* xa = abs (operand1) */
32527 xa = ix86_expand_sse_fabs (res, &mask);
32529 /* if (!isless (xa, TWO52)) goto label; */
32530 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32532 /* xa = xa + TWO52 - TWO52; */
32533 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32534 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32536 /* xa = copysign (xa, operand1) */
32537 ix86_sse_copysign_to_positive (xa, xa, res, mask);
32539 /* generate 1.0 or -1.0 */
32540 one = force_reg (mode,
32541 const_double_from_real_value (do_floor
32542 ? dconst1 : dconstm1, mode));
32544 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32545 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32546 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32547 gen_rtx_AND (mode, one, tmp)));
32548 /* We always need to subtract here to preserve signed zero. */
32549 tmp = expand_simple_binop (mode, MINUS,
32550 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32551 emit_move_insn (res, tmp);
32553 emit_label (label);
32554 LABEL_NUSES (label) = 1;
32556 emit_move_insn (operand0, res);
/* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
   into OPERAND0.  */
32562 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
/* C code for the stuff we expand below.
       double xa = fabs (x), x2;
       if (!isless (xa, TWO52))
         return x;
       x2 = (double)(long)x;
     Compensate.  Floor:
       if (x2 > x)
         x2 -= 1;
     Compensate.  Ceil:
       if (x2 < x)
         x2 += 1;
       if (HONOR_SIGNED_ZEROS (mode))
         return copysign (x2, x);
       return x2;
   */
32579 enum machine_mode mode = GET_MODE (operand0);
32580 rtx xa, xi, TWO52, tmp, label, one, res, mask;
32582 TWO52 = ix86_gen_TWO52 (mode);
32584 /* Temporary for holding the result, initialized to the input
32585 operand to ease control flow. */
32586 res = gen_reg_rtx (mode);
32587 emit_move_insn (res, operand1);
32589 /* xa = abs (operand1) */
32590 xa = ix86_expand_sse_fabs (res, &mask);
32592 /* if (!isless (xa, TWO52)) goto label; */
32593 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32595 /* xa = (double)(long)x */
32596 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32597 expand_fix (xi, res, 0);
32598 expand_float (xa, xi, 0);
32601 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32603 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32604 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32605 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32606 gen_rtx_AND (mode, one, tmp)));
32607 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
32608 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32609 emit_move_insn (res, tmp);
32611 if (HONOR_SIGNED_ZEROS (mode))
32612 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32614 emit_label (label);
32615 LABEL_NUSES (label) = 1;
32617 emit_move_insn (operand0, res);
32620 /* Expand SSE sequence for computing round from OPERAND1 storing
32621 into OPERAND0. Sequence that works without relying on DImode truncation
32622 via cvttsd2siq that is only available on 64bit targets. */
32624 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
/* C code for the stuff we expand below.
       double xa = fabs (x), xa2, x2;
       if (!isless (xa, TWO52))
         return x;
     Using the absolute value and copying back sign makes
     -0.0 -> -0.0 correct.
       xa2 = xa + TWO52 - TWO52;
     Compensate:
       dxa = xa2 - xa;
       if (dxa <= -0.5)
         xa2 += 1;
       else if (dxa > 0.5)
         xa2 -= 1;
       x2 = copysign (xa2, x);
       return x2;
   */
32642 enum machine_mode mode = GET_MODE (operand0);
32643 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
32645 TWO52 = ix86_gen_TWO52 (mode);
32647 /* Temporary for holding the result, initialized to the input
32648 operand to ease control flow. */
32649 res = gen_reg_rtx (mode);
32650 emit_move_insn (res, operand1);
32652 /* xa = abs (operand1) */
32653 xa = ix86_expand_sse_fabs (res, &mask);
32655 /* if (!isless (xa, TWO52)) goto label; */
32656 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32658 /* xa2 = xa + TWO52 - TWO52; */
32659 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32660 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
32662 /* dxa = xa2 - xa; */
32663 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
32665 /* generate 0.5, 1.0 and -0.5 */
32666 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
32667 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
32668 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
32672 tmp = gen_reg_rtx (mode);
32673 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
32674 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
32675 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32676 gen_rtx_AND (mode, one, tmp)));
32677 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32678 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
32679 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
32680 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32681 gen_rtx_AND (mode, one, tmp)));
32682 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32684 /* res = copysign (xa2, operand1) */
32685 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
32687 emit_label (label);
32688 LABEL_NUSES (label) = 1;
32690 emit_move_insn (operand0, res);
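/* Editor's worked example for the sequence above (round-to-nearest-even
   assumed): for x = 2.5, xa = 2.5 and xa2 = 2.5 + 2**52 - 2**52 = 2.0,
   because the addition ties to the even mantissa.  Then dxa = -0.5, the
   dxa <= -0.5 test fires, and xa2 becomes 3.0, so round (2.5) = 3.0 as
   round-half-away-from-zero semantics require.  */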
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
32696 ix86_expand_trunc (rtx operand0, rtx operand1)
/* C code for SSE variant we expand below.
       double xa = fabs (x), x2;
       if (!isless (xa, TWO52))
         return x;
       x2 = (double)(long)x;
       if (HONOR_SIGNED_ZEROS (mode))
         return copysign (x2, x);
       return x2;
   */
32707 enum machine_mode mode = GET_MODE (operand0);
32708 rtx xa, xi, TWO52, label, res, mask;
32710 TWO52 = ix86_gen_TWO52 (mode);
32712 /* Temporary for holding the result, initialized to the input
32713 operand to ease control flow. */
32714 res = gen_reg_rtx (mode);
32715 emit_move_insn (res, operand1);
32717 /* xa = abs (operand1) */
32718 xa = ix86_expand_sse_fabs (res, &mask);
32720 /* if (!isless (xa, TWO52)) goto label; */
32721 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32723 /* x = (double)(long)x */
32724 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32725 expand_fix (xi, res, 0);
32726 expand_float (res, xi, 0);
32728 if (HONOR_SIGNED_ZEROS (mode))
32729 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32731 emit_label (label);
32732 LABEL_NUSES (label) = 1;
32734 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
32740 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
32742 enum machine_mode mode = GET_MODE (operand0);
32743 rtx xa, mask, TWO52, label, one, res, smask, tmp;
/* C code for SSE variant we expand below.
       double xa = fabs (x), xa2, x2;
       if (!isless (xa, TWO52))
         return x;
       xa2 = xa + TWO52 - TWO52;
     Compensate:
       if (xa2 > xa)
         xa2 -= 1;
       x2 = copysign (xa2, x);
       return x2;
   */
32757 TWO52 = ix86_gen_TWO52 (mode);
32759 /* Temporary for holding the result, initialized to the input
32760 operand to ease control flow. */
32761 res = gen_reg_rtx (mode);
32762 emit_move_insn (res, operand1);
32764 /* xa = abs (operand1) */
32765 xa = ix86_expand_sse_fabs (res, &smask);
32767 /* if (!isless (xa, TWO52)) goto label; */
32768 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32770 /* res = xa + TWO52 - TWO52; */
32771 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32772 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
32773 emit_move_insn (res, tmp);
32776 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32778 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
32779 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
32780 emit_insn (gen_rtx_SET (VOIDmode, mask,
32781 gen_rtx_AND (mode, mask, one)));
32782 tmp = expand_simple_binop (mode, MINUS,
32783 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
32784 emit_move_insn (res, tmp);
32786 /* res = copysign (res, operand1) */
32787 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
32789 emit_label (label);
32790 LABEL_NUSES (label) = 1;
32792 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  */
32798 ix86_expand_round (rtx operand0, rtx operand1)
/* C code for the stuff we're doing below:
       double xa = fabs (x);
       if (!isless (xa, TWO52))
         return x;
       xa = (double)(long)(xa + nextafter (0.5, 0.0));
       return copysign (xa, x);
   */
32807 enum machine_mode mode = GET_MODE (operand0);
32808 rtx res, TWO52, xa, label, xi, half, mask;
32809 const struct real_format *fmt;
32810 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32812 /* Temporary for holding the result, initialized to the input
32813 operand to ease control flow. */
32814 res = gen_reg_rtx (mode);
32815 emit_move_insn (res, operand1);
32817 TWO52 = ix86_gen_TWO52 (mode);
32818 xa = ix86_expand_sse_fabs (res, &mask);
32819 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32821 /* load nextafter (0.5, 0.0) */
32822 fmt = REAL_MODE_FORMAT (mode);
32823 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32824 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
32826 /* xa = xa + 0.5 */
32827 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
32828 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
32830 /* xa = (double)(int64_t)xa */
32831 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32832 expand_fix (xi, xa, 0);
32833 expand_float (xa, xi, 0);
32835 /* res = copysign (xa, operand1) */
32836 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
32838 emit_label (label);
32839 LABEL_NUSES (label) = 1;
32841 emit_move_insn (operand0, res);
32845 /* Table of valid machine attributes. */
32846 static const struct attribute_spec ix86_attribute_table[] =
32848 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
32849 affects_type_identity } */
32850 /* Stdcall attribute says callee is responsible for popping arguments
32851 if they are not variable. */
32852 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32854 /* Fastcall attribute says callee is responsible for popping arguments
32855 if they are not variable. */
32856 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32858 /* Thiscall attribute says callee is responsible for popping arguments
32859 if they are not variable. */
32860 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32862 /* Cdecl attribute says the callee is a normal C declaration */
32863 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32865 /* Regparm attribute specifies how many integer arguments are to be
32866 passed in registers. */
32867 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute,
32869 /* Sseregparm attribute says we are using x86_64 calling conventions
32870 for FP arguments. */
32871 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32873 /* force_align_arg_pointer says this function realigns the stack at entry. */
32874 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
32875 false, true, true, ix86_handle_cconv_attribute, false },
32876 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
32877 { "dllimport", 0, 0, false, false, false, handle_dll_attribute, false },
32878 { "dllexport", 0, 0, false, false, false, handle_dll_attribute, false },
32879 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute,
32882 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
32884 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
32886 #ifdef SUBTARGET_ATTRIBUTE_TABLE
32887 SUBTARGET_ATTRIBUTE_TABLE,
32889 /* ms_abi and sysv_abi calling convention function attributes. */
32890 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
32891 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
32892 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute,
32894 { "callee_pop_aggregate_return", 1, 1, false, true, true,
32895 ix86_handle_callee_pop_aggregate_return, true },
32897 { NULL, 0, 0, false, false, false, NULL, false }
32900 /* Implement targetm.vectorize.builtin_vectorization_cost. */
32902 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
32903 tree vectype ATTRIBUTE_UNUSED,
32904 int misalign ATTRIBUTE_UNUSED)
32906 switch (type_of_cost)
32909 return ix86_cost->scalar_stmt_cost;
32912 return ix86_cost->scalar_load_cost;
32915 return ix86_cost->scalar_store_cost;
32918 return ix86_cost->vec_stmt_cost;
32921 return ix86_cost->vec_align_load_cost;
32924 return ix86_cost->vec_store_cost;
32926 case vec_to_scalar:
32927 return ix86_cost->vec_to_scalar_cost;
32929 case scalar_to_vec:
32930 return ix86_cost->scalar_to_vec_cost;
32932 case unaligned_load:
32933 case unaligned_store:
32934 return ix86_cost->vec_unalign_load_cost;
32936 case cond_branch_taken:
32937 return ix86_cost->cond_taken_branch_cost;
32939 case cond_branch_not_taken:
32940 return ix86_cost->cond_not_taken_branch_cost;
32946 gcc_unreachable ();
32951 /* Implement targetm.vectorize.builtin_vec_perm. */
32954 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
32956 tree itype = TREE_TYPE (vec_type);
32957 bool u = TYPE_UNSIGNED (itype);
32958 enum machine_mode vmode = TYPE_MODE (vec_type);
32959 enum ix86_builtins fcode;
32960 bool ok = TARGET_SSE2;
32966 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
32969 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
32971 itype = ix86_get_builtin_type (IX86_BT_DI);
32976 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
32980 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
32982 itype = ix86_get_builtin_type (IX86_BT_SI);
32986 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
32989 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
32992 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
32995 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
33005 *mask_type = itype;
33006 return ix86_builtins[(int) fcode];
33009 /* Return a vector mode with twice as many elements as VMODE. */
33010 /* ??? Consider moving this to a table generated by genmodes.c. */
33012 static enum machine_mode
33013 doublesize_vector_mode (enum machine_mode vmode)
33017 case V2SFmode: return V4SFmode;
33018 case V1DImode: return V2DImode;
33019 case V2SImode: return V4SImode;
33020 case V4HImode: return V8HImode;
33021 case V8QImode: return V16QImode;
33023 case V2DFmode: return V4DFmode;
33024 case V4SFmode: return V8SFmode;
33025 case V2DImode: return V4DImode;
33026 case V4SImode: return V8SImode;
33027 case V8HImode: return V16HImode;
33028 case V16QImode: return V32QImode;
33030 case V4DFmode: return V8DFmode;
33031 case V8SFmode: return V16SFmode;
33032 case V4DImode: return V8DImode;
33033 case V8SImode: return V16SImode;
33034 case V16HImode: return V32HImode;
33035 case V32QImode: return V64QImode;
33038 gcc_unreachable ();
33042 /* Construct (set target (vec_select op0 (parallel perm))) and
33043 return true if that's a valid instruction in the active ISA. */
33046 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
33048 rtx rperm[MAX_VECT_LEN], x;
33051 for (i = 0; i < nelt; ++i)
33052 rperm[i] = GEN_INT (perm[i]);
33054 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
33055 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
33056 x = gen_rtx_SET (VOIDmode, target, x);
33059 if (recog_memoized (x) < 0)
33067 /* Similar, but generate a vec_concat from op0 and op1 as well. */
33070 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
33071 const unsigned char *perm, unsigned nelt)
33073 enum machine_mode v2mode;
33076 v2mode = doublesize_vector_mode (GET_MODE (op0));
33077 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
33078 return expand_vselect (target, x, perm, nelt);
33081 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33082 in terms of blendp[sd] / pblendw / pblendvb. */
33085 expand_vec_perm_blend (struct expand_vec_perm_d *d)
33087 enum machine_mode vmode = d->vmode;
33088 unsigned i, mask, nelt = d->nelt;
33089 rtx target, op0, op1, x;
33091 if (!TARGET_SSE4_1 || d->op0 == d->op1)
33093 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
33096 /* This is a blend, not a permute. Elements must stay in their
33097 respective lanes. */
33098 for (i = 0; i < nelt; ++i)
33100 unsigned e = d->perm[i];
33101 if (!(e == i || e == i + nelt))
33108 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
33109 decision should be extracted elsewhere, so that we only try that
33110 sequence once all budget==3 options have been tried. */
33112 /* For bytes, see if bytes move in pairs so we can use pblendw with
33113 an immediate argument, rather than pblendvb with a vector argument. */
33114 if (vmode == V16QImode)
33116 bool pblendw_ok = true;
33117 for (i = 0; i < 16 && pblendw_ok; i += 2)
33118 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
33122 rtx rperm[16], vperm;
33124 for (i = 0; i < nelt; ++i)
33125 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
33127 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
33128 vperm = force_reg (V16QImode, vperm);
33130 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
33135 target = d->target;
33147 for (i = 0; i < nelt; ++i)
33148 mask |= (d->perm[i] >= nelt) << i;
33152 for (i = 0; i < 2; ++i)
33153 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
33157 for (i = 0; i < 4; ++i)
33158 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
33162 for (i = 0; i < 8; ++i)
33163 mask |= (d->perm[i * 2] >= 16) << i;
33167 target = gen_lowpart (vmode, target);
33168 op0 = gen_lowpart (vmode, op0);
33169 op1 = gen_lowpart (vmode, op1);
33173 gcc_unreachable ();
/* This matches five different patterns, one for each of the modes.  */
33177 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
33178 x = gen_rtx_SET (VOIDmode, target, x);
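/* Editor's worked example: for a V4SFmode blend with
   perm = { 0, 5, 2, 7 }, elements 1 and 3 come from op1, so the loop
   above computes mask = (1 << 1) | (1 << 3) = 0xa, which becomes the
   immediate operand of blendps.  */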
33184 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33185 in terms of the variable form of vpermilps.
33187 Note that we will have already failed the immediate input vpermilps,
33188 which requires that the high and low part shuffle be identical; the
33189 variable form doesn't require that. */
33192 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
33194 rtx rperm[8], vperm;
33197 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
33200 /* We can only permute within the 128-bit lane. */
33201 for (i = 0; i < 8; ++i)
33203 unsigned e = d->perm[i];
33204 if (i < 4 ? e >= 4 : e < 4)
33211 for (i = 0; i < 8; ++i)
33213 unsigned e = d->perm[i];
33215 /* Within each 128-bit lane, the elements of op0 are numbered
33216 from 0 and the elements of op1 are numbered from 4. */
33222 rperm[i] = GEN_INT (e);
33225 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
33226 vperm = force_reg (V8SImode, vperm);
33227 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
33232 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33233 in terms of pshufb or vpperm. */
33236 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
33238 unsigned i, nelt, eltsz;
33239 rtx rperm[16], vperm, target, op0, op1;
33241 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
33243 if (GET_MODE_SIZE (d->vmode) != 16)
33250 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33252 for (i = 0; i < nelt; ++i)
33254 unsigned j, e = d->perm[i];
33255 for (j = 0; j < eltsz; ++j)
33256 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
33259 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
33260 vperm = force_reg (V16QImode, vperm);
33262 target = gen_lowpart (V16QImode, d->target);
33263 op0 = gen_lowpart (V16QImode, d->op0);
33264 if (d->op0 == d->op1)
33265 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
33268 op1 = gen_lowpart (V16QImode, d->op1);
33269 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
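/* Editor's worked example: for a V4SImode permutation { 1, 0, 3, 2 },
   eltsz is 4, so the loop above expands each element index into four
   byte indices, and the pshufb/vpperm control vector becomes
   { 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 }.  */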
33275 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
33276 in a single instruction. */
33279 expand_vec_perm_1 (struct expand_vec_perm_d *d)
33281 unsigned i, nelt = d->nelt;
33282 unsigned char perm2[MAX_VECT_LEN];
33284 /* Check plain VEC_SELECT first, because AVX has instructions that could
33285 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
33286 input where SEL+CONCAT may not. */
33287 if (d->op0 == d->op1)
33289 int mask = nelt - 1;
33291 for (i = 0; i < nelt; i++)
33292 perm2[i] = d->perm[i] & mask;
33294 if (expand_vselect (d->target, d->op0, perm2, nelt))
33297 /* There are plenty of patterns in sse.md that are written for
33298 SEL+CONCAT and are not replicated for a single op. Perhaps
33299 that should be changed, to avoid the nastiness here. */
33301 /* Recognize interleave style patterns, which means incrementing
33302 every other permutation operand. */
33303 for (i = 0; i < nelt; i += 2)
33305 perm2[i] = d->perm[i] & mask;
33306 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
33308 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
33311 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
33314 for (i = 0; i < nelt; i += 4)
33316 perm2[i + 0] = d->perm[i + 0] & mask;
33317 perm2[i + 1] = d->perm[i + 1] & mask;
33318 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
33319 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
33322 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
33327 /* Finally, try the fully general two operand permute. */
33328 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
33331 /* Recognize interleave style patterns with reversed operands. */
33332 if (d->op0 != d->op1)
33334 for (i = 0; i < nelt; ++i)
33336 unsigned e = d->perm[i];
33344 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
33348 /* Try the SSE4.1 blend variable merge instructions. */
33349 if (expand_vec_perm_blend (d))
33352 /* Try one of the AVX vpermil variable permutations. */
33353 if (expand_vec_perm_vpermil (d))
33356 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
33357 if (expand_vec_perm_pshufb (d))
33363 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33364 in terms of a pair of pshuflw + pshufhw instructions. */
33367 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
33369 unsigned char perm2[MAX_VECT_LEN];
33373 if (d->vmode != V8HImode || d->op0 != d->op1)
33376 /* The two permutations only operate in 64-bit lanes. */
33377 for (i = 0; i < 4; ++i)
33378 if (d->perm[i] >= 4)
33380 for (i = 4; i < 8; ++i)
33381 if (d->perm[i] < 4)
33387 /* Emit the pshuflw. */
33388 memcpy (perm2, d->perm, 4);
33389 for (i = 4; i < 8; ++i)
33391 ok = expand_vselect (d->target, d->op0, perm2, 8);
33394 /* Emit the pshufhw. */
33395 memcpy (perm2 + 4, d->perm + 4, 4);
33396 for (i = 0; i < 4; ++i)
33398 ok = expand_vselect (d->target, d->target, perm2, 8);
33404 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33405 the permutation using the SSSE3 palignr instruction. This succeeds
33406 when all of the elements in PERM fit within one vector and we merely
33407 need to shift them down so that a single vector permutation has a
33408 chance to succeed. */
33411 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
33413 unsigned i, nelt = d->nelt;
33418 /* Even with AVX, palignr only operates on 128-bit vectors. */
33419 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
33422 min = nelt, max = 0;
33423 for (i = 0; i < nelt; ++i)
33425 unsigned e = d->perm[i];
33431 if (min == 0 || max - min >= nelt)
33434 /* Given that we have SSSE3, we know we'll be able to implement the
33435 single operand permutation after the palignr with pshufb. */
33439 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
33440 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
33441 gen_lowpart (TImode, d->op1),
33442 gen_lowpart (TImode, d->op0), shift));
33444 d->op0 = d->op1 = d->target;
33447 for (i = 0; i < nelt; ++i)
33449 unsigned e = d->perm[i] - min;
33455 /* Test for the degenerate case where the alignment by itself
33456 produces the desired permutation. */
33460 ok = expand_vec_perm_1 (d);
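/* Editor's worked example: with nelt = 16 and perm = { 5, 6, ..., 20 },
   min = 5 and max = 20, so max - min < nelt and palignr shifts the
   concatenated operands down by 5 bytes; the residual permutation is
   then the identity { 0, 1, ..., 15 }, in_order holds, and the
   degenerate case above succeeds with no further shuffle.  */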
33466 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33467 a two vector permutation into a single vector permutation by using
33468 an interleave operation to merge the vectors. */
33471 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
33473 struct expand_vec_perm_d dremap, dfinal;
33474 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
33475 unsigned contents, h1, h2, h3, h4;
33476 unsigned char remap[2 * MAX_VECT_LEN];
33480 if (d->op0 == d->op1)
33483 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
33484 lanes. We can use similar techniques with the vperm2f128 instruction,
33485 but it requires slightly different logic. */
33486 if (GET_MODE_SIZE (d->vmode) != 16)
33489 /* Examine from whence the elements come. */
33491 for (i = 0; i < nelt; ++i)
33492 contents |= 1u << d->perm[i];
33494 /* Split the two input vectors into 4 halves. */
33495 h1 = (1u << nelt2) - 1;
33500 memset (remap, 0xff, sizeof (remap));
/* If the elements are all from the low halves, use interleave low;
   similarly for interleave high.  If the elements are from mis-matched
   halves, we can use shufps for V4SF/V4SI or do a DImode shuffle.  */
33506 if ((contents & (h1 | h3)) == contents)
33508 for (i = 0; i < nelt2; ++i)
33511 remap[i + nelt] = i * 2 + 1;
33512 dremap.perm[i * 2] = i;
33513 dremap.perm[i * 2 + 1] = i + nelt;
33516 else if ((contents & (h2 | h4)) == contents)
33518 for (i = 0; i < nelt2; ++i)
33520 remap[i + nelt2] = i * 2;
33521 remap[i + nelt + nelt2] = i * 2 + 1;
33522 dremap.perm[i * 2] = i + nelt2;
33523 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
33526 else if ((contents & (h1 | h4)) == contents)
33528 for (i = 0; i < nelt2; ++i)
33531 remap[i + nelt + nelt2] = i + nelt2;
33532 dremap.perm[i] = i;
33533 dremap.perm[i + nelt2] = i + nelt + nelt2;
33537 dremap.vmode = V2DImode;
33539 dremap.perm[0] = 0;
33540 dremap.perm[1] = 3;
33543 else if ((contents & (h2 | h3)) == contents)
33545 for (i = 0; i < nelt2; ++i)
33547 remap[i + nelt2] = i;
33548 remap[i + nelt] = i + nelt2;
33549 dremap.perm[i] = i + nelt2;
33550 dremap.perm[i + nelt2] = i + nelt;
33554 dremap.vmode = V2DImode;
33556 dremap.perm[0] = 1;
33557 dremap.perm[1] = 2;
33563 /* Use the remapping array set up above to move the elements from their
33564 swizzled locations into their final destinations. */
33566 for (i = 0; i < nelt; ++i)
33568 unsigned e = remap[d->perm[i]];
33569 gcc_assert (e < nelt);
33570 dfinal.perm[i] = e;
33572 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
33573 dfinal.op1 = dfinal.op0;
33574 dremap.target = dfinal.op0;
33576 /* Test if the final remap can be done with a single insn. For V4SFmode or
33577 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
33579 ok = expand_vec_perm_1 (&dfinal);
33580 seq = get_insns ();
33586 if (dremap.vmode != dfinal.vmode)
33588 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
33589 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
33590 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
33593 ok = expand_vec_perm_1 (&dremap);
33600 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
33601 permutation with two pshufb insns and an ior. We should have already
33602 failed all two instruction sequences. */
33605 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
33607 rtx rperm[2][16], vperm, l, h, op, m128;
33608 unsigned int i, nelt, eltsz;
33610 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
33612 gcc_assert (d->op0 != d->op1);
33615 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33617 /* Generate two permutation masks. If the required element is within
33618 the given vector it is shuffled into the proper lane. If the required
33619 element is in the other vector, force a zero into the lane by setting
33620 bit 7 in the permutation mask. */
33621 m128 = GEN_INT (-128);
33622 for (i = 0; i < nelt; ++i)
33624 unsigned j, e = d->perm[i];
33625 unsigned which = (e >= nelt);
33629 for (j = 0; j < eltsz; ++j)
33631 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
33632 rperm[1-which][i*eltsz + j] = m128;
33636 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
33637 vperm = force_reg (V16QImode, vperm);
33639 l = gen_reg_rtx (V16QImode);
33640 op = gen_lowpart (V16QImode, d->op0);
33641 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
33643 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
33644 vperm = force_reg (V16QImode, vperm);
33646 h = gen_reg_rtx (V16QImode);
33647 op = gen_lowpart (V16QImode, d->op1);
33648 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
33650 op = gen_lowpart (V16QImode, d->target);
33651 emit_insn (gen_iorv16qi3 (op, l, h));
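/* Editor's worked example: for a V16QImode permutation whose element 0
   is 17 (byte 1 of op1), the loop above sets rperm[1][0] = 1 and
   rperm[0][0] = -128; bit 7 of the control byte makes pshufb write a
   zero into that lane of l while h receives the real byte, and the
   final ior merges the two half-results.  */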
33656 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
33657 and extract-odd permutations. */
33660 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
33667 t1 = gen_reg_rtx (V4DFmode);
33668 t2 = gen_reg_rtx (V4DFmode);
33670 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
33671 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
33672 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
33674 /* Now an unpck[lh]pd will produce the result required. */
33676 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
33678 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
33684 int mask = odd ? 0xdd : 0x88;
33686 t1 = gen_reg_rtx (V8SFmode);
33687 t2 = gen_reg_rtx (V8SFmode);
33688 t3 = gen_reg_rtx (V8SFmode);
33690 /* Shuffle within the 128-bit lanes to produce:
33691 { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */
33692 emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
33695 /* Shuffle the lanes around to produce:
33696 { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }. */
33697 emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
33700 /* Shuffle within the 128-bit lanes to produce:
33701 { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }. */
33702 emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));
33704 /* Shuffle within the 128-bit lanes to produce:
33705 { 8 a c e c e 8 a } | { 9 b d f d f 9 b }. */
33706 emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));
33708 /* Shuffle the lanes around to produce:
33709 { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */
33710 emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
33719 /* These are always directly implementable by expand_vec_perm_1. */
33720 gcc_unreachable ();
33724 return expand_vec_perm_pshufb2 (d);
33727 /* We need 2*log2(N)-1 operations to achieve odd/even
33728 with interleave. */
33729 t1 = gen_reg_rtx (V8HImode);
33730 t2 = gen_reg_rtx (V8HImode);
33731 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
33732 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
33733 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
33734 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
33736 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
33738 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
33745 return expand_vec_perm_pshufb2 (d);
33748 t1 = gen_reg_rtx (V16QImode);
33749 t2 = gen_reg_rtx (V16QImode);
33750 t3 = gen_reg_rtx (V16QImode);
33751 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
33752 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
33753 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
33754 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
33755 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
33756 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
33758 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
33760 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
33766 gcc_unreachable ();
33772 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33773 extract-even and extract-odd permutations. */
33776 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
33778 unsigned i, odd, nelt = d->nelt;
33781 if (odd != 0 && odd != 1)
33784 for (i = 1; i < nelt; ++i)
33785 if (d->perm[i] != 2 * i + odd)
33788 return expand_vec_perm_even_odd_1 (d, odd);
33791 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
33792 permutations. We assume that expand_vec_perm_1 has already failed. */
33795 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
33797 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
33798 enum machine_mode vmode = d->vmode;
33799 unsigned char perm2[4];
33807 /* These are special-cased in sse.md so that we can optionally
33808 use the vbroadcast instruction. They expand to two insns
33809 if the input happens to be in a register. */
33810 gcc_unreachable ();
33816 /* These are always implementable using standard shuffle patterns. */
33817 gcc_unreachable ();
33821 /* These can be implemented via interleave. We save one insn by
33822 stopping once we have promoted to V4SImode and then use pshufd. */
33825 optab otab = vec_interleave_low_optab;
33829 otab = vec_interleave_high_optab;
33834 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
33835 vmode = get_mode_wider_vector (vmode);
33836 op0 = gen_lowpart (vmode, op0);
33838 while (vmode != V4SImode);
33840 memset (perm2, elt, 4);
33841 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
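/* Editor's worked example: broadcasting element 6 of a V16QImode
   vector: 6 < 8, so interleave low doubles the bytes (now V8HImode,
   element 6); then 6 >= 4, so interleave high is used and elt becomes
   2 (now V4SImode); finally pshufd with { 2, 2, 2, 2 } replicates the
   promoted element across the whole vector.  */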
33846 gcc_unreachable ();
33850 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33851 broadcast permutations. */
33854 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
33856 unsigned i, elt, nelt = d->nelt;
33858 if (d->op0 != d->op1)
33862 for (i = 1; i < nelt; ++i)
33863 if (d->perm[i] != elt)
33866 return expand_vec_perm_broadcast_1 (d);
33869 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
33870 With all of the interface bits taken care of, perform the expansion
33871 in D and return true on success. */
33874 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
33876 /* Try a single instruction expansion. */
33877 if (expand_vec_perm_1 (d))
33880 /* Try sequences of two instructions. */
33882 if (expand_vec_perm_pshuflw_pshufhw (d))
33885 if (expand_vec_perm_palignr (d))
33888 if (expand_vec_perm_interleave2 (d))
33891 if (expand_vec_perm_broadcast (d))
33894 /* Try sequences of three instructions. */
33896 if (expand_vec_perm_pshufb2 (d))
33899 /* ??? Look for narrow permutations whose element orderings would
33900 allow the promotion to a wider mode. */
33902 /* ??? Look for sequences of interleave or a wider permute that place
33903 the data into the correct lanes for a half-vector shuffle like
33904 pshuf[lh]w or vpermilps. */
33906 /* ??? Look for sequences of interleave that produce the desired results.
33907 The combinatorics of punpck[lh] get pretty ugly... */
33909 if (expand_vec_perm_even_odd (d))
33915 /* Extract the values from the vector CST into the permutation array in D.
33916 Return 0 on error, 1 if all values from the permutation come from the
33917 first vector, 2 if all values from the second vector, and 3 otherwise. */
33920 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
33922 tree list = TREE_VECTOR_CST_ELTS (cst);
33923 unsigned i, nelt = d->nelt;
33926 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
33928 unsigned HOST_WIDE_INT e;
33930 if (!host_integerp (TREE_VALUE (list), 1))
33932 e = tree_low_cst (TREE_VALUE (list), 1);
33936 ret |= (e < nelt ? 1 : 2);
33939 gcc_assert (list == NULL);
/* If all elements are from the second vector, fold them to the first.  */
33943 for (i = 0; i < nelt; ++i)
33944 d->perm[i] -= nelt;
33950 ix86_expand_vec_perm_builtin (tree exp)
33952 struct expand_vec_perm_d d;
33953 tree arg0, arg1, arg2;
33955 arg0 = CALL_EXPR_ARG (exp, 0);
33956 arg1 = CALL_EXPR_ARG (exp, 1);
33957 arg2 = CALL_EXPR_ARG (exp, 2);
33959 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
33960 d.nelt = GET_MODE_NUNITS (d.vmode);
33961 d.testing_p = false;
33962 gcc_assert (VECTOR_MODE_P (d.vmode));
33964 if (TREE_CODE (arg2) != VECTOR_CST)
33966 error_at (EXPR_LOCATION (exp),
33967 "vector permutation requires vector constant");
33971 switch (extract_vec_perm_cst (&d, arg2))
33977 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
33981 if (!operand_equal_p (arg0, arg1, 0))
33983 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33984 d.op0 = force_reg (d.vmode, d.op0);
33985 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33986 d.op1 = force_reg (d.vmode, d.op1);
/* The elements of PERM do not suggest that only the first operand
   is used, but both operands are identical.  Allow easier matching
   of the permutation by folding the permutation into the single
   input operand.  */
33995 unsigned i, nelt = d.nelt;
33996 for (i = 0; i < nelt; ++i)
33997 if (d.perm[i] >= nelt)
34003 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
34004 d.op0 = force_reg (d.vmode, d.op0);
34009 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
34010 d.op0 = force_reg (d.vmode, d.op0);
34015 d.target = gen_reg_rtx (d.vmode);
34016 if (ix86_expand_vec_perm_builtin_1 (&d))
/* For compiler generated permutations, we should never get here, because
   the compiler should also be checking the ok hook.  But since this is a
   builtin the user has access to, don't abort.  */
34025 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
34028 sorry ("vector permutation (%d %d %d %d)",
34029 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
34032 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
34033 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
34034 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
34037 sorry ("vector permutation "
34038 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
34039 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
34040 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
34041 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
34042 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
34045 gcc_unreachable ();
34048 return CONST0_RTX (d.vmode);
34051 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
34054 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
34056 struct expand_vec_perm_d d;
34060 d.vmode = TYPE_MODE (vec_type);
34061 d.nelt = GET_MODE_NUNITS (d.vmode);
34062 d.testing_p = true;
34064 /* Given sufficient ISA support we can just return true here
34065 for selected vector modes. */
34066 if (GET_MODE_SIZE (d.vmode) == 16)
34068 /* All implementable with a single vpperm insn. */
34071 /* All implementable with 2 pshufb + 1 ior. */
34074 /* All implementable with shufpd or unpck[lh]pd. */
34079 vec_mask = extract_vec_perm_cst (&d, mask);
/* This hook cannot be called in response to something that the
   user does (unlike the builtin expander), so we shouldn't ever see
   an error generated from the extract.  */
34084 gcc_assert (vec_mask > 0 && vec_mask <= 3);
34085 one_vec = (vec_mask != 3);
34087 /* Implementable with shufps or pshufd. */
34088 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
34091 /* Otherwise we have to go through the motions and see if we can
34092 figure out how to generate the requested permutation. */
34093 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
34094 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
34096 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
34099 ret = ix86_expand_vec_perm_builtin_1 (&d);
34106 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
34108 struct expand_vec_perm_d d;
34114 d.vmode = GET_MODE (targ);
34115 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
34116 d.testing_p = false;
34118 for (i = 0; i < nelt; ++i)
34119 d.perm[i] = i * 2 + odd;
34121 /* We'll either be able to implement the permutation directly... */
34122 if (expand_vec_perm_1 (&d))
34125 /* ... or we use the special-case patterns. */
34126 expand_vec_perm_even_odd_1 (&d, odd);
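/* Editor's usage note: for V4SFmode with odd == 0 the loop above builds
   perm = { 0, 2, 4, 6 }, selecting the even-numbered elements of the
   concatenation of OP0 and OP1; this is the de-interleaving shape the
   vectorizer asks for when splitting strided data into even and odd
   streams.  */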
/* This function returns the calling-ABI-specific va_list type node.
   It returns the FNDECL-specific va_list type.  */
34133 ix86_fn_abi_va_list (tree fndecl)
34136 return va_list_type_node;
34137 gcc_assert (fndecl != NULL_TREE);
34139 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
34140 return ms_va_list_type_node;
34142 return sysv_va_list_type_node;
34145 /* Returns the canonical va_list type specified by TYPE. If there
   is no valid TYPE provided, it returns NULL_TREE.  */
34149 ix86_canonical_va_list_type (tree type)
34153 /* Resolve references and pointers to va_list type. */
34154 if (TREE_CODE (type) == MEM_REF)
34155 type = TREE_TYPE (type);
34156 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
34157 type = TREE_TYPE (type);
34158 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
34159 type = TREE_TYPE (type);
34161 if (TARGET_64BIT && va_list_type_node != NULL_TREE)
34163 wtype = va_list_type_node;
34164 gcc_assert (wtype != NULL_TREE);
34166 if (TREE_CODE (wtype) == ARRAY_TYPE)
34168 /* If va_list is an array type, the argument may have decayed
34169 to a pointer type, e.g. by being passed to another function.
34170 In that case, unwrap both types so that we can compare the
34171 underlying records. */
34172 if (TREE_CODE (htype) == ARRAY_TYPE
34173 || POINTER_TYPE_P (htype))
34175 wtype = TREE_TYPE (wtype);
34176 htype = TREE_TYPE (htype);
34179 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34180 return va_list_type_node;
34181 wtype = sysv_va_list_type_node;
34182 gcc_assert (wtype != NULL_TREE);
34184 if (TREE_CODE (wtype) == ARRAY_TYPE)
34186 /* If va_list is an array type, the argument may have decayed
34187 to a pointer type, e.g. by being passed to another function.
34188 In that case, unwrap both types so that we can compare the
34189 underlying records. */
34190 if (TREE_CODE (htype) == ARRAY_TYPE
34191 || POINTER_TYPE_P (htype))
34193 wtype = TREE_TYPE (wtype);
34194 htype = TREE_TYPE (htype);
34197 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34198 return sysv_va_list_type_node;
34199 wtype = ms_va_list_type_node;
34200 gcc_assert (wtype != NULL_TREE);
34202 if (TREE_CODE (wtype) == ARRAY_TYPE)
34204 /* If va_list is an array type, the argument may have decayed
34205 to a pointer type, e.g. by being passed to another function.
34206 In that case, unwrap both types so that we can compare the
34207 underlying records. */
34208 if (TREE_CODE (htype) == ARRAY_TYPE
34209 || POINTER_TYPE_P (htype))
34211 wtype = TREE_TYPE (wtype);
34212 htype = TREE_TYPE (htype);
34215 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34216 return ms_va_list_type_node;
34219 return std_canonical_va_list_type (type);
34222 /* Iterate through the target-specific builtin types for va_list.
34223 IDX denotes the iterator, *PTREE is set to the result type of
34224 the va_list builtin, and *PNAME to its internal type.
34225 Returns zero if there is no element for this index, otherwise
34226 IDX should be increased upon the next call.
34227 Note, do not iterate a base builtin's name like __builtin_va_list.
34228 Used from c_common_nodes_and_builtins. */
34231 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
34241 *ptree = ms_va_list_type_node;
34242 *pname = "__builtin_ms_va_list";
34246 *ptree = sysv_va_list_type_node;
34247 *pname = "__builtin_sysv_va_list";
34255 #undef TARGET_SCHED_DISPATCH
34256 #define TARGET_SCHED_DISPATCH has_dispatch
34257 #undef TARGET_SCHED_DISPATCH_DO
34258 #define TARGET_SCHED_DISPATCH_DO do_dispatch
34260 /* The size of the dispatch window is the total number of bytes of
34261 object code allowed in a window. */
34262 #define DISPATCH_WINDOW_SIZE 16
34264 /* Number of dispatch windows considered for scheduling. */
34265 #define MAX_DISPATCH_WINDOWS 3
34267 /* Maximum number of instructions in a window. */
34270 /* Maximum number of immediate operands in a window. */
34273 /* Maximum number of immediate bits allowed in a window. */
34274 #define MAX_IMM_SIZE 128
34276 /* Maximum number of 32 bit immediates allowed in a window. */
34277 #define MAX_IMM_32 4
34279 /* Maximum number of 64 bit immediates allowed in a window. */
34280 #define MAX_IMM_64 2
34282 /* Maximum total of loads or prefetches allowed in a window. */
34285 /* Maximum total of stores allowed in a window. */
34286 #define MAX_STORE 1
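/* Editor's note on the limits above: a single 16-byte dispatch window
   may hold at most MAX_IMM_32 (4) 32-bit immediates, MAX_IMM_64 (2)
   64-bit immediates, MAX_IMM_SIZE (128) total immediate bits, and
   MAX_STORE (1) store; an instruction that would exceed any of these
   counters forces the scheduler to start a new window.  */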
/* Dispatch groups.  Instructions that affect the mix in a dispatch window.  */
34293 enum dispatch_group {
/* Number of allowable groups in a dispatch window.  It is an array
   indexed by dispatch_group enum.  100 is used as a big number,
   because the number of these kinds of operations does not have any
   effect in a dispatch window, but we need them for other reasons in
   the table.  */
34313 static unsigned int num_allowable_groups[disp_last] = {
34314 0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
34317 char group_name[disp_last + 1][16] = {
34318 "disp_no_group", "disp_load", "disp_store", "disp_load_store",
34319 "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
34320 "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
34323 /* Instruction path. */
34326 path_single, /* Single micro op. */
34327 path_double, /* Double micro op. */
path_multi, /* Instructions with more than 2 micro ops.  */
/* sched_insn_info defines a window over the instructions scheduled in
   the basic block.  It contains a pointer to the insn_info table and
   the instruction scheduled.

   Windows are allocated for each basic block and are linked
   together.  */
34338 typedef struct sched_insn_info_s {
34340 enum dispatch_group group;
34341 enum insn_path path;
/* Linked list of dispatch windows.  This is a two-way list of
   dispatch windows of a basic block.  It contains information about
   the number of uops in the window and the total number of
   instructions and of bytes in the object code for this dispatch
   window.  */
34351 typedef struct dispatch_windows_s {
  int num_insn;            /* Number of insns in the window.  */
34353 int num_uops; /* Number of uops in the window. */
34354 int window_size; /* Number of bytes in the window. */
  int window_num;          /* Window number, either 0 or 1.  */
34356 int num_imm; /* Number of immediates in an insn. */
34357 int num_imm_32; /* Number of 32 bit immediates in an insn. */
34358 int num_imm_64; /* Number of 64 bit immediates in an insn. */
34359 int imm_size; /* Total immediates in the window. */
34360 int num_loads; /* Total memory loads in the window. */
34361 int num_stores; /* Total memory stores in the window. */
34362 int violation; /* Violation exists in window. */
34363 sched_insn_info *window; /* Pointer to the window. */
34364 struct dispatch_windows_s *next;
34365 struct dispatch_windows_s *prev;
34366 } dispatch_windows;
/* Immediate values used in an insn.  */
34369 typedef struct imm_info_s
34376 static dispatch_windows *dispatch_window_list;
34377 static dispatch_windows *dispatch_window_list1;
34379 /* Get dispatch group of insn. */
34381 static enum dispatch_group
34382 get_mem_group (rtx insn)
34384 enum attr_memory memory;
34386 if (INSN_CODE (insn) < 0)
34387 return disp_no_group;
34388 memory = get_attr_memory (insn);
34389 if (memory == MEMORY_STORE)
34392 if (memory == MEMORY_LOAD)
34395 if (memory == MEMORY_BOTH)
34396 return disp_load_store;
34398 return disp_no_group;
34401 /* Return true if insn is a compare instruction. */
34406 enum attr_type type;
34408 type = get_attr_type (insn);
34409 return (type == TYPE_TEST
34410 || type == TYPE_ICMP
34411 || type == TYPE_FCMP
34412 || GET_CODE (PATTERN (insn)) == COMPARE);
/* Return true if a dispatch violation was encountered.  */
34418 dispatch_violation (void)
34420 if (dispatch_window_list->next)
34421 return dispatch_window_list->next->violation;
34422 return dispatch_window_list->violation;
/* Return true if insn is a branch instruction.  */

static bool
is_branch (rtx insn)
{
  return (CALL_P (insn) || JUMP_P (insn));
}
/* Return true if insn is a prefetch instruction.  */

static bool
is_prefetch (rtx insn)
{
  return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
}
/* This function initializes a dispatch window and the list container holding a
   pointer to the window.  */

static void
init_window (int window_num)
{
  int i;
  dispatch_windows *new_list;

  if (window_num == 0)
    new_list = dispatch_window_list;
  else
    new_list = dispatch_window_list1;

  new_list->num_insn = 0;
  new_list->num_uops = 0;
  new_list->window_size = 0;
  new_list->next = NULL;
  new_list->prev = NULL;
  new_list->window_num = window_num;
  new_list->num_imm = 0;
  new_list->num_imm_32 = 0;
  new_list->num_imm_64 = 0;
  new_list->imm_size = 0;
  new_list->num_loads = 0;
  new_list->num_stores = 0;
  new_list->violation = false;

  for (i = 0; i < MAX_INSN; i++)
    {
      new_list->window[i].insn = NULL;
      new_list->window[i].group = disp_no_group;
      new_list->window[i].path = no_path;
      new_list->window[i].byte_len = 0;
      new_list->window[i].imm_bytes = 0;
    }
}
/* This function allocates and initializes a dispatch window and the
   list container holding a pointer to the window.  */

static dispatch_windows *
allocate_window (void)
{
  dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
  new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);

  return new_list;
}
/* This routine initializes the dispatch scheduling information.  It
   initiates building dispatch scheduler tables and constructs the
   first dispatch window.  */

static void
init_dispatch_sched (void)
{
  /* Allocate a dispatch list and a window.  */
  dispatch_window_list = allocate_window ();
  dispatch_window_list1 = allocate_window ();
  init_window (0);
  init_window (1);
}
/* This function returns true if a branch is detected.  End of a basic block
   does not have to be a branch, but here we assume only branches end a
   window.  */

static bool
is_end_basic_block (enum dispatch_group group)
{
  return group == disp_branch;
}
/* This function is called when the end of a window processing is reached.  */

static void
process_end_window (void)
{
  gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
  if (dispatch_window_list->next)
    {
      gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
      gcc_assert (dispatch_window_list->window_size
                  + dispatch_window_list1->window_size <= 48);
      init_window (1);
    }
  init_window (0);
}
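
/* The asserts above encode the window invariants: each window holds
   at most MAX_INSN insns and the pair of windows never exceeds 48
   bytes of instructions.  */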
/* Allocates a new dispatch window and adds it to WINDOW_LIST.
   WINDOW_NUM is either 0 or 1.  A maximum of two windows are generated
   for 48 bytes of instructions.  Note that these windows are not dispatch
   windows whose sizes are DISPATCH_WINDOW_SIZE.  */

static dispatch_windows *
allocate_next_window (int window_num)
{
  if (window_num == 0)
    {
      if (dispatch_window_list->next)
        init_window (1);
      init_window (0);
      return dispatch_window_list;
    }

  dispatch_window_list->next = dispatch_window_list1;
  dispatch_window_list1->prev = dispatch_window_list;

  return dispatch_window_list1;
}
/* Increment the number of immediate operands of an instruction.
   Callback for for_each_rtx.  */

static int
find_constant_1 (rtx *in_rtx, imm_info *imm_values)
{
  if (!*in_rtx)
    return 0;

  switch (GET_CODE (*in_rtx))
    {
    case CONST:
    case SYMBOL_REF:
    case CONST_INT:
      (imm_values->imm)++;
      if (x86_64_immediate_operand (*in_rtx, SImode))
        (imm_values->imm32)++;
      else
        (imm_values->imm64)++;
      break;

    case CONST_DOUBLE:
      (imm_values->imm)++;
      (imm_values->imm64)++;
      break;

    case CODE_LABEL:
      if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
        {
          (imm_values->imm)++;
          (imm_values->imm32)++;
        }
      break;

    default:
      break;
    }

  return 0;
}
/* Compute number of immediate operands of an instruction.  */

static void
find_constant (rtx in_rtx, imm_info *imm_values)
{
  for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
                (rtx_function) find_constant_1, (void *) imm_values);
}
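
/* for_each_rtx walks every sub-rtx of the pattern, so immediates are
   counted wherever they occur inside the insn, not only in top-level
   operands.  */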
/* Return total size of immediate operands of an instruction along with number
   of corresponding immediate-operands.  It initializes its parameters to zero
   before calling FIND_CONSTANT.
   INSN is the input instruction.  IMM is the total of immediates.
   IMM32 is the number of 32 bit immediates.  IMM64 is the number of 64
   bit immediates.  */

static int
get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
{
  imm_info imm_values = {0, 0, 0};

  find_constant (insn, &imm_values);
  *imm = imm_values.imm;
  *imm32 = imm_values.imm32;
  *imm64 = imm_values.imm64;
  return imm_values.imm32 * 4 + imm_values.imm64 * 8;
}
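
/* For example, an insn carrying one immediate that is not
   sign-extendable from 32 bits yields *imm = 1, *imm64 = 1 and a
   return value of 8 bytes; a single 32-bit immediate yields *imm = 1,
   *imm32 = 1 and a return value of 4.  */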
/* This function indicates if an operand of an instruction is an
   immediate.  */

static bool
has_immediate (rtx insn)
{
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (insn)
    return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
                               &num_imm64_operand);
  return false;
}
/* Return single or double path for instructions.  */

static enum insn_path
get_insn_path (rtx insn)
{
  enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);

  if ((int)path == 0)
    return path_single;

  if ((int)path == 1)
    return path_double;

  return path_multi;
}
/* Return insn dispatch group.  */

static enum dispatch_group
get_insn_group (rtx insn)
{
  enum dispatch_group group = get_mem_group (insn);
  if (group)
    return group;

  if (is_branch (insn))
    return disp_branch;

  if (is_cmp (insn))
    return disp_cmp;

  if (has_immediate (insn))
    return disp_imm;

  if (is_prefetch (insn))
    return disp_prefetch;

  return disp_no_group;
}
/* Count number of GROUP restricted instructions in a dispatch
   window WINDOW_LIST.  Returns BIG if adding INSN would violate a
   window limit.  */

static int
count_num_restricted (rtx insn, dispatch_windows *window_list)
{
  enum dispatch_group group = get_insn_group (insn);
  int imm_size;
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (group == disp_no_group)
    return 0;

  if (group == disp_imm)
    {
      imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
                                     &num_imm64_operand);
      if (window_list->imm_size + imm_size > MAX_IMM_SIZE
          || num_imm_operand + window_list->num_imm > MAX_IMM
          || (num_imm32_operand > 0
              && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
                  || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
          || (num_imm64_operand > 0
              && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
                  || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
          || (window_list->imm_size + imm_size == MAX_IMM_SIZE
              && num_imm64_operand > 0
              && ((window_list->num_imm_64 > 0
                   && window_list->num_insn >= 2)
                  || window_list->num_insn >= 3)))
        return BIG;

      return 1;
    }

  if ((group == disp_load_store
       && (window_list->num_loads >= MAX_LOAD
           || window_list->num_stores >= MAX_STORE))
      || ((group == disp_load
           || group == disp_prefetch)
          && window_list->num_loads >= MAX_LOAD)
      || (group == disp_store
          && window_list->num_stores >= MAX_STORE))
    return BIG;

  return 1;
}
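
/* In the immediate accounting above a 64-bit immediate consumes two
   of the MAX_IMM_32 32-bit slots, hence the num_imm_64 * 2 and
   num_imm64_operand * 2 terms.  */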
/* This function returns true if insn satisfies dispatch rules on the
   last window scheduled.  */

static bool
fits_dispatch_window (rtx insn)
{
  dispatch_windows *window_list = dispatch_window_list;
  dispatch_windows *window_list_next = dispatch_window_list->next;
  unsigned int num_restrict;
  enum dispatch_group group = get_insn_group (insn);
  enum insn_path path = get_insn_path (insn);
  int sum;

  /* Make disp_cmp and disp_jcc get scheduled at the latest.  These
     instructions should be given the lowest priority in the
     scheduling process in Haifa scheduler to make sure they will be
     scheduled in the same dispatch window as the reference to them.  */
  if (group == disp_jcc || group == disp_cmp)
    return false;

  /* Check nonrestricted.  */
  if (group == disp_no_group || group == disp_branch)
    return true;

  /* Get last dispatch window.  */
  if (window_list_next)
    window_list = window_list_next;

  if (window_list->window_num == 1)
    {
      sum = window_list->prev->window_size + window_list->window_size;

      if (sum == 32
          || (min_insn_size (insn) + sum) >= 48)
        /* Window 1 is full.  Go for next window.  */
        return true;
    }

  num_restrict = count_num_restricted (insn, window_list);

  if (num_restrict > num_allowable_groups[group])
    return false;

  /* See if it fits in the first window.  */
  if (window_list->window_num == 0)
    {
      /* The first window should have only single and double path
         uops.  */
      if (path == path_double
          && (window_list->num_uops + 2) > MAX_INSN)
        return false;
      else if (path != path_single)
        return false;
    }
  return true;
}
/* Add an instruction INSN with NUM_UOPS micro-operations to the
   dispatch window WINDOW_LIST.  */

static void
add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
{
  int byte_len = min_insn_size (insn);
  int num_insn = window_list->num_insn;
  int imm_size;
  sched_insn_info *window = window_list->window;
  enum dispatch_group group = get_insn_group (insn);
  enum insn_path path = get_insn_path (insn);
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (!window_list->violation && group != disp_cmp
      && !fits_dispatch_window (insn))
    window_list->violation = true;

  imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
                                 &num_imm64_operand);

  /* Initialize window with new instruction.  */
  window[num_insn].insn = insn;
  window[num_insn].byte_len = byte_len;
  window[num_insn].group = group;
  window[num_insn].path = path;
  window[num_insn].imm_bytes = imm_size;

  window_list->window_size += byte_len;
  window_list->num_insn = num_insn + 1;
  window_list->num_uops = window_list->num_uops + num_uops;
  window_list->imm_size += imm_size;
  window_list->num_imm += num_imm_operand;
  window_list->num_imm_32 += num_imm32_operand;
  window_list->num_imm_64 += num_imm64_operand;

  if (group == disp_store)
    window_list->num_stores += 1;
  else if (group == disp_load
           || group == disp_prefetch)
    window_list->num_loads += 1;
  else if (group == disp_load_store)
    {
      window_list->num_stores += 1;
      window_list->num_loads += 1;
    }
}
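
/* Note that prefetches are accounted as loads, matching
   count_num_restricted, which also checks disp_prefetch against
   MAX_LOAD.  */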
/* Adds a scheduled instruction, INSN, to the current dispatch window.
   If the total bytes of instructions or the number of instructions in
   the window exceed the allowable maximum, it allocates a new window.  */

static void
add_to_dispatch_window (rtx insn)
{
  int byte_len;
  dispatch_windows *window_list;
  dispatch_windows *next_list;
  dispatch_windows *window0_list;
  enum insn_path path;
  enum dispatch_group insn_group;
  bool insn_fits;
  int num_insn;
  int num_uops;
  int window_num;
  int insn_num_uops;
  int sum;

  if (INSN_CODE (insn) < 0)
    return;

  byte_len = min_insn_size (insn);
  window_list = dispatch_window_list;
  next_list = window_list->next;
  path = get_insn_path (insn);
  insn_group = get_insn_group (insn);

  /* Get the last dispatch window.  */
  if (next_list)
    window_list = dispatch_window_list->next;

  if (path == path_single)
    insn_num_uops = 1;
  else if (path == path_double)
    insn_num_uops = 2;
  else
    insn_num_uops = (int) path;

  /* If current window is full, get a new window.
     Window number zero is full, if MAX_INSN uops are scheduled in it.
     Window number one is full, if window zero's bytes plus window
     one's bytes is 32, or if the bytes of the new instruction added
     to the total makes it greater than 48, or it has already MAX_INSN
     instructions in it.  */
  num_insn = window_list->num_insn;
  num_uops = window_list->num_uops;
  window_num = window_list->window_num;
  insn_fits = fits_dispatch_window (insn);

  if (num_insn >= MAX_INSN
      || num_uops + insn_num_uops > MAX_INSN
      || !(insn_fits))
    {
      window_num = ~window_num & 1;
      window_list = allocate_next_window (window_num);
    }

  if (window_num == 0)
    {
      add_insn_window (insn, window_list, insn_num_uops);
      if (window_list->num_insn >= MAX_INSN
          && insn_group == disp_branch)
        {
          process_end_window ();
          return;
        }
    }
  else if (window_num == 1)
    {
      window0_list = window_list->prev;
      sum = window0_list->window_size + window_list->window_size;
      if (sum == 32
          || (byte_len + sum) >= 48)
        {
          process_end_window ();
          window_list = dispatch_window_list;
        }

      add_insn_window (insn, window_list, insn_num_uops);
    }
  else
    gcc_unreachable ();

  if (is_end_basic_block (insn_group))
    {
      /* End of basic block is reached; do end-of-basic-block processing.  */
      process_end_window ();
      return;
    }
}
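
/* The expression "~window_num & 1" above simply toggles between the
   two window numbers: once window 0 holds MAX_INSN uops the next insn
   opens window 1, and once the pair reaches 32 bytes or would exceed
   48 bytes, process_end_window flushes both and scheduling restarts
   at window 0.  */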
/* Print the dispatch window, WINDOW_NUM, to FILE.  */

DEBUG_FUNCTION static void
debug_dispatch_window_file (FILE *file, int window_num)
{
  dispatch_windows *list;
  int i;

  if (window_num == 0)
    list = dispatch_window_list;
  else
    list = dispatch_window_list1;

  fprintf (file, "Window #%d:\n", list->window_num);
  fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
           list->num_insn, list->num_uops, list->window_size);
  fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
           list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);

  fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
           list->num_stores);
  fprintf (file, " insn info:\n");

  for (i = 0; i < MAX_INSN; i++)
    {
      if (!list->window[i].insn)
        break;
      fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
               i, group_name[list->window[i].group],
               i, (void *)list->window[i].insn,
               i, list->window[i].path,
               i, list->window[i].byte_len,
               i, list->window[i].imm_bytes);
    }
}
/* Print to stdout a dispatch window.  */

DEBUG_FUNCTION void
debug_dispatch_window (int window_num)
{
  debug_dispatch_window_file (stdout, window_num);
}
/* Print INSN dispatch information to FILE.  */

DEBUG_FUNCTION static void
debug_insn_dispatch_info_file (FILE *file, rtx insn)
{
  int byte_len;
  enum insn_path path;
  enum dispatch_group group;
  int imm_size;
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (INSN_CODE (insn) < 0)
    return;

  byte_len = min_insn_size (insn);
  path = get_insn_path (insn);
  group = get_insn_group (insn);
  imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
                                 &num_imm64_operand);

  fprintf (file, " insn info:\n");
  fprintf (file, " group = %s, path = %d, byte_len = %d\n",
           group_name[group], path, byte_len);
  fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
           num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
}
/* Print to STDOUT the status of the ready list with respect to
   dispatch windows.  */

DEBUG_FUNCTION void
debug_ready_dispatch (void)
{
  int i;
  int no_ready = number_in_ready ();

  fprintf (stdout, "Number of ready: %d\n", no_ready);

  for (i = 0; i < no_ready; i++)
    debug_insn_dispatch_info_file (stdout, get_ready_element (i));
}
/* This routine is the driver of the dispatch scheduler.  */

static void
do_dispatch (rtx insn, int mode)
{
  if (mode == DISPATCH_INIT)
    init_dispatch_sched ();
  else if (mode == ADD_TO_DISPATCH_WINDOW)
    add_to_dispatch_window (insn);
}
/* Return TRUE if Dispatch Scheduling is supported.  */

static bool
has_dispatch (rtx insn, int action)
{
  if (ix86_tune == PROCESSOR_BDVER1 && flag_dispatch_scheduler)
    switch (action)
      {
      default:
        return false;

      case IS_DISPATCH_ON:
        return true;

      case IS_CMP:
        return is_cmp (insn);

      case DISPATCH_VIOLATION:
        return dispatch_violation ();

      case FITS_DISPATCH_WINDOW:
        return fits_dispatch_window (insn);
      }

  return false;
}
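
/* Dispatch scheduling is thus active only when tuning for
   PROCESSOR_BDVER1 with flag_dispatch_scheduler set (presumably via
   the corresponding -mdispatch-scheduler option).  */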
/* ??? No autovectorization into MMX or 3DNOW until we can reliably
   place emms and femms instructions.  */

static enum machine_mode
ix86_preferred_simd_mode (enum machine_mode mode)
{
  /* Disable the double precision vectorizer if needed.  */
  if (mode == DFmode && !TARGET_VECTORIZE_DOUBLE)
    return word_mode;

  if (!TARGET_AVX && !TARGET_SSE)
    return word_mode;

  switch (mode)
    {
    case SFmode:
      return (TARGET_AVX && !flag_prefer_avx128) ? V8SFmode : V4SFmode;
    case DFmode:
      return (TARGET_AVX && !flag_prefer_avx128) ? V4DFmode : V2DFmode;
    default:
      return word_mode;
    }
}
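
/* For example, with AVX enabled and 256-bit vectors preferred
   (flag_prefer_avx128 clear, presumably the -mprefer-avx128 option),
   SFmode data is vectorized eight elements at a time as V8SFmode;
   plain SSE falls back to V4SFmode.  */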
/* If AVX is enabled then try vectorizing with both 256bit and 128bit
   vectors.  */

static unsigned int
ix86_autovectorize_vector_sizes (void)
{
  return TARGET_AVX ? 32 | 16 : 0;
}
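
/* The result is a bitmask of candidate vector sizes in bytes:
   32 | 16 lets the vectorizer try 256-bit vectors first and fall back
   to 128-bit, while 0 restricts it to the preferred SIMD mode alone.  */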
/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#  undef TARGET_MERGE_DECL_ATTRIBUTES
#  define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif
#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif
#undef TARGET_PROFILE_BEFORE_PROLOGUE
#define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND ix86_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE ix86_option_override
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE ix86_option_optimization_table
#undef TARGET_OPTION_INIT_STRUCT
#define TARGET_OPTION_INIT_STRUCT ix86_option_init_struct

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_ENUM_VA_LIST_P
#define TARGET_ENUM_VA_LIST_P ix86_enum_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG ix86_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS ix86_return_pops_args

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_SUPPORTS_SPLIT_STACK
#define TARGET_SUPPORTS_SPLIT_STACK ix86_supports_split_stack

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
  ix86_vectorize_builtin_vec_perm
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
  ix86_vectorize_builtin_vec_perm_ok
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  ix86_preferred_simd_mode
#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
  ix86_autovectorize_vector_sizes

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage
#if TARGET_MACHO
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS darwin_rename_builtins
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"