/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
24 #include "coretypes.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
34 #include "insn-codes.h"
35 #include "insn-attr.h"
42 #include "diagnostic-core.h"
44 #include "basic-block.h"
47 #include "target-def.h"
48 #include "langhooks.h"
54 #include "tm-constrs.h"
58 #include "dwarf2out.h"
59 #include "sched-int.h"
enum upper_128bits_state
{
  unknown = 0,
  unused,
  used
};

typedef struct block_info_def
{
  /* State of the upper 128bits of AVX registers at exit.  */
  enum upper_128bits_state state;
  /* TRUE if state of the upper 128bits of AVX registers is unchanged
     in this block.  */
  bool unchanged;
  /* TRUE if block has been processed.  */
  bool processed;
  /* TRUE if block has been scanned.  */
  bool scanned;
  /* Previous state of the upper 128bits of AVX registers at entry.  */
  enum upper_128bits_state prev;
} *block_info;

#define BLOCK_INFO(B) ((block_info) (B)->aux)
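/* Usage sketch (illustrative, not part of the pass itself): the pass
   below hangs a block_info_def off each basic block's aux field, so
   per-block dataflow facts are read and written through BLOCK_INFO,
   e.g.

       BLOCK_INFO (bb)->state = used;
       if (BLOCK_INFO (bb)->processed)
	 ...

   alloc_aux_for_blocks/free_aux_for_blocks in move_or_delete_vzeroupper
   manage the underlying storage.  */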
enum call_avx256_state
{
  /* Callee returns 256bit AVX register.  */
  callee_return_avx256 = -1,
  /* Callee returns and passes 256bit AVX register.  */
  callee_return_pass_avx256,
  /* Callee passes 256bit AVX register.  */
  callee_pass_avx256,
  /* Callee doesn't return or pass 256bit AVX register, or no
     256bit AVX register in function return.  */
  call_no_avx256,
  /* vzeroupper intrinsic.  */
  vzeroupper_intrinsic
};
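/* Note: the operand of a vzeroupper UNSPEC_VOLATILE carries one of the
   values above; move_or_delete_vzeroupper_2 reads it back with
   INTVAL (XVECEXP (pat, 0, 0)) to decide whether the callee returns
   and/or passes a 256bit AVX register.  */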
/* Check if a 256bit AVX register is referenced in stores.  */

static void
check_avx256_stores (rtx dest, const_rtx set, void *data)
{
  if ((REG_P (dest)
       && VALID_AVX256_REG_MODE (GET_MODE (dest)))
      || (GET_CODE (set) == SET
	  && REG_P (SET_SRC (set))
	  && VALID_AVX256_REG_MODE (GET_MODE (SET_SRC (set)))))
    {
      enum upper_128bits_state *state
	= (enum upper_128bits_state *) data;
      *state = used;
    }
}
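/* This is the note_stores callback used while scanning a block:

       note_stores (pat, check_avx256_stores, &state);

   flips STATE to `used' as soon as any store in PAT references a
   256bit AVX register.  */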
/* Helper function for move_or_delete_vzeroupper_1.  Look for vzeroupper
   in basic block BB.  Delete it if upper 128bit AVX registers are
   unused.  If it isn't deleted, move it to just before a jump insn.

   STATE is state of the upper 128bits of AVX registers at entry.  */
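/* For example (an illustrative sketch only, not actual RTL), given a
   block whose stream is

       vzeroupper
       insn_1
       insn_2
       call foo

   the vzeroupper is deleted outright when the upper 128bits are known
   to be unused, and otherwise reordered so that it immediately precedes
   the call.  */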
static void
move_or_delete_vzeroupper_2 (basic_block bb,
			     enum upper_128bits_state state)
{
  rtx insn, bb_end;
  rtx vzeroupper_insn = NULL_RTX;
  rtx pat;
  int avx256;
  bool unchanged;

  if (BLOCK_INFO (bb)->unchanged)
    {
      if (dump_file)
	fprintf (dump_file, " [bb %i] unchanged: upper 128bits: %d\n",
		 bb->index, state);

      BLOCK_INFO (bb)->state = state;
      return;
    }

  if (BLOCK_INFO (bb)->scanned && BLOCK_INFO (bb)->prev == state)
    {
      if (dump_file)
	fprintf (dump_file, " [bb %i] scanned: upper 128bits: %d\n",
		 bb->index, BLOCK_INFO (bb)->state);
      return;
    }

  BLOCK_INFO (bb)->prev = state;

  if (dump_file)
    fprintf (dump_file, " [bb %i] entry: upper 128bits: %d\n",
	     bb->index, state);

  unchanged = true;

  /* BB_END changes when it is deleted.  */
  bb_end = BB_END (bb);
  insn = BB_HEAD (bb);
  while (insn != bb_end)
    {
      insn = NEXT_INSN (insn);

      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Move vzeroupper before jump/call.  */
      if (JUMP_P (insn) || CALL_P (insn))
	{
	  if (!vzeroupper_insn)
	    continue;

	  if (PREV_INSN (insn) != vzeroupper_insn)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Move vzeroupper after:\n");
		  print_rtl_single (dump_file, PREV_INSN (insn));
		  fprintf (dump_file, "before:\n");
		  print_rtl_single (dump_file, insn);
		}
	      reorder_insns_nobb (vzeroupper_insn, vzeroupper_insn,
				  PREV_INSN (insn));
	    }
	  vzeroupper_insn = NULL_RTX;
	  continue;
	}

      pat = PATTERN (insn);

      /* Check insn for vzeroupper intrinsic.  */
      if (GET_CODE (pat) == UNSPEC_VOLATILE
	  && XINT (pat, 1) == UNSPECV_VZEROUPPER)
	{
	  if (dump_file)
	    {
	      /* Found vzeroupper intrinsic.  */
	      fprintf (dump_file, "Found vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	}
      else
	{
	  /* Check insn for vzeroall intrinsic.  */
	  if (GET_CODE (pat) == PARALLEL
	      && GET_CODE (XVECEXP (pat, 0, 0)) == UNSPEC_VOLATILE
	      && XINT (XVECEXP (pat, 0, 0), 1) == UNSPECV_VZEROALL)
	    {
	      state = unused;
	      unchanged = false;

	      /* Delete pending vzeroupper insertion.  */
	      if (vzeroupper_insn)
		{
		  delete_insn (vzeroupper_insn);
		  vzeroupper_insn = NULL_RTX;
		}
	    }
	  else if (state != used)
	    {
	      note_stores (pat, check_avx256_stores, &state);
	      if (state == used)
		unchanged = false;
	    }
	  continue;
	}

      /* Process vzeroupper intrinsic.  */
      avx256 = INTVAL (XVECEXP (pat, 0, 0));

      if (state == unused)
	{
	  /* Since the upper 128bits are cleared, callee must not pass
	     256bit AVX register.  We only need to check if callee
	     returns 256bit AVX register.  */
	  if (avx256 == callee_return_avx256)
	    {
	      state = used;
	      unchanged = false;
	    }

	  /* Remove unnecessary vzeroupper since upper 128bits are
	     cleared.  */
	  if (dump_file)
	    {
	      fprintf (dump_file, "Delete redundant vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	  delete_insn (insn);
	}
      else
	{
	  /* Set state to UNUSED if callee doesn't return 256bit AVX
	     register.  */
	  if (avx256 != callee_return_pass_avx256)
	    state = unused;

	  if (avx256 == callee_return_pass_avx256
	      || avx256 == callee_pass_avx256)
	    {
	      /* Must remove vzeroupper since callee passes in 256bit
		 AVX register.  */
	      if (dump_file)
		{
		  fprintf (dump_file, "Delete callee pass vzeroupper:\n");
		  print_rtl_single (dump_file, insn);
		}
	      delete_insn (insn);
	    }
	  else
	    {
	      vzeroupper_insn = insn;
	      unchanged = false;
	    }
	}
    }

  BLOCK_INFO (bb)->state = state;
  BLOCK_INFO (bb)->unchanged = unchanged;
  BLOCK_INFO (bb)->scanned = true;

  if (dump_file)
    fprintf (dump_file, " [bb %i] exit: %s: upper 128bits: %d\n",
	     bb->index, unchanged ? "unchanged" : "changed",
	     state);
}
/* Helper function for move_or_delete_vzeroupper.  Process vzeroupper
   in BLOCK and check its predecessor blocks.  Treat UNKNOWN state
   as USED if UNKNOWN_IS_UNUSED is true.  Return TRUE if the exit
   state is changed.  */

static bool
move_or_delete_vzeroupper_1 (basic_block block, bool unknown_is_unused)
{
  edge e;
  edge_iterator ei;
  enum upper_128bits_state state, old_state, new_state;
  bool seen_unknown;

  if (dump_file)
    fprintf (dump_file, " Process [bb %i]: status: %d\n",
	     block->index, BLOCK_INFO (block)->processed);

  if (BLOCK_INFO (block)->processed)
    return false;

  state = unused;

  /* Check all predecessor edges of this block.  */
  seen_unknown = false;
  FOR_EACH_EDGE (e, ei, block->preds)
    {
      if (e->src == block)
	continue;
      switch (BLOCK_INFO (e->src)->state)
	{
	case unknown:
	  if (!unknown_is_unused)
	    seen_unknown = true;
	case unused:
	  break;
	case used:
	  state = used;
	  break;
	}
    }

  if (seen_unknown)
    state = unknown;

  old_state = BLOCK_INFO (block)->state;
  move_or_delete_vzeroupper_2 (block, state);
  new_state = BLOCK_INFO (block)->state;

  if (state != unknown || new_state == used)
    BLOCK_INFO (block)->processed = true;

  /* Need to rescan if the upper 128bits of AVX registers are changed
     to USED at exit.  */
  if (new_state != old_state)
    {
      if (new_state == used)
	cfun->machine->rescan_vzeroupper_p = 1;
      return true;
    }
  else
    return false;
}
/* Go through the instruction stream looking for vzeroupper.  Delete
   it if upper 128bit AVX registers are unused.  If it isn't deleted,
   move it to just before a jump insn.  */
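/* The driver below is a conventional iterative dataflow loop: basic
   blocks are kept in two fibonacci heaps keyed by reverse completion
   order, with the current round in WORKLIST and the next round in
   PENDING, and iteration stops once a round finishes without setting
   cfun->machine->rescan_vzeroupper_p.  */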
static void
move_or_delete_vzeroupper (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;
  fibheap_t worklist, pending, fibheap_swap;
  sbitmap visited, in_worklist, in_pending, sbitmap_swap;
  int *bb_order;
  int *rc_order;
  int i;

  /* Set up block info for each basic block.  */
  alloc_aux_for_blocks (sizeof (struct block_info_def));

  /* Process outgoing edges of entry point.  */
  if (dump_file)
    fprintf (dump_file, "Process outgoing edges of entry point\n");

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      move_or_delete_vzeroupper_2 (e->dest,
				   cfun->machine->caller_pass_avx256_p
				   ? used : unused);
      BLOCK_INFO (e->dest)->processed = true;
    }

  /* Compute reverse completion order of depth first search of the CFG
     so that the data-flow runs faster.  */
  rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
  bb_order = XNEWVEC (int, last_basic_block);
  pre_and_rev_post_order_compute (NULL, rc_order, false);
  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
    bb_order[rc_order[i]] = i;
  free (rc_order);

  worklist = fibheap_new ();
  pending = fibheap_new ();
  visited = sbitmap_alloc (last_basic_block);
  in_worklist = sbitmap_alloc (last_basic_block);
  in_pending = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_worklist);

  /* Don't check outgoing edges of entry point.  */
  sbitmap_ones (in_pending);
  FOR_EACH_BB (bb)
    if (BLOCK_INFO (bb)->processed)
      RESET_BIT (in_pending, bb->index);
    else
      {
	move_or_delete_vzeroupper_1 (bb, false);
	fibheap_insert (pending, bb_order[bb->index], bb);
      }

  if (dump_file)
    fprintf (dump_file, "Check remaining basic blocks\n");

  while (!fibheap_empty (pending))
    {
      fibheap_swap = pending;
      pending = worklist;
      worklist = fibheap_swap;
      sbitmap_swap = in_pending;
      in_pending = in_worklist;
      in_worklist = sbitmap_swap;

      sbitmap_zero (visited);

      cfun->machine->rescan_vzeroupper_p = 0;

      while (!fibheap_empty (worklist))
	{
	  bb = (basic_block) fibheap_extract_min (worklist);
	  RESET_BIT (in_worklist, bb->index);
	  gcc_assert (!TEST_BIT (visited, bb->index));
	  if (!TEST_BIT (visited, bb->index))
	    {
	      edge_iterator ei;

	      SET_BIT (visited, bb->index);

	      if (move_or_delete_vzeroupper_1 (bb, false))
		FOR_EACH_EDGE (e, ei, bb->succs)
		  {
		    if (e->dest == EXIT_BLOCK_PTR
			|| BLOCK_INFO (e->dest)->processed)
		      continue;

		    if (TEST_BIT (visited, e->dest->index))
		      {
			if (!TEST_BIT (in_pending, e->dest->index))
			  {
			    /* Send E->DEST to next round.  */
			    SET_BIT (in_pending, e->dest->index);
			    fibheap_insert (pending,
					    bb_order[e->dest->index],
					    e->dest);
			  }
		      }
		    else if (!TEST_BIT (in_worklist, e->dest->index))
		      {
			/* Add E->DEST to current round.  */
			SET_BIT (in_worklist, e->dest->index);
			fibheap_insert (worklist, bb_order[e->dest->index],
					e->dest);
		      }
		  }
	    }
	}

      if (!cfun->machine->rescan_vzeroupper_p)
	break;
    }

  free (bb_order);
  fibheap_delete (worklist);
  fibheap_delete (pending);
  sbitmap_free (visited);
  sbitmap_free (in_worklist);
  sbitmap_free (in_pending);

  if (dump_file)
    fprintf (dump_file, "Process remaining basic blocks\n");

  FOR_EACH_BB (bb)
    move_or_delete_vzeroupper_1 (bb, true);

  free_aux_for_blocks ();
}
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
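/* For instance, MODE_INDEX (SImode) == 2 selects the SI entry of the
   five-element mult and divide cost arrays below (QI, HI, SI, DI and
   "other").  */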
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
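/* In a stringop_algs initializer the first field gives the algorithm
   used when the size is unknown, and each {max, alg} pair covers known
   sizes up to MAX, with -1 standing for "all remaining sizes".
   DUMMY_STRINGOP_ALGS fills the table slot for the ABI (32bit or
   64bit) that a given processor never runs in.  */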
const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  0,					/* "large" insn */
  3,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache */
  0,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  1,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  1,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
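/* Note that ix86_size_cost is expressed in COSTS_N_BYTES units, so it
   models code size rather than speed; the per-processor tables below
   use COSTS_N_INSNS instead.  */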
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/* HI */
   COSTS_N_INSNS (6),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (23),			/* SI */
   COSTS_N_INSNS (23),			/* DI */
   COSTS_N_INSNS (23)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache */
  0,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/* HI */
   COSTS_N_INSNS (12),			/* SI */
   COSTS_N_INSNS (12),			/* DI */
   COSTS_N_INSNS (12)},			/* other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/* HI */
   COSTS_N_INSNS (40),			/* SI */
   COSTS_N_INSNS (40),			/* DI */
   COSTS_N_INSNS (40)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  4,					/* size of l1 cache.  486 has 8kB cache
					   shared for code and data, so 4kB is
					   not really precise.  */
  4,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/* HI */
   COSTS_N_INSNS (11),			/* SI */
   COSTS_N_INSNS (11),			/* DI */
   COSTS_N_INSNS (11)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/* HI */
   COSTS_N_INSNS (25),			/* SI */
   COSTS_N_INSNS (25),			/* DI */
   COSTS_N_INSNS (25)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  8,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (4)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/* HI */
   COSTS_N_INSNS (17),			/* SI */
   COSTS_N_INSNS (17),			/* DI */
   COSTS_N_INSNS (17)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks an inline loop is still a
     noticeable win, for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (7),			/* SI */
   COSTS_N_INSNS (7),			/* DI */
   COSTS_N_INSNS (7)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (39),			/* SI */
   COSTS_N_INSNS (39),			/* DI */
   COSTS_N_INSNS (39)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* MOVE_RATIO */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */

  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  128,					/* size of l2 cache.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (3),			/* DI */
   COSTS_N_INSNS (3)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/* HI */
   COSTS_N_INSNS (18),			/* SI */
   COSTS_N_INSNS (18),			/* DI */
   COSTS_N_INSNS (18)},			/* other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  32,					/* size of l2 cache.  Some models
					   have integrated l2 cache, but
					   optimizing for k6 is not important
					   enough to worry about that.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/* HI */
   COSTS_N_INSNS (5),			/* SI */
   COSTS_N_INSNS (5),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  5,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  5,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  3,					/* vec_unalign_load_cost.  */
  3,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  2,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  					/* On K8:
					   MOVD reg64, xmmreg Double FSTORE 4
					   MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					   MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					   MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  					/* On K8:
					   MOVD reg64, xmmreg Double FSTORE 4
					   MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					   MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					   MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs btver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  					/* On K8:
					   MOVD reg64, xmmreg Double FSTORE 4
					   MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					   MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					   MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* BTVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/* HI */
   COSTS_N_INSNS (15),			/* SI */
   COSTS_N_INSNS (15),			/* DI */
   COSTS_N_INSNS (15)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/* HI */
   COSTS_N_INSNS (56),			/* SI */
   COSTS_N_INSNS (56),			/* DI */
   COSTS_N_INSNS (56)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/* HI */
   COSTS_N_INSNS (10),			/* SI */
   COSTS_N_INSNS (10),			/* DI */
   COSTS_N_INSNS (10)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/* HI */
   COSTS_N_INSNS (66),			/* SI */
   COSTS_N_INSNS (66),			/* DI */
   COSTS_N_INSNS (66)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
1639 /* Generic64 should produce code tuned for Nocona and K8. */
1641 struct processor_costs generic64_cost = {
1642 COSTS_N_INSNS (1), /* cost of an add instruction */
1643 /* On all chips taken into consideration lea is 2 cycles and more. With
1644 this cost however our current implementation of synth_mult results in
1645 use of unnecessary temporary registers causing regression on several
1646 SPECfp benchmarks. */
1647 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1648 COSTS_N_INSNS (1), /* variable shift costs */
1649 COSTS_N_INSNS (1), /* constant shift costs */
1650 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1651 COSTS_N_INSNS (4), /* HI */
1652 COSTS_N_INSNS (3), /* SI */
1653 COSTS_N_INSNS (4), /* DI */
1654 COSTS_N_INSNS (2)}, /* other */
1655 0, /* cost of multiply per each bit set */
1656 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1657 COSTS_N_INSNS (26), /* HI */
1658 COSTS_N_INSNS (42), /* SI */
1659 COSTS_N_INSNS (74), /* DI */
1660 COSTS_N_INSNS (74)}, /* other */
1661 COSTS_N_INSNS (1), /* cost of movsx */
1662 COSTS_N_INSNS (1), /* cost of movzx */
1663 8, /* "large" insn */
1664 17, /* MOVE_RATIO */
1665 4, /* cost for loading QImode using movzbl */
1666 {4, 4, 4}, /* cost of loading integer registers
1667 in QImode, HImode and SImode.
1668 Relative to reg-reg move (2). */
1669 {4, 4, 4}, /* cost of storing integer registers */
1670 4, /* cost of reg,reg fld/fst */
1671 {12, 12, 12}, /* cost of loading fp registers
1672 in SFmode, DFmode and XFmode */
1673 {6, 6, 8}, /* cost of storing fp registers
1674 in SFmode, DFmode and XFmode */
1675 2, /* cost of moving MMX register */
1676 {8, 8}, /* cost of loading MMX registers
1677 in SImode and DImode */
1678 {8, 8}, /* cost of storing MMX registers
1679 in SImode and DImode */
1680 2, /* cost of moving SSE register */
1681 {8, 8, 8}, /* cost of loading SSE registers
1682 in SImode, DImode and TImode */
1683 {8, 8, 8}, /* cost of storing SSE registers
1684 in SImode, DImode and TImode */
1685 5, /* MMX or SSE register to integer */
1686 32, /* size of l1 cache. */
1687 512, /* size of l2 cache. */
1688 64, /* size of prefetch block */
1689 6, /* number of parallel prefetches */
1690 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1691 value is increased to the perhaps more appropriate value of 5.  */
1692 3, /* Branch cost */
1693 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1694 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1695 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1696 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1697 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1698 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1699 {DUMMY_STRINGOP_ALGS,
1700 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1701 {DUMMY_STRINGOP_ALGS,
1702 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1703 1, /* scalar_stmt_cost. */
1704 1, /* scalar load_cost. */
1705 1, /* scalar_store_cost. */
1706 1, /* vec_stmt_cost. */
1707 1, /* vec_to_scalar_cost. */
1708 1, /* scalar_to_vec_cost. */
1709 1, /* vec_align_load_cost. */
1710 2, /* vec_unalign_load_cost. */
1711 1, /* vec_store_cost. */
1712 3, /* cond_taken_branch_cost. */
1713 1, /* cond_not_taken_branch_cost. */
1716 /* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
1719 struct processor_costs generic32_cost = {
1720 COSTS_N_INSNS (1), /* cost of an add instruction */
1721 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1722 COSTS_N_INSNS (1), /* variable shift costs */
1723 COSTS_N_INSNS (1), /* constant shift costs */
1724 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1725 COSTS_N_INSNS (4), /* HI */
1726 COSTS_N_INSNS (3), /* SI */
1727 COSTS_N_INSNS (4), /* DI */
1728 COSTS_N_INSNS (2)}, /* other */
1729 0, /* cost of multiply per each bit set */
1730 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1731 COSTS_N_INSNS (26), /* HI */
1732 COSTS_N_INSNS (42), /* SI */
1733 COSTS_N_INSNS (74), /* DI */
1734 COSTS_N_INSNS (74)}, /* other */
1735 COSTS_N_INSNS (1), /* cost of movsx */
1736 COSTS_N_INSNS (1), /* cost of movzx */
1737 8, /* "large" insn */
1738 17, /* MOVE_RATIO */
1739 4, /* cost for loading QImode using movzbl */
1740 {4, 4, 4}, /* cost of loading integer registers
1741 in QImode, HImode and SImode.
1742 Relative to reg-reg move (2). */
1743 {4, 4, 4}, /* cost of storing integer registers */
1744 4, /* cost of reg,reg fld/fst */
1745 {12, 12, 12}, /* cost of loading fp registers
1746 in SFmode, DFmode and XFmode */
1747 {6, 6, 8}, /* cost of storing fp registers
1748 in SFmode, DFmode and XFmode */
1749 2, /* cost of moving MMX register */
1750 {8, 8}, /* cost of loading MMX registers
1751 in SImode and DImode */
1752 {8, 8}, /* cost of storing MMX registers
1753 in SImode and DImode */
1754 2, /* cost of moving SSE register */
1755 {8, 8, 8}, /* cost of loading SSE registers
1756 in SImode, DImode and TImode */
1757 {8, 8, 8}, /* cost of storing SSE registers
1758 in SImode, DImode and TImode */
1759 5, /* MMX or SSE register to integer */
1760 32, /* size of l1 cache. */
1761 256, /* size of l2 cache. */
1762 64, /* size of prefetch block */
1763 6, /* number of parallel prefetches */
1764 3, /* Branch cost */
1765 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1766 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1767 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1768 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1769 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1770 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1771 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1772 DUMMY_STRINGOP_ALGS},
1773 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1774 DUMMY_STRINGOP_ALGS},
1775 1, /* scalar_stmt_cost. */
1776 1, /* scalar load_cost. */
1777 1, /* scalar_store_cost. */
1778 1, /* vec_stmt_cost. */
1779 1, /* vec_to_scalar_cost. */
1780 1, /* scalar_to_vec_cost. */
1781 1, /* vec_align_load_cost. */
1782 2, /* vec_unalign_load_cost. */
1783 1, /* vec_store_cost. */
1784 3, /* cond_taken_branch_cost. */
1785 1, /* cond_not_taken_branch_cost. */
1788 const struct processor_costs *ix86_cost = &pentium_cost;
1790 /* Processor feature/optimization bitmasks. */
1791 #define m_386 (1<<PROCESSOR_I386)
1792 #define m_486 (1<<PROCESSOR_I486)
1793 #define m_PENT (1<<PROCESSOR_PENTIUM)
1794 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1795 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1796 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1797 #define m_CORE2_32 (1<<PROCESSOR_CORE2_32)
1798 #define m_CORE2_64 (1<<PROCESSOR_CORE2_64)
1799 #define m_COREI7_32 (1<<PROCESSOR_COREI7_32)
1800 #define m_COREI7_64 (1<<PROCESSOR_COREI7_64)
1801 #define m_COREI7 (m_COREI7_32 | m_COREI7_64)
1802 #define m_CORE2I7_32 (m_CORE2_32 | m_COREI7_32)
1803 #define m_CORE2I7_64 (m_CORE2_64 | m_COREI7_64)
1804 #define m_CORE2I7 (m_CORE2I7_32 | m_CORE2I7_64)
1805 #define m_ATOM (1<<PROCESSOR_ATOM)
1807 #define m_GEODE (1<<PROCESSOR_GEODE)
1808 #define m_K6 (1<<PROCESSOR_K6)
1809 #define m_K6_GEODE (m_K6 | m_GEODE)
1810 #define m_K8 (1<<PROCESSOR_K8)
1811 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1812 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1813 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1814 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1815 #define m_BTVER1 (1<<PROCESSOR_BTVER1)
1816 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1 | m_BTVER1)
1818 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1819 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1821 /* Generic instruction choice should be a common subset of supported CPUs
1822 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1823 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
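/* Rough sketch of how the masks above are consumed (the real code is in
   ix86_option_override_internal; the exact form there may differ slightly):

     ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
	 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   i.e. each entry of initial_ix86_tune_features below is a bitmask over
   processors, and a tuning flag is enabled exactly when the bit of the
   processor selected by -mtune is set in that entry.  */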
1825 /* Feature tests against the various tunings. */
1826 unsigned char ix86_tune_features[X86_TUNE_LAST];
1828 /* Feature tests against the various tunings used to create ix86_tune_features
1829 based on the processor mask. */
1830 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1831 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1832 negatively, so enabling it for Generic64 seems like a good code size
1833 tradeoff.  We can't enable it for 32bit generic because it does not
1834 work well with PPro-based chips.  */
1835 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2I7_64 | m_GENERIC64,
1837 /* X86_TUNE_PUSH_MEMORY */
1838 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1839 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1841 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1844 /* X86_TUNE_UNROLL_STRLEN */
1845 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1846 | m_CORE2I7 | m_GENERIC,
1848 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1849 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1850 | m_CORE2I7 | m_GENERIC,
1852 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1853 on simulation results.  But after P4 was made, no performance benefit
1854 was observed with branch hints.  They also increase code size.
1855 As a result, icc never generates branch hints.  */
1858 /* X86_TUNE_DOUBLE_WITH_ADD */
1861 /* X86_TUNE_USE_SAHF */
1862 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_BTVER1
1863 | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1865 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1866 partial dependencies. */
1867 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1868 | m_CORE2I7 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1870 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1871 register stalls on the Generic32 compilation setting as well.  However,
1872 in the current implementation partial register stalls are not
1873 eliminated very well - they can be introduced via subregs synthesized
1874 by combine and can happen in caller/callee saving sequences.  Because
1875 this option pays back little on PPro-based chips and conflicts with
1876 the partial reg dependencies used by Athlon/P4-based chips, it is
1877 better to leave it off for generic32 for now.  */
1880 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1881 m_CORE2I7 | m_GENERIC,
1883 /* X86_TUNE_USE_HIMODE_FIOP */
1884 m_386 | m_486 | m_K6_GEODE,
1886 /* X86_TUNE_USE_SIMODE_FIOP */
1887 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2I7 | m_GENERIC),
1889 /* X86_TUNE_USE_MOV0 */
1892 /* X86_TUNE_USE_CLTD */
1893 ~(m_PENT | m_ATOM | m_K6 | m_CORE2I7 | m_GENERIC),
1895 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1898 /* X86_TUNE_SPLIT_LONG_MOVES */
1901 /* X86_TUNE_READ_MODIFY_WRITE */
1904 /* X86_TUNE_READ_MODIFY */
1907 /* X86_TUNE_PROMOTE_QIMODE */
1908 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1909 | m_CORE2I7 | m_GENERIC /* | m_PENT4 ? */,
1911 /* X86_TUNE_FAST_PREFIX */
1912 ~(m_PENT | m_486 | m_386),
1914 /* X86_TUNE_SINGLE_STRINGOP */
1915 m_386 | m_PENT4 | m_NOCONA,
1917 /* X86_TUNE_QIMODE_MATH */
1920 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1921 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1922 might be considered for Generic32 if our scheme for avoiding partial
1923 stalls were more effective.  */
1926 /* X86_TUNE_PROMOTE_QI_REGS */
1929 /* X86_TUNE_PROMOTE_HI_REGS */
1932 /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
1933 over esp addition. */
1934 m_386 | m_486 | m_PENT | m_PPRO,
1936 /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
1937 over esp addition. */
1940 /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
1941 over esp subtraction. */
1942 m_386 | m_486 | m_PENT | m_K6_GEODE,
1944 /* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
1945 over esp subtraction. */
1946 m_PENT | m_K6_GEODE,
1948 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1949 for DFmode copies */
1950 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7
1951 | m_GENERIC | m_GEODE),
1953 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1954 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1956 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1957 conflict between PPro/Pentium4-based chips that treat 128bit SSE
1958 registers as single units and K8-based chips that divide SSE registers
1959 into two 64bit halves.  This knob promotes all store destinations to
1960 128bit to allow register renaming on 128bit SSE units, but usually
1961 results in one extra microop on 64bit SSE units.  Experimental results
1962 show that disabling this option on P4 brings over a 20% SPECfp
1963 regression, while enabling it on K8 brings roughly a 2.4% regression
1964 that can be partly masked by careful scheduling of moves.  */
1965 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7 | m_GENERIC
1966 | m_AMDFAM10 | m_BDVER1,
1968 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1969 m_AMDFAM10 | m_BDVER1 | m_BTVER1 | m_COREI7,
1971 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1972 m_BDVER1 | m_COREI7,
1974 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1977 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1978 are resolved on SSE register parts instead of whole registers, so we may
1979 maintain just the lower part of scalar values in the proper format,
1980 leaving the upper part undefined.  */
1983 /* X86_TUNE_SSE_TYPELESS_STORES */
1986 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1987 m_PPRO | m_PENT4 | m_NOCONA,
1989 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1990 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1992 /* X86_TUNE_PROLOGUE_USING_MOVE */
1993 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2I7 | m_GENERIC,
1995 /* X86_TUNE_EPILOGUE_USING_MOVE */
1996 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2I7 | m_GENERIC,
1998 /* X86_TUNE_SHIFT1 */
2001 /* X86_TUNE_USE_FFREEP */
2004 /* X86_TUNE_INTER_UNIT_MOVES */
2005 ~(m_AMD_MULTIPLE | m_GENERIC),
2007 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
2008 ~(m_AMDFAM10 | m_BDVER1),
2010 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
2011 than 4 branch instructions in the 16 byte window. */
2012 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2I7
2015 /* X86_TUNE_SCHEDULE */
2016 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2I7
2019 /* X86_TUNE_USE_BT */
2020 m_AMD_MULTIPLE | m_ATOM | m_CORE2I7 | m_GENERIC,
2022 /* X86_TUNE_USE_INCDEC */
2023 ~(m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC | m_ATOM),
2025 /* X86_TUNE_PAD_RETURNS */
2026 m_AMD_MULTIPLE | m_CORE2I7 | m_GENERIC,
2028 /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions.  */
2031 /* X86_TUNE_EXT_80387_CONSTANTS */
2032 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
2033 | m_CORE2I7 | m_GENERIC,
2035 /* X86_TUNE_SHORTEN_X87_SSE */
2038 /* X86_TUNE_AVOID_VECTOR_DECODE */
2039 m_K8 | m_CORE2I7_64 | m_GENERIC64,
2041 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
2042 HImode and SImode multiply, but 386 and 486 do HImode multiply faster.  */
2045 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
2046 vector path on AMD machines. */
2047 m_K8 | m_CORE2I7_64 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1 | m_BTVER1,
2049 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
2051 m_K8 | m_CORE2I7_64 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1 | m_BTVER1,
2053 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
2057 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
2058 but one byte longer. */
2061 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
2062 operand that cannot be represented using a modRM byte.  The XOR
2063 replacement is long decoded, so this split helps here as well.  */
2066 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
2068 m_AMDFAM10 | m_CORE2I7 | m_GENERIC,
2070 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
2071 from integer to FP. */
2074 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
2075 with a subsequent conditional jump instruction into a single
2076 compare-and-branch uop. */
2079 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
2080 will impact LEA instruction selection. */
2083 /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
2087 /* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
2088 the auto-vectorizer. */
2092 /* Feature tests against the various architecture variations. */
2093 unsigned char ix86_arch_features[X86_ARCH_LAST];
2095 /* Feature tests against the various architecture variations, used to create
2096 ix86_arch_features based on the processor mask. */
2097 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
2098 /* X86_ARCH_CMOV: Conditional move was added for pentiumpro. */
2099 ~(m_386 | m_486 | m_PENT | m_K6),
2101 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
2104 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
2107 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
2110 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
2114 static const unsigned int x86_accumulate_outgoing_args
2115 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7
2118 static const unsigned int x86_arch_always_fancy_math_387
2119 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
2120 | m_NOCONA | m_CORE2I7 | m_GENERIC;
2122 static const unsigned int x86_avx256_split_unaligned_load
2123 = m_COREI7 | m_GENERIC;
2125 static const unsigned int x86_avx256_split_unaligned_store
2126 = m_COREI7 | m_BDVER1 | m_GENERIC;
2128 static enum stringop_alg stringop_alg = no_stringop;
2130 /* In case the average insn count for a single function invocation is
2131 lower than this constant, emit a fast (but longer) prologue and
2133 #define FAST_PROLOGUE_INSN_COUNT 20
2135 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
2136 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
2137 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
2138 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
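/* E.g. with the standard definitions in i386.h, qi_reg_name[AX_REG]
   is "al", qi_high_reg_name[AX_REG] is "ah" and hi_reg_name[AX_REG]
   is "ax".  */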
2140 /* Array of the smallest class containing reg number REGNO, indexed by
2141 REGNO. Used by REGNO_REG_CLASS in i386.h. */
2143 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
2145 /* ax, dx, cx, bx */
2146 AREG, DREG, CREG, BREG,
2147 /* si, di, bp, sp */
2148 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
2150 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
2151 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
2154 /* flags, fpsr, fpcr, frame */
2155 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
2157 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2160 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
2163 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2164 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2165 /* SSE REX registers */
2166 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2170 /* The "default" register map used in 32bit mode. */
2172 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
2174 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
2175 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
2176 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2177 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
2178 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
2179 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2180 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2183 /* The "default" register map used in 64bit mode. */
2185 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
2187 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
2188 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
2189 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2190 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
2191 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
2192 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
2193 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
2196 /* Define the register numbers to be used in Dwarf debugging information.
2197 The SVR4 reference port C compiler uses the following register numbers
2198 in its Dwarf output code:
2199 0 for %eax (gcc regno = 0)
2200 1 for %ecx (gcc regno = 2)
2201 2 for %edx (gcc regno = 1)
2202 3 for %ebx (gcc regno = 3)
2203 4 for %esp (gcc regno = 7)
2204 5 for %ebp (gcc regno = 6)
2205 6 for %esi (gcc regno = 4)
2206 7 for %edi (gcc regno = 5)
2207 The following three DWARF register numbers are never generated by
2208 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
2209 believes these numbers have these meanings.
2210 8 for %eip (no gcc equivalent)
2211 9 for %eflags (gcc regno = 17)
2212 10 for %trapno (no gcc equivalent)
2213 It is not at all clear how we should number the FP stack registers
2214 for the x86 architecture. If the version of SDB on x86/svr4 were
2215 a bit less brain dead with respect to floating-point then we would
2216 have a precedent to follow with respect to DWARF register numbers
2217 for x86 FP registers, but the SDB on x86/svr4 is so completely
2218 broken with respect to FP registers that it is hardly worth thinking
2219 of it as something to strive for compatibility with.
2220 The version of x86/svr4 SDB I have at the moment does (partially)
2221 seem to believe that DWARF register number 11 is associated with
2222 the x86 register %st(0), but that's about all. Higher DWARF
2223 register numbers don't seem to be associated with anything in
2224 particular, and even for DWARF regno 11, SDB only seems to under-
2225 stand that it should say that a variable lives in %st(0) (when
2226 asked via an `=' command) if we said it was in DWARF regno 11,
2227 but SDB still prints garbage when asked for the value of the
2228 variable in question (via a `/' command).
2229 (Also note that the labels SDB prints for various FP stack regs
2230 when doing an `x' command are all wrong.)
2231 Note that these problems generally don't affect the native SVR4
2232 C compiler because it doesn't allow the use of -O with -g and
2233 because when it is *not* optimizing, it allocates a memory
2234 location for each floating-point variable, and the memory
2235 location is what gets described in the DWARF AT_location
2236 attribute for the variable in question.
2237 Regardless of the severe mental illness of the x86/svr4 SDB, we
2238 do something sensible here and we use the following DWARF
2239 register numbers. Note that these are all stack-top-relative
2241 11 for %st(0) (gcc regno = 8)
2242 12 for %st(1) (gcc regno = 9)
2243 13 for %st(2) (gcc regno = 10)
2244 14 for %st(3) (gcc regno = 11)
2245 15 for %st(4) (gcc regno = 12)
2246 16 for %st(5) (gcc regno = 13)
2247 17 for %st(6) (gcc regno = 14)
2248 18 for %st(7) (gcc regno = 15)
2250 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
2252 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
2253 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
2254 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2255 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
2256 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
2257 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2258 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2261 /* Define parameter passing and return registers. */
2263 static int const x86_64_int_parameter_registers[6] =
2265 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
2268 static int const x86_64_ms_abi_int_parameter_registers[4] =
2270 CX_REG, DX_REG, R8_REG, R9_REG
2273 static int const x86_64_int_return_registers[4] =
2275 AX_REG, DX_REG, DI_REG, SI_REG
2278 /* Define the structure for the machine field in struct function. */
2280 struct GTY(()) stack_local_entry {
2281 unsigned short mode;
2284 struct stack_local_entry *next;
2287 /* Structure describing stack frame layout.
2288 Stack grows downward:
2294 saved static chain if ix86_static_chain_on_stack
2296 saved frame pointer if frame_pointer_needed
2297 <- HARD_FRAME_POINTER
2303 <- sse_regs_save_offset
2306 [va_arg registers] |
2310 [padding2] | = to_allocate
2319 int outgoing_arguments_size;
2320 HOST_WIDE_INT frame;
2322 /* The offsets relative to ARG_POINTER. */
2323 HOST_WIDE_INT frame_pointer_offset;
2324 HOST_WIDE_INT hard_frame_pointer_offset;
2325 HOST_WIDE_INT stack_pointer_offset;
2326 HOST_WIDE_INT hfp_save_offset;
2327 HOST_WIDE_INT reg_save_offset;
2328 HOST_WIDE_INT sse_reg_save_offset;
2330 /* When save_regs_using_mov is set, emit prologue using
2331 move instead of push instructions. */
2332 bool save_regs_using_mov;
2335 /* Code model option. */
2336 enum cmodel ix86_cmodel;
2338 enum asm_dialect ix86_asm_dialect = ASM_ATT;
2340 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
2342 /* Which unit we are generating floating point math for. */
2343 enum fpmath_unit ix86_fpmath;
2345 /* Which cpu are we scheduling for. */
2346 enum attr_cpu ix86_schedule;
2348 /* Which cpu are we optimizing for. */
2349 enum processor_type ix86_tune;
2351 /* Which instruction set architecture to use. */
2352 enum processor_type ix86_arch;
2354 /* True if the sse prefetch instruction is not a NOOP.  */
2355 int x86_prefetch_sse;
2357 /* ix86_regparm_string as a number */
2358 static int ix86_regparm;
2360 /* -mstackrealign option */
2361 static const char ix86_force_align_arg_pointer_string[]
2362 = "force_align_arg_pointer";
2364 static rtx (*ix86_gen_leave) (void);
2365 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
2366 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
2367 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
2368 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
2369 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
2370 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
2371 static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
2372 static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
2373 static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
2375 /* Preferred alignment for stack boundary in bits. */
2376 unsigned int ix86_preferred_stack_boundary;
2378 /* Alignment for incoming stack boundary in bits specified at
2380 static unsigned int ix86_user_incoming_stack_boundary;
2382 /* Default alignment for incoming stack boundary in bits. */
2383 static unsigned int ix86_default_incoming_stack_boundary;
2385 /* Alignment for incoming stack boundary in bits. */
2386 unsigned int ix86_incoming_stack_boundary;
2388 /* The abi used by target. */
2389 enum calling_abi ix86_abi;
2391 /* Values 1-5: see jump.c */
2392 int ix86_branch_cost;
2394 /* Calling abi specific va_list type nodes. */
2395 static GTY(()) tree sysv_va_list_type_node;
2396 static GTY(()) tree ms_va_list_type_node;
2398 /* Variables which are this size or smaller are put in the data/bss
2399 or ldata/lbss sections. */
2401 int ix86_section_threshold = 65536;
2403 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
2404 char internal_label_prefix[16];
2405 int internal_label_prefix_len;
2407 /* Fence to use after loop using movnt. */
2410 /* Register class used for passing a given 64bit part of the argument.
2411 These represent classes as documented by the PS ABI, with the exception
2412 of the SSESF and SSEDF classes, which are basically the SSE class; gcc
2413 just uses SF or DFmode moves instead of DImode to avoid reformatting
2415 penalties.  Similarly we play games with INTEGERSI_CLASS to use cheaper
2416 SImode moves whenever possible (the upper half does contain padding). */
2417 enum x86_64_reg_class
2420 X86_64_INTEGER_CLASS,
2421 X86_64_INTEGERSI_CLASS,
2428 X86_64_COMPLEX_X87_CLASS,
2432 #define MAX_CLASSES 4
2434 /* Table of constants used by fldpi, fldln2, etc.... */
2435 static REAL_VALUE_TYPE ext_80387_constants_table [5];
2436 static bool ext_80387_constants_init = 0;
2439 static struct machine_function * ix86_init_machine_status (void);
2440 static rtx ix86_function_value (const_tree, const_tree, bool);
2441 static bool ix86_function_value_regno_p (const unsigned int);
2442 static unsigned int ix86_function_arg_boundary (enum machine_mode,
2444 static rtx ix86_static_chain (const_tree, bool);
2445 static int ix86_function_regparm (const_tree, const_tree);
2446 static void ix86_compute_frame_layout (struct ix86_frame *);
2447 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
2449 static void ix86_add_new_builtins (int);
2450 static rtx ix86_expand_vec_perm_builtin (tree);
2451 static tree ix86_canonical_va_list_type (tree);
2452 static void predict_jump (int);
2453 static unsigned int split_stack_prologue_scratch_regno (void);
2454 static bool i386_asm_output_addr_const_extra (FILE *, rtx);
2456 enum ix86_function_specific_strings
2458 IX86_FUNCTION_SPECIFIC_ARCH,
2459 IX86_FUNCTION_SPECIFIC_TUNE,
2460 IX86_FUNCTION_SPECIFIC_FPMATH,
2461 IX86_FUNCTION_SPECIFIC_MAX
2464 static char *ix86_target_string (int, int, const char *, const char *,
2465 const char *, bool);
2466 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
2467 static void ix86_function_specific_save (struct cl_target_option *);
2468 static void ix86_function_specific_restore (struct cl_target_option *);
2469 static void ix86_function_specific_print (FILE *, int,
2470 struct cl_target_option *);
2471 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2472 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2473 static bool ix86_can_inline_p (tree, tree);
2474 static void ix86_set_current_function (tree);
2475 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2477 static enum calling_abi ix86_function_abi (const_tree);
2480 #ifndef SUBTARGET32_DEFAULT_CPU
2481 #define SUBTARGET32_DEFAULT_CPU "i386"
2484 /* The svr4 ABI for the i386 says that records and unions are returned
2486 #ifndef DEFAULT_PCC_STRUCT_RETURN
2487 #define DEFAULT_PCC_STRUCT_RETURN 1
2490 /* Whether -mtune= or -march= were specified */
2491 static int ix86_tune_defaulted;
2492 static int ix86_arch_specified;
2494 /* A mask of ix86_isa_flags that includes bit X if X
2495 was set or cleared on the command line. */
2496 static int ix86_isa_flags_explicit;
2498 /* Define a set of ISAs which are available when a given ISA is
2499 enabled. MMX and SSE ISAs are handled separately. */
2501 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2502 #define OPTION_MASK_ISA_3DNOW_SET \
2503 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2505 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2506 #define OPTION_MASK_ISA_SSE2_SET \
2507 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2508 #define OPTION_MASK_ISA_SSE3_SET \
2509 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2510 #define OPTION_MASK_ISA_SSSE3_SET \
2511 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2512 #define OPTION_MASK_ISA_SSE4_1_SET \
2513 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2514 #define OPTION_MASK_ISA_SSE4_2_SET \
2515 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2516 #define OPTION_MASK_ISA_AVX_SET \
2517 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2518 #define OPTION_MASK_ISA_FMA_SET \
2519 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2521 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
2523 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2525 #define OPTION_MASK_ISA_SSE4A_SET \
2526 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2527 #define OPTION_MASK_ISA_FMA4_SET \
2528 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2529 | OPTION_MASK_ISA_AVX_SET)
2530 #define OPTION_MASK_ISA_XOP_SET \
2531 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2532 #define OPTION_MASK_ISA_LWP_SET \
2535 /* AES and PCLMUL need SSE2 because they use xmm registers */
2536 #define OPTION_MASK_ISA_AES_SET \
2537 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2538 #define OPTION_MASK_ISA_PCLMUL_SET \
2539 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2541 #define OPTION_MASK_ISA_ABM_SET \
2542 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2544 #define OPTION_MASK_ISA_BMI_SET OPTION_MASK_ISA_BMI
2545 #define OPTION_MASK_ISA_TBM_SET OPTION_MASK_ISA_TBM
2546 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2547 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2548 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2549 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2550 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
2552 #define OPTION_MASK_ISA_FSGSBASE_SET OPTION_MASK_ISA_FSGSBASE
2553 #define OPTION_MASK_ISA_RDRND_SET OPTION_MASK_ISA_RDRND
2554 #define OPTION_MASK_ISA_F16C_SET \
2555 (OPTION_MASK_ISA_F16C | OPTION_MASK_ISA_AVX_SET)
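/* For example, expanding the definitions above,

     OPTION_MASK_ISA_SSE4_2_SET
       == OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1
	| OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3
	| OPTION_MASK_ISA_SSE2  | OPTION_MASK_ISA_SSE

   so -msse4.2 transitively enables every ISA it depends on.  */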
2557 /* Define a set of ISAs which aren't available when a given ISA is
2558 disabled. MMX and SSE ISAs are handled separately. */
2560 #define OPTION_MASK_ISA_MMX_UNSET \
2561 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2562 #define OPTION_MASK_ISA_3DNOW_UNSET \
2563 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2564 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2566 #define OPTION_MASK_ISA_SSE_UNSET \
2567 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2568 #define OPTION_MASK_ISA_SSE2_UNSET \
2569 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2570 #define OPTION_MASK_ISA_SSE3_UNSET \
2571 (OPTION_MASK_ISA_SSE3 \
2572 | OPTION_MASK_ISA_SSSE3_UNSET \
2573 | OPTION_MASK_ISA_SSE4A_UNSET )
2574 #define OPTION_MASK_ISA_SSSE3_UNSET \
2575 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2576 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2577 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2578 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2579 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2580 #define OPTION_MASK_ISA_AVX_UNSET \
2581 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2582 | OPTION_MASK_ISA_FMA4_UNSET | OPTION_MASK_ISA_F16C_UNSET)
2583 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2585 /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
2587 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2589 #define OPTION_MASK_ISA_SSE4A_UNSET \
2590 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2592 #define OPTION_MASK_ISA_FMA4_UNSET \
2593 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2594 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2595 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2597 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2598 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2599 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2600 #define OPTION_MASK_ISA_BMI_UNSET OPTION_MASK_ISA_BMI
2601 #define OPTION_MASK_ISA_TBM_UNSET OPTION_MASK_ISA_TBM
2602 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2603 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2604 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2605 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2606 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
2608 #define OPTION_MASK_ISA_FSGSBASE_UNSET OPTION_MASK_ISA_FSGSBASE
2609 #define OPTION_MASK_ISA_RDRND_UNSET OPTION_MASK_ISA_RDRND
2610 #define OPTION_MASK_ISA_F16C_UNSET OPTION_MASK_ISA_F16C
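/* Symmetrically, the UNSET masks close over everything that depends on
   the ISA being disabled.  Expanding the definitions above,
   OPTION_MASK_ISA_SSE3_UNSET covers SSE3, SSSE3, SSE4.1, SSE4.2, SSE4A,
   AVX, FMA, FMA4, XOP and F16C, so -mno-sse3 turns all of those off.  */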
2612 /* Vectorization library interface and handlers. */
2613 static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);
2615 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2616 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2618 /* Processor target table, indexed by processor number */
2621 const struct processor_costs *cost; /* Processor costs */
2622 const int align_loop; /* Default alignments. */
2623 const int align_loop_max_skip;
2624 const int align_jump;
2625 const int align_jump_max_skip;
2626 const int align_func;
2629 static const struct ptt processor_target_table[PROCESSOR_max] =
2631 {&i386_cost, 4, 3, 4, 3, 4},
2632 {&i486_cost, 16, 15, 16, 15, 16},
2633 {&pentium_cost, 16, 7, 16, 7, 16},
2634 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2635 {&geode_cost, 0, 0, 0, 0, 0},
2636 {&k6_cost, 32, 7, 32, 7, 32},
2637 {&athlon_cost, 16, 7, 16, 7, 16},
2638 {&pentium4_cost, 0, 0, 0, 0, 0},
2639 {&k8_cost, 16, 7, 16, 7, 16},
2640 {&nocona_cost, 0, 0, 0, 0, 0},
2641 /* Core 2 32-bit. */
2642 {&generic32_cost, 16, 10, 16, 10, 16},
2643 /* Core 2 64-bit. */
2644 {&generic64_cost, 16, 10, 16, 10, 16},
2645 /* Core i7 32-bit. */
2646 {&generic32_cost, 16, 10, 16, 10, 16},
2647 /* Core i7 64-bit. */
2648 {&generic64_cost, 16, 10, 16, 10, 16},
2649 {&generic32_cost, 16, 7, 16, 7, 16},
2650 {&generic64_cost, 16, 10, 16, 10, 16},
2651 {&amdfam10_cost, 32, 24, 32, 7, 32},
2652 {&bdver1_cost, 32, 24, 32, 7, 32},
2653 {&btver1_cost, 32, 24, 32, 7, 32},
2654 {&atom_cost, 16, 7, 16, 7, 16}
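  /* Sketch of how this table is consumed (the real code is in
     ix86_option_override_internal; guard conditions elided here):

	ix86_cost = processor_target_table[ix86_tune].cost;
	if (align_loops == 0)
	  align_loops = processor_target_table[ix86_tune].align_loop;

     and likewise for the jump and function alignment defaults.  */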
2657 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2686 /* Return true if a red-zone is in use. */
2689 ix86_using_red_zone (void)
2691 return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
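/* (The red zone is the 128-byte area below the stack pointer that the
   SysV x86-64 ABI guarantees is not clobbered by signal or interrupt
   handlers, so leaf functions may use it without adjusting %rsp.  The
   Microsoft x64 ABI provides no such area, hence the check above.)  */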
2694 /* Implement TARGET_HANDLE_OPTION. */
2697 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2704 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2705 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2709 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2710 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2717 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2718 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2722 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2723 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2733 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2734 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2738 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2739 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2746 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2747 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2751 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2752 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2759 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2760 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2764 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2765 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2772 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2773 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2777 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2778 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2785 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2786 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2790 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2791 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2798 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2799 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2803 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2804 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2811 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2812 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2816 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2817 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2824 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2825 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2829 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2830 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2835 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2836 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2840 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2841 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2847 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2848 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2852 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2853 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2860 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2861 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2865 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2866 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2873 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2874 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2878 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2879 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2886 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2887 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2891 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2892 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2899 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2900 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2904 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2905 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2912 ix86_isa_flags |= OPTION_MASK_ISA_BMI_SET;
2913 ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_SET;
2917 ix86_isa_flags &= ~OPTION_MASK_ISA_BMI_UNSET;
2918 ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_UNSET;
2925 ix86_isa_flags |= OPTION_MASK_ISA_TBM_SET;
2926 ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_SET;
2930 ix86_isa_flags &= ~OPTION_MASK_ISA_TBM_UNSET;
2931 ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_UNSET;
2938 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2939 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2943 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2944 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2951 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2952 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2956 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2957 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2964 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2965 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2969 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2970 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2977 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2978 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2982 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2983 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2990 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2991 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2995 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2996 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
3003 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
3004 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
3008 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
3009 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
3016 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
3017 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
3021 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
3022 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
3029 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE_SET;
3030 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_SET;
3034 ix86_isa_flags &= ~OPTION_MASK_ISA_FSGSBASE_UNSET;
3035 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_UNSET;
3042 ix86_isa_flags |= OPTION_MASK_ISA_RDRND_SET;
3043 ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_SET;
3047 ix86_isa_flags &= ~OPTION_MASK_ISA_RDRND_UNSET;
3048 ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_UNSET;
3055 ix86_isa_flags |= OPTION_MASK_ISA_F16C_SET;
3056 ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_SET;
3060 ix86_isa_flags &= ~OPTION_MASK_ISA_F16C_UNSET;
3061 ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_UNSET;
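      /* Note on ordering: each case ORs in a *_SET or *_UNSET closure, so
	 later options win bit by bit.  E.g. "-msse4 -mno-ssse3" first
	 enables the whole SSE4 closure, then clears
	 OPTION_MASK_ISA_SSSE3_UNSET (SSSE3, SSE4.1, SSE4.2 and their AVX
	 dependents), leaving SSE/SSE2/SSE3 enabled.  Each touched bit is
	 also recorded in ix86_isa_flags_explicit so that later -march
	 defaults do not override explicit user choices.  */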
3070 /* Return a string that documents the current -m options. The caller is
3071 responsible for freeing the string. */
3074 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
3075 const char *fpmath, bool add_nl_p)
3077 struct ix86_target_opts
3079 const char *option; /* option string */
3080 int mask; /* isa mask options */
3083 /* This table is ordered so that options like -msse4.2 that imply
3084 preceding options match those first.  */
3085 static struct ix86_target_opts isa_opts[] =
3087 { "-m64", OPTION_MASK_ISA_64BIT },
3088 { "-mfma4", OPTION_MASK_ISA_FMA4 },
3089 { "-mfma", OPTION_MASK_ISA_FMA },
3090 { "-mxop", OPTION_MASK_ISA_XOP },
3091 { "-mlwp", OPTION_MASK_ISA_LWP },
3092 { "-msse4a", OPTION_MASK_ISA_SSE4A },
3093 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
3094 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
3095 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
3096 { "-msse3", OPTION_MASK_ISA_SSE3 },
3097 { "-msse2", OPTION_MASK_ISA_SSE2 },
3098 { "-msse", OPTION_MASK_ISA_SSE },
3099 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
3100 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
3101 { "-mmmx", OPTION_MASK_ISA_MMX },
3102 { "-mabm", OPTION_MASK_ISA_ABM },
3103 { "-mbmi", OPTION_MASK_ISA_BMI },
3104 { "-mtbm", OPTION_MASK_ISA_TBM },
3105 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
3106 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
3107 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
3108 { "-maes", OPTION_MASK_ISA_AES },
3109 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
3110 { "-mfsgsbase", OPTION_MASK_ISA_FSGSBASE },
3111 { "-mrdrnd", OPTION_MASK_ISA_RDRND },
3112 { "-mf16c", OPTION_MASK_ISA_F16C },
3116 static struct ix86_target_opts flag_opts[] =
3118 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
3119 { "-m80387", MASK_80387 },
3120 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
3121 { "-malign-double", MASK_ALIGN_DOUBLE },
3122 { "-mcld", MASK_CLD },
3123 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
3124 { "-mieee-fp", MASK_IEEE_FP },
3125 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
3126 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
3127 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
3128 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
3129 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
3130 { "-mno-push-args", MASK_NO_PUSH_ARGS },
3131 { "-mno-red-zone", MASK_NO_RED_ZONE },
3132 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
3133 { "-mrecip", MASK_RECIP },
3134 { "-mrtd", MASK_RTD },
3135 { "-msseregparm", MASK_SSEREGPARM },
3136 { "-mstack-arg-probe", MASK_STACK_PROBE },
3137 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
3138 { "-mvect8-ret-in-mem", MASK_VECT8_RETURNS },
3139 { "-m8bit-idiv", MASK_USE_8BIT_IDIV },
3140 { "-mvzeroupper", MASK_VZEROUPPER },
3141 { "-mavx256-split-unaligned-load", MASK_AVX256_SPLIT_UNALIGNED_LOAD},
3142 { "-mavx256-split-unaligned-store", MASK_AVX256_SPLIT_UNALIGNED_STORE},
3143 { "-mprefer-avx128", MASK_PREFER_AVX128},
3146 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
3149 char target_other[40];
3158 memset (opts, '\0', sizeof (opts));
3160 /* Add -march= option. */
3163 opts[num][0] = "-march=";
3164 opts[num++][1] = arch;
3167 /* Add -mtune= option. */
3170 opts[num][0] = "-mtune=";
3171 opts[num++][1] = tune;
3174 /* Pick out the ISA options.  */
3175 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
3177 if ((isa & isa_opts[i].mask) != 0)
3179 opts[num++][0] = isa_opts[i].option;
3180 isa &= ~ isa_opts[i].mask;
3184 if (isa && add_nl_p)
3186 opts[num++][0] = isa_other;
3187 sprintf (isa_other, "(other isa: %#x)", isa);
3190 /* Add flag options. */
3191 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
3193 if ((flags & flag_opts[i].mask) != 0)
3195 opts[num++][0] = flag_opts[i].option;
3196 flags &= ~ flag_opts[i].mask;
3200 if (flags && add_nl_p)
3202 opts[num++][0] = target_other;
3203 sprintf (target_other, "(other flags: %#x)", flags);
3206 /* Add -fpmath= option. */
3209 opts[num][0] = "-mfpmath=";
3210 opts[num++][1] = fpmath;
3217 gcc_assert (num < ARRAY_SIZE (opts));
3219 /* Size the string. */
3221 sep_len = (add_nl_p) ? 3 : 1;
3222 for (i = 0; i < num; i++)
3225 for (j = 0; j < 2; j++)
3227 len += strlen (opts[i][j]);
3230 /* Build the string. */
3231 ret = ptr = (char *) xmalloc (len);
3234 for (i = 0; i < num; i++)
3238 for (j = 0; j < 2; j++)
3239 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
3246 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
3254 for (j = 0; j < 2; j++)
3257 memcpy (ptr, opts[i][j], len2[j]);
3259 line_len += len2[j];
3264 gcc_assert (ret + len >= ptr);
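  /* Illustrative only: for e.g. "-m64 -mavx" with default arch/tune this
     might produce something like
       "-m64 -mavx -msse4.2 -msse4.1 -mssse3 -msse3 -msse2 -msse
	-march=... -mtune=..."
     since enabling AVX also set its whole SSE closure in the isa mask.  */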
3269 /* Return TRUE if software prefetching is beneficial for the
3273 software_prefetching_beneficial_p (void)
3277 case PROCESSOR_GEODE:
3279 case PROCESSOR_ATHLON:
3281 case PROCESSOR_AMDFAM10:
3282 case PROCESSOR_BTVER1:
3290 /* Return true if profiling code should be emitted before the
3291 prologue, and false otherwise.
3292 Note: for x86 with "hotfix", a sorry () diagnostic is issued.  */
3294 ix86_profile_before_prologue (void)
3296 return flag_fentry != 0;
3299 /* Function that is callable from the debugger to print the current
3302 ix86_debug_options (void)
3304 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
3305 ix86_arch_string, ix86_tune_string,
3306 ix86_fpmath_string, true);
3310 fprintf (stderr, "%s\n\n", opts);
3314 fputs ("<no options>\n\n", stderr);
3319 /* Override various settings based on options. If MAIN_ARGS_P, the
3320 options are from the command line, otherwise they are from
3324 ix86_option_override_internal (bool main_args_p)
3327 unsigned int ix86_arch_mask, ix86_tune_mask;
3328 const bool ix86_tune_specified = (ix86_tune_string != NULL);
3333 /* Comes from final.c -- no real reason to change it. */
3334 #define MAX_CODE_ALIGN 16
3342 PTA_PREFETCH_SSE = 1 << 4,
3344 PTA_3DNOW_A = 1 << 6,
3348 PTA_POPCNT = 1 << 10,
3350 PTA_SSE4A = 1 << 12,
3351 PTA_NO_SAHF = 1 << 13,
3352 PTA_SSE4_1 = 1 << 14,
3353 PTA_SSE4_2 = 1 << 15,
3355 PTA_PCLMUL = 1 << 17,
3358 PTA_MOVBE = 1 << 20,
3362 PTA_FSGSBASE = 1 << 24,
3363 PTA_RDRND = 1 << 25,
3367 /* if this reaches 32, need to widen struct pta flags below */
3372 const char *const name; /* processor name or nickname. */
3373 const enum processor_type processor;
3374 const enum attr_cpu schedule;
3375 const unsigned /*enum pta_flags*/ flags;
3377 const processor_alias_table[] =
3379 {"i386", PROCESSOR_I386, CPU_NONE, 0},
3380 {"i486", PROCESSOR_I486, CPU_NONE, 0},
3381 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3382 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3383 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
3384 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
3385 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3386 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3387 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
3388 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3389 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3390 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
3391 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3393 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3395 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3396 PTA_MMX | PTA_SSE | PTA_SSE2},
3397 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
3398 PTA_MMX | PTA_SSE | PTA_SSE2},
3399 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
3400 PTA_MMX | PTA_SSE | PTA_SSE2},
3401 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
3402 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
3403 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
3404 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3405 | PTA_CX16 | PTA_NO_SAHF},
3406 {"core2", PROCESSOR_CORE2_64, CPU_CORE2,
3407 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3408 | PTA_SSSE3 | PTA_CX16},
3409 {"corei7", PROCESSOR_COREI7_64, CPU_COREI7,
3410 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3411 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16},
3412 {"corei7-avx", PROCESSOR_COREI7_64, CPU_COREI7,
3413 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3414 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
3415 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL},
3416 {"core-avx-i", PROCESSOR_COREI7_64, CPU_COREI7,
3417 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3418 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
3419 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL | PTA_FSGSBASE
3420 | PTA_RDRND | PTA_F16C},
3421 {"atom", PROCESSOR_ATOM, CPU_ATOM,
3422 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3423 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
3424 {"geode", PROCESSOR_GEODE, CPU_GEODE,
3425 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3426 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
3427 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3428 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3429 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
3430 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3431 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
3432 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3433 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
3434 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3435 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
3436 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3437 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
3438 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3439 {"x86-64", PROCESSOR_K8, CPU_K8,
3440 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
3441 {"k8", PROCESSOR_K8, CPU_K8,
3442 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3443 | PTA_SSE2 | PTA_NO_SAHF},
3444 {"k8-sse3", PROCESSOR_K8, CPU_K8,
3445 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3446 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3447 {"opteron", PROCESSOR_K8, CPU_K8,
3448 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3449 | PTA_SSE2 | PTA_NO_SAHF},
3450 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
3451 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3452 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3453 {"athlon64", PROCESSOR_K8, CPU_K8,
3454 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3455 | PTA_SSE2 | PTA_NO_SAHF},
3456 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
3457 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3458 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3459 {"athlon-fx", PROCESSOR_K8, CPU_K8,
3460 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3461 | PTA_SSE2 | PTA_NO_SAHF},
3462 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3463 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3464 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3465 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3466 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3467 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3468 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
3469 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3470 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
3471 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
3472 | PTA_XOP | PTA_LWP},
3473 {"btver1", PROCESSOR_BTVER1, CPU_GENERIC64,
3474 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3475 | PTA_SSSE3 | PTA_SSE4A | PTA_ABM | PTA_CX16},
3476 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
3477 0 /* flags are only used for -march switch. */ },
3478 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
3479 PTA_64BIT /* flags are only used for -march switch. */ },
3482 int const pta_size = ARRAY_SIZE (processor_alias_table);
3484 /* Set up prefix/suffix so the error messages refer to either the command
3485 line argument, or the attribute(target). */
3494 prefix = "option(\"";
3499 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3500 SUBTARGET_OVERRIDE_OPTIONS;
3503 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3504 SUBSUBTARGET_OVERRIDE_OPTIONS;
3507 /* -fPIC is the default for x86_64. */
3508 if (TARGET_MACHO && TARGET_64BIT)
3511 /* Need to check -mtune=generic first. */
3512 if (ix86_tune_string)
3514 if (!strcmp (ix86_tune_string, "generic")
3515 || !strcmp (ix86_tune_string, "i686")
3516 /* As special support for cross compilers we read -mtune=native
3517 as -mtune=generic.  With native compilers we won't see
3518 -mtune=native, as it will have been rewritten by the driver.  */
3519 || !strcmp (ix86_tune_string, "native"))
3522 ix86_tune_string = "generic64";
3524 ix86_tune_string = "generic32";
3526 /* If this call is for setting the option attribute, allow the
3527 generic32/generic64 that was previously set. */
3528 else if (!main_args_p
3529 && (!strcmp (ix86_tune_string, "generic32")
3530 || !strcmp (ix86_tune_string, "generic64")))
3532 else if (!strncmp (ix86_tune_string, "generic", 7))
3533 error ("bad value (%s) for %stune=%s %s",
3534 ix86_tune_string, prefix, suffix, sw);
3535 else if (!strcmp (ix86_tune_string, "x86-64"))
3536 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
3537 "%stune=k8%s or %stune=generic%s instead as appropriate",
3538 prefix, suffix, prefix, suffix, prefix, suffix);
3542 if (ix86_arch_string)
3543 ix86_tune_string = ix86_arch_string;
3544 if (!ix86_tune_string)
3546 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
3547 ix86_tune_defaulted = 1;
3550 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
3551 need to use a sensible tune option. */
3552 if (!strcmp (ix86_tune_string, "generic")
3553 || !strcmp (ix86_tune_string, "x86-64")
3554 || !strcmp (ix86_tune_string, "i686"))
3557 ix86_tune_string = "generic64";
3559 ix86_tune_string = "generic32";
3563 if (ix86_stringop_string)
3565 if (!strcmp (ix86_stringop_string, "rep_byte"))
3566 stringop_alg = rep_prefix_1_byte;
3567 else if (!strcmp (ix86_stringop_string, "libcall"))
3568 stringop_alg = libcall;
3569 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
3570 stringop_alg = rep_prefix_4_byte;
3571 else if (!strcmp (ix86_stringop_string, "rep_8byte")
3573 /* rep; movq isn't available in 32-bit code. */
3574 stringop_alg = rep_prefix_8_byte;
3575 else if (!strcmp (ix86_stringop_string, "byte_loop"))
3576 stringop_alg = loop_1_byte;
3577 else if (!strcmp (ix86_stringop_string, "loop"))
3578 stringop_alg = loop;
3579 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
3580 stringop_alg = unrolled_loop;
3582 error ("bad value (%s) for %sstringop-strategy=%s %s",
3583 ix86_stringop_string, prefix, suffix, sw);
3586 if (!ix86_arch_string)
3587 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3589 ix86_arch_specified = 1;
3591 /* Validate -mabi= value. */
3592 if (ix86_abi_string)
3594 if (strcmp (ix86_abi_string, "sysv") == 0)
3595 ix86_abi = SYSV_ABI;
3596 else if (strcmp (ix86_abi_string, "ms") == 0)
3599 error ("unknown ABI (%s) for %sabi=%s %s",
3600 ix86_abi_string, prefix, suffix, sw);
3603 ix86_abi = DEFAULT_ABI;
3605 if (ix86_cmodel_string != 0)
3607 if (!strcmp (ix86_cmodel_string, "small"))
3608 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3609 else if (!strcmp (ix86_cmodel_string, "medium"))
3610 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3611 else if (!strcmp (ix86_cmodel_string, "large"))
3612 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3614 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3615 else if (!strcmp (ix86_cmodel_string, "32"))
3616 ix86_cmodel = CM_32;
3617 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3618 ix86_cmodel = CM_KERNEL;
3620 error ("bad value (%s) for %scmodel=%s %s",
3621 ix86_cmodel_string, prefix, suffix, sw);
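/* Worked example (illustrative): "-mcmodel=medium -fPIC" selects
   CM_MEDIUM_PIC and plain "-mcmodel=medium" selects CM_MEDIUM, while
   "-mcmodel=kernel -fPIC" falls into the "does not support PIC mode"
   error above, since only small/medium/large have PIC variants.  */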
3625 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3626 use of rip-relative addressing. This eliminates fixups that
3627 would otherwise be needed if this object is to be placed in a
3628 DLL, and is essentially just as efficient as direct addressing. */
3629 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3630 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3631 else if (TARGET_64BIT)
3632 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3634 ix86_cmodel = CM_32;
3636 if (ix86_asm_string != 0)
3639 && !strcmp (ix86_asm_string, "intel"))
3640 ix86_asm_dialect = ASM_INTEL;
3641 else if (!strcmp (ix86_asm_string, "att"))
3642 ix86_asm_dialect = ASM_ATT;
3644 error ("bad value (%s) for %sasm=%s %s",
3645 ix86_asm_string, prefix, suffix, sw);
3647 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3648 error ("code model %qs not supported in the %s bit mode",
3649 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3650 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3651 sorry ("%i-bit mode not compiled in",
3652 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3654 for (i = 0; i < pta_size; i++)
3655 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3657 ix86_schedule = processor_alias_table[i].schedule;
3658 ix86_arch = processor_alias_table[i].processor;
3659 /* Default cpu tuning to the architecture. */
3660 ix86_tune = ix86_arch;
3662 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3663 error ("CPU you selected does not support x86-64 "
3666 if (processor_alias_table[i].flags & PTA_MMX
3667 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3668 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3669 if (processor_alias_table[i].flags & PTA_3DNOW
3670 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3671 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3672 if (processor_alias_table[i].flags & PTA_3DNOW_A
3673 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3674 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3675 if (processor_alias_table[i].flags & PTA_SSE
3676 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3677 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3678 if (processor_alias_table[i].flags & PTA_SSE2
3679 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3680 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3681 if (processor_alias_table[i].flags & PTA_SSE3
3682 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3683 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3684 if (processor_alias_table[i].flags & PTA_SSSE3
3685 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3686 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3687 if (processor_alias_table[i].flags & PTA_SSE4_1
3688 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3689 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3690 if (processor_alias_table[i].flags & PTA_SSE4_2
3691 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3692 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3693 if (processor_alias_table[i].flags & PTA_AVX
3694 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3695 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3696 if (processor_alias_table[i].flags & PTA_FMA
3697 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3698 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3699 if (processor_alias_table[i].flags & PTA_SSE4A
3700 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3701 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3702 if (processor_alias_table[i].flags & PTA_FMA4
3703 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3704 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3705 if (processor_alias_table[i].flags & PTA_XOP
3706 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3707 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3708 if (processor_alias_table[i].flags & PTA_LWP
3709 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3710 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3711 if (processor_alias_table[i].flags & PTA_ABM
3712 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3713 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3714 if (processor_alias_table[i].flags & PTA_BMI
3715 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
3716 ix86_isa_flags |= OPTION_MASK_ISA_BMI;
3717 if (processor_alias_table[i].flags & PTA_TBM
3718 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
3719 ix86_isa_flags |= OPTION_MASK_ISA_TBM;
3720 if (processor_alias_table[i].flags & PTA_CX16
3721 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3722 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3723 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3724 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3725 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3726 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3727 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3728 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3729 if (processor_alias_table[i].flags & PTA_MOVBE
3730 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3731 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3732 if (processor_alias_table[i].flags & PTA_AES
3733 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3734 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3735 if (processor_alias_table[i].flags & PTA_PCLMUL
3736 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3737 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3738 if (processor_alias_table[i].flags & PTA_FSGSBASE
3739 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
3740 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
3741 if (processor_alias_table[i].flags & PTA_RDRND
3742 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
3743 ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
3744 if (processor_alias_table[i].flags & PTA_F16C
3745 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
3746 ix86_isa_flags |= OPTION_MASK_ISA_F16C;
3747 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3748 x86_prefetch_sse = true;
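/* Worked example (illustrative, assuming the alias-table entry for
   core2 carries PTA_SSE through PTA_SSSE3): "-march=core2" turns on
   OPTION_MASK_ISA_SSE through OPTION_MASK_ISA_SSSE3 by default, but
   an explicit "-mno-ssse3" sets that bit in ix86_isa_flags_explicit
   and therefore blocks only that one default.  */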
3753 if (!strcmp (ix86_arch_string, "generic"))
3754 error ("generic CPU can be used only for %stune=%s %s",
3755 prefix, suffix, sw);
3756 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3757 error ("bad value (%s) for %sarch=%s %s",
3758 ix86_arch_string, prefix, suffix, sw);
3760 ix86_arch_mask = 1u << ix86_arch;
3761 for (i = 0; i < X86_ARCH_LAST; ++i)
3762 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3764 for (i = 0; i < pta_size; i++)
3765 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3767 ix86_schedule = processor_alias_table[i].schedule;
3768 ix86_tune = processor_alias_table[i].processor;
3771 if (!(processor_alias_table[i].flags & PTA_64BIT))
3773 if (ix86_tune_defaulted)
3775 ix86_tune_string = "x86-64";
3776 for (i = 0; i < pta_size; i++)
3777 if (! strcmp (ix86_tune_string,
3778 processor_alias_table[i].name))
3780 ix86_schedule = processor_alias_table[i].schedule;
3781 ix86_tune = processor_alias_table[i].processor;
3784 error ("CPU you selected does not support x86-64 "
3790 /* Adjust tuning when compiling for 32-bit ABI. */
3793 case PROCESSOR_GENERIC64:
3794 ix86_tune = PROCESSOR_GENERIC32;
3795 ix86_schedule = CPU_PENTIUMPRO;
3798 case PROCESSOR_CORE2_64:
3799 ix86_tune = PROCESSOR_CORE2_32;
3802 case PROCESSOR_COREI7_64:
3803 ix86_tune = PROCESSOR_COREI7_32;
3810 /* Intel CPUs have always interpreted SSE prefetch instructions as
3811 NOPs; so, we can enable SSE prefetch instructions even when
3812 -mtune (rather than -march) points us to a processor that has them.
3813 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3814 higher processors. */
3816 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3817 x86_prefetch_sse = true;
3821 if (ix86_tune_specified && i == pta_size)
3822 error ("bad value (%s) for %stune=%s %s",
3823 ix86_tune_string, prefix, suffix, sw);
3825 ix86_tune_mask = 1u << ix86_tune;
3826 for (i = 0; i < X86_TUNE_LAST; ++i)
3827 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3829 #ifndef USE_IX86_FRAME_POINTER
3830 #define USE_IX86_FRAME_POINTER 0
3833 #ifndef USE_X86_64_FRAME_POINTER
3834 #define USE_X86_64_FRAME_POINTER 0
3837 /* Set the default values for switches whose default depends on TARGET_64BIT
3838 in case they weren't overwritten by command line options. */
3841 if (optimize > 1 && !global_options_set.x_flag_zee)
3843 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3844 flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
3845 if (flag_asynchronous_unwind_tables == 2)
3846 flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
3847 if (flag_pcc_struct_return == 2)
3848 flag_pcc_struct_return = 0;
3852 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3853 flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
3854 if (flag_asynchronous_unwind_tables == 2)
3855 flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
3856 if (flag_pcc_struct_return == 2)
3857 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
3861 ix86_cost = &ix86_size_cost;
3863 ix86_cost = processor_target_table[ix86_tune].cost;
3865 /* Arrange to set up i386_stack_locals for all functions. */
3866 init_machine_status = ix86_init_machine_status;
3868 /* Validate -mregparm= value. */
3869 if (ix86_regparm_string)
3872 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3873 i = atoi (ix86_regparm_string);
3874 if (i < 0 || i > REGPARM_MAX)
3875 error ("%sregparm=%d%s is not between 0 and %d",
3876 prefix, i, suffix, REGPARM_MAX);
3881 ix86_regparm = REGPARM_MAX;
3883 /* If the user has provided any of the -malign-* options,
3884 warn and use that value only if -falign-* is not set.
3885 Remove this code in GCC 3.2 or later. */
3886 if (ix86_align_loops_string)
3888 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3889 prefix, suffix, suffix);
3890 if (align_loops == 0)
3892 i = atoi (ix86_align_loops_string);
3893 if (i < 0 || i > MAX_CODE_ALIGN)
3894 error ("%salign-loops=%d%s is not between 0 and %d",
3895 prefix, i, suffix, MAX_CODE_ALIGN);
3897 align_loops = 1 << i;
3901 if (ix86_align_jumps_string)
3903 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3904 prefix, suffix, suffix);
3905 if (align_jumps == 0)
3907 i = atoi (ix86_align_jumps_string);
3908 if (i < 0 || i > MAX_CODE_ALIGN)
3909 error ("%salign-loops=%d%s is not between 0 and %d",
3910 prefix, i, suffix, MAX_CODE_ALIGN);
3912 align_jumps = 1 << i;
3916 if (ix86_align_funcs_string)
3918 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3919 prefix, suffix, suffix);
3920 if (align_functions == 0)
3922 i = atoi (ix86_align_funcs_string);
3923 if (i < 0 || i > MAX_CODE_ALIGN)
3924 error ("%salign-loops=%d%s is not between 0 and %d",
3925 prefix, i, suffix, MAX_CODE_ALIGN);
3927 align_functions = 1 << i;
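/* Worked example (illustrative): the obsolete "-malign-loops=4" yields
   align_loops = 1 << 4 = 16 bytes, but only when -falign-loops has not
   already set a nonzero value; the same power-of-two decoding applies
   to -malign-jumps= and -malign-functions= above.  */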
3931 /* Default align_* from the processor table. */
3932 if (align_loops == 0)
3934 align_loops = processor_target_table[ix86_tune].align_loop;
3935 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3937 if (align_jumps == 0)
3939 align_jumps = processor_target_table[ix86_tune].align_jump;
3940 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3942 if (align_functions == 0)
3944 align_functions = processor_target_table[ix86_tune].align_func;
3947 /* Validate -mbranch-cost= value, or provide default. */
3948 ix86_branch_cost = ix86_cost->branch_cost;
3949 if (ix86_branch_cost_string)
3951 i = atoi (ix86_branch_cost_string);
3953 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3955 ix86_branch_cost = i;
3957 if (ix86_section_threshold_string)
3959 i = atoi (ix86_section_threshold_string);
3961 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3963 ix86_section_threshold = i;
3966 if (ix86_tls_dialect_string)
3968 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3969 ix86_tls_dialect = TLS_DIALECT_GNU;
3970 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3971 ix86_tls_dialect = TLS_DIALECT_GNU2;
3973 error ("bad value (%s) for %stls-dialect=%s %s",
3974 ix86_tls_dialect_string, prefix, suffix, sw);
3977 if (ix87_precision_string)
3979 i = atoi (ix87_precision_string);
3980 if (i != 32 && i != 64 && i != 80)
3981 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3986 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3988 /* Enable by default the SSE and MMX builtins. Do allow the user to
3989 explicitly disable any of these. In particular, disabling SSE and
3990 MMX for kernel code is extremely useful. */
3991 if (!ix86_arch_specified)
3993 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3994 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3997 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
4001 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
4003 if (!ix86_arch_specified)
4005 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
4007 /* i386 ABI does not specify red zone. It still makes sense to use it
4008 when the programmer takes care to keep the stack from being destroyed. */
4009 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
4010 target_flags |= MASK_NO_RED_ZONE;
4013 /* Keep nonleaf frame pointers. */
4014 if (flag_omit_frame_pointer)
4015 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
4016 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
4017 flag_omit_frame_pointer = 1;
4019 /* If we're doing fast math, we don't care about comparison order
4020 wrt NaNs. This lets us use a shorter comparison sequence. */
4021 if (flag_finite_math_only)
4022 target_flags &= ~MASK_IEEE_FP;
4024 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
4025 since the insns won't need emulation. */
4026 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
4027 target_flags &= ~MASK_NO_FANCY_MATH_387;
4029 /* Likewise, if the target doesn't have a 387, or we've specified
4030 software floating point, don't use 387 inline intrinsics. */
4032 target_flags |= MASK_NO_FANCY_MATH_387;
4034 /* Turn on MMX builtins for -msse. */
4037 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
4038 x86_prefetch_sse = true;
4041 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
4042 if (TARGET_SSE4_2 || TARGET_ABM)
4043 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
4045 /* Validate -mpreferred-stack-boundary= value or default it to
4046 PREFERRED_STACK_BOUNDARY_DEFAULT. */
4047 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
4048 if (ix86_preferred_stack_boundary_string)
4050 int min = (TARGET_64BIT ? 4 : 2);
4051 int max = (TARGET_SEH ? 4 : 12);
4053 i = atoi (ix86_preferred_stack_boundary_string);
4054 if (i < min || i > max)
4057 error ("%spreferred-stack-boundary%s is not supported "
4058 "for this target", prefix, suffix);
4060 error ("%spreferred-stack-boundary=%d%s is not between %d and %d",
4061 prefix, i, suffix, min, max);
4064 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
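/* Worked example (illustrative): on a 64-bit target the minimum is 4,
   so "-mpreferred-stack-boundary=4" gives (1 << 4) * BITS_PER_UNIT
   = 128 bits, i.e. the usual 16-byte stack alignment; SEH targets
   accept only that exponent.  */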
4067 /* Set the default value for -mstackrealign. */
4068 if (ix86_force_align_arg_pointer == -1)
4069 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
4071 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
4073 /* Validate -mincoming-stack-boundary= value or default it to
4074 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
4075 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
4076 if (ix86_incoming_stack_boundary_string)
4078 i = atoi (ix86_incoming_stack_boundary_string);
4079 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
4080 error ("-mincoming-stack-boundary=%d is not between %d and 12",
4081 i, TARGET_64BIT ? 4 : 2);
4084 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
4085 ix86_incoming_stack_boundary
4086 = ix86_user_incoming_stack_boundary;
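/* Worked example (illustrative): "-mincoming-stack-boundary=2" on a
   32-bit target gives (1 << 2) * BITS_PER_UNIT = 32 bits, i.e. the
   compiler assumes only 4-byte stack alignment on function entry and
   realigns locally where more is required.  */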
4090 /* Accept -msseregparm only if at least SSE support is enabled. */
4091 if (TARGET_SSEREGPARM
4093 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
4095 ix86_fpmath = TARGET_FPMATH_DEFAULT;
4096 if (ix86_fpmath_string != 0)
4098 if (! strcmp (ix86_fpmath_string, "387"))
4099 ix86_fpmath = FPMATH_387;
4100 else if (! strcmp (ix86_fpmath_string, "sse"))
4104 warning (0, "SSE instruction set disabled, using 387 arithmetics");
4105 ix86_fpmath = FPMATH_387;
4108 ix86_fpmath = FPMATH_SSE;
4110 else if (! strcmp (ix86_fpmath_string, "387,sse")
4111 || ! strcmp (ix86_fpmath_string, "387+sse")
4112 || ! strcmp (ix86_fpmath_string, "sse,387")
4113 || ! strcmp (ix86_fpmath_string, "sse+387")
4114 || ! strcmp (ix86_fpmath_string, "both"))
4118 warning (0, "SSE instruction set disabled, using 387 arithmetics");
4119 ix86_fpmath = FPMATH_387;
4121 else if (!TARGET_80387)
4123 warning (0, "387 instruction set disabled, using SSE arithmetics");
4124 ix86_fpmath = FPMATH_SSE;
4127 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
4130 error ("bad value (%s) for %sfpmath=%s %s",
4131 ix86_fpmath_string, prefix, suffix, sw);
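/* Illustrative mapping: "-mfpmath=sse" requires SSE to be enabled and
   otherwise warns and falls back to FPMATH_387; "-mfpmath=both" (and
   the equivalent spellings above) yields FPMATH_SSE | FPMATH_387 when
   both units are available.  */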
4134 /* If the i387 is disabled, then do not return values in it. */
4136 target_flags &= ~MASK_FLOAT_RETURNS;
4138 /* Use external vectorized library in vectorizing intrinsics. */
4139 if (ix86_veclibabi_string)
4141 if (strcmp (ix86_veclibabi_string, "svml") == 0)
4142 ix86_veclib_handler = ix86_veclibabi_svml;
4143 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
4144 ix86_veclib_handler = ix86_veclibabi_acml;
4146 error ("unknown vectorization library ABI type (%s) for "
4147 "%sveclibabi=%s %s", ix86_veclibabi_string,
4148 prefix, suffix, sw);
4151 if ((!USE_IX86_FRAME_POINTER
4152 || (x86_accumulate_outgoing_args & ix86_tune_mask))
4153 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4155 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4157 /* ??? Unwind info is not correct around the CFG unless either a frame
4158 pointer is present or M_A_O_A is set. Fixing this requires rewriting
4159 unwind info generation to be aware of the CFG and propagating states
4161 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
4162 || flag_exceptions || flag_non_call_exceptions)
4163 && flag_omit_frame_pointer
4164 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
4166 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4167 warning (0, "unwind tables currently require either a frame pointer "
4168 "or %saccumulate-outgoing-args%s for correctness",
4170 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4173 /* If stack probes are required, the space used for large function
4174 arguments on the stack must also be probed, so enable
4175 -maccumulate-outgoing-args so this happens in the prologue. */
4176 if (TARGET_STACK_PROBE
4177 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
4179 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4180 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
4181 "for correctness", prefix, suffix);
4182 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4185 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
4188 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
4189 p = strchr (internal_label_prefix, 'X');
4190 internal_label_prefix_len = p - internal_label_prefix;
4194 /* When the scheduling description is not available, disable the scheduler pass
4195 so it won't slow down the compilation and make x87 code slower. */
4196 if (!TARGET_SCHEDULE)
4197 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
4199 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4200 ix86_cost->simultaneous_prefetches,
4201 global_options.x_param_values,
4202 global_options_set.x_param_values);
4203 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, ix86_cost->prefetch_block,
4204 global_options.x_param_values,
4205 global_options_set.x_param_values);
4206 maybe_set_param_value (PARAM_L1_CACHE_SIZE, ix86_cost->l1_cache_size,
4207 global_options.x_param_values,
4208 global_options_set.x_param_values);
4209 maybe_set_param_value (PARAM_L2_CACHE_SIZE, ix86_cost->l2_cache_size,
4210 global_options.x_param_values,
4211 global_options_set.x_param_values);
4213 /* Enable software prefetching at -O3 for CPUs where prefetching is helpful. */
4214 if (flag_prefetch_loop_arrays < 0
4217 && software_prefetching_beneficial_p ())
4218 flag_prefetch_loop_arrays = 1;
4220 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
4221 can be optimized to ap = __builtin_next_arg (0). */
4222 if (!TARGET_64BIT && !flag_split_stack)
4223 targetm.expand_builtin_va_start = NULL;
4227 ix86_gen_leave = gen_leave_rex64;
4228 ix86_gen_add3 = gen_adddi3;
4229 ix86_gen_sub3 = gen_subdi3;
4230 ix86_gen_sub3_carry = gen_subdi3_carry;
4231 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
4232 ix86_gen_monitor = gen_sse3_monitor64;
4233 ix86_gen_andsp = gen_anddi3;
4234 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
4235 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
4236 ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
4240 ix86_gen_leave = gen_leave;
4241 ix86_gen_add3 = gen_addsi3;
4242 ix86_gen_sub3 = gen_subsi3;
4243 ix86_gen_sub3_carry = gen_subsi3_carry;
4244 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
4245 ix86_gen_monitor = gen_sse3_monitor;
4246 ix86_gen_andsp = gen_andsi3;
4247 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
4248 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
4249 ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
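/* Illustrative use (a sketch, not code from this function): these
   hooks let word-size-independent code emit the right pattern, e.g.

     emit_insn (ix86_gen_add3 (stack_pointer_rtx, stack_pointer_rtx,
                               GEN_INT (-UNITS_PER_WORD)));

   expands through gen_adddi3 on 64-bit targets and gen_addsi3 on
   32-bit ones.  */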
4253 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
4255 target_flags |= MASK_CLD & ~target_flags_explicit;
4258 if (!TARGET_64BIT && flag_pic)
4260 if (flag_fentry > 0)
4261 sorry ("-mfentry isn%'t supported for 32-bit in combination "
4265 else if (TARGET_SEH)
4267 if (flag_fentry == 0)
4268 sorry ("-mno-fentry isn%'t compatible with SEH");
4271 else if (flag_fentry < 0)
4273 #if defined(PROFILE_BEFORE_PROLOGUE)
4280 /* Save the initial options in case the user does function specific options */
4282 target_option_default_node = target_option_current_node
4283 = build_target_option_node ();
4287 /* When not optimizing for size, enable vzeroupper optimization for
4288 TARGET_AVX with -fexpensive-optimizations and split 32-byte
4289 AVX unaligned load/store. */
4292 if (flag_expensive_optimizations
4293 && !(target_flags_explicit & MASK_VZEROUPPER))
4294 target_flags |= MASK_VZEROUPPER;
4295 if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
4296 && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
4297 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
4298 if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
4299 && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
4300 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
4301 /* Enable 128-bit AVX instruction generation for the auto-vectorizer. */
4302 if (TARGET_AVX128_OPTIMAL && !(target_flags_explicit & MASK_PREFER_AVX128))
4303 target_flags |= MASK_PREFER_AVX128;
4308 /* Disable vzeroupper pass if TARGET_AVX is disabled. */
4309 target_flags &= ~MASK_VZEROUPPER;
4313 /* Return TRUE if VAL is passed in a register with 256-bit AVX modes. */
4316 function_pass_avx256_p (const_rtx val)
4321 if (REG_P (val) && VALID_AVX256_REG_MODE (GET_MODE (val)))
4324 if (GET_CODE (val) == PARALLEL)
4329 for (i = XVECLEN (val, 0) - 1; i >= 0; i--)
4331 r = XVECEXP (val, 0, i);
4332 if (GET_CODE (r) == EXPR_LIST
4334 && REG_P (XEXP (r, 0))
4335 && (GET_MODE (XEXP (r, 0)) == OImode
4336 || VALID_AVX256_REG_MODE (GET_MODE (XEXP (r, 0)))))
4344 /* Implement the TARGET_OPTION_OVERRIDE hook. */
4347 ix86_option_override (void)
4349 ix86_option_override_internal (true);
4352 /* Update register usage after having seen the compiler flags. */
4355 ix86_conditional_register_usage (void)
4360 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4362 if (fixed_regs[i] > 1)
4363 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
4364 if (call_used_regs[i] > 1)
4365 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
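/* Illustrative reading of the encoding above: an initial table value
   of 2 means "fixed/call-used only in 32-bit mode" and 3 means "only
   in 64-bit mode"; the comparison collapses either encoding to a
   plain 0/1 for the current target.  */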
4368 /* The PIC register, if it exists, is fixed. */
4369 j = PIC_OFFSET_TABLE_REGNUM;
4370 if (j != INVALID_REGNUM)
4371 fixed_regs[j] = call_used_regs[j] = 1;
4373 /* The MS_ABI changes the set of call-used registers. */
4374 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
4376 call_used_regs[SI_REG] = 0;
4377 call_used_regs[DI_REG] = 0;
4378 call_used_regs[XMM6_REG] = 0;
4379 call_used_regs[XMM7_REG] = 0;
4380 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4381 call_used_regs[i] = 0;
4384 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
4385 other call-clobbered regs for 64-bit. */
4388 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
4390 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4391 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
4392 && call_used_regs[i])
4393 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
4396 /* If MMX is disabled, squash the registers. */
4398 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4399 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
4400 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4402 /* If SSE is disabled, squash the registers. */
4404 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4405 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
4406 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4408 /* If the FPU is disabled, squash the registers. */
4409 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
4410 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4411 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
4412 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4414 /* If 32-bit, squash the 64-bit registers. */
4417 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
4419 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4425 /* Save the current options */
4428 ix86_function_specific_save (struct cl_target_option *ptr)
4430 ptr->arch = ix86_arch;
4431 ptr->schedule = ix86_schedule;
4432 ptr->tune = ix86_tune;
4433 ptr->fpmath = ix86_fpmath;
4434 ptr->branch_cost = ix86_branch_cost;
4435 ptr->tune_defaulted = ix86_tune_defaulted;
4436 ptr->arch_specified = ix86_arch_specified;
4437 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
4438 ptr->ix86_target_flags_explicit = target_flags_explicit;
4440 /* The fields are char but the variables are not; make sure the
4441 values fit in the fields. */
4442 gcc_assert (ptr->arch == ix86_arch);
4443 gcc_assert (ptr->schedule == ix86_schedule);
4444 gcc_assert (ptr->tune == ix86_tune);
4445 gcc_assert (ptr->fpmath == ix86_fpmath);
4446 gcc_assert (ptr->branch_cost == ix86_branch_cost);
4449 /* Restore the current options */
4452 ix86_function_specific_restore (struct cl_target_option *ptr)
4454 enum processor_type old_tune = ix86_tune;
4455 enum processor_type old_arch = ix86_arch;
4456 unsigned int ix86_arch_mask, ix86_tune_mask;
4459 ix86_arch = (enum processor_type) ptr->arch;
4460 ix86_schedule = (enum attr_cpu) ptr->schedule;
4461 ix86_tune = (enum processor_type) ptr->tune;
4462 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
4463 ix86_branch_cost = ptr->branch_cost;
4464 ix86_tune_defaulted = ptr->tune_defaulted;
4465 ix86_arch_specified = ptr->arch_specified;
4466 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
4467 target_flags_explicit = ptr->ix86_target_flags_explicit;
4469 /* Recreate the arch feature tests if the arch changed */
4470 if (old_arch != ix86_arch)
4472 ix86_arch_mask = 1u << ix86_arch;
4473 for (i = 0; i < X86_ARCH_LAST; ++i)
4474 ix86_arch_features[i]
4475 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
4478 /* Recreate the tune optimization tests */
4479 if (old_tune != ix86_tune)
4481 ix86_tune_mask = 1u << ix86_tune;
4482 for (i = 0; i < X86_TUNE_LAST; ++i)
4483 ix86_tune_features[i]
4484 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
4488 /* Print the current options */
4491 ix86_function_specific_print (FILE *file, int indent,
4492 struct cl_target_option *ptr)
4495 = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
4496 NULL, NULL, NULL, false);
4498 fprintf (file, "%*sarch = %d (%s)\n",
4501 ((ptr->arch < TARGET_CPU_DEFAULT_max)
4502 ? cpu_names[ptr->arch]
4505 fprintf (file, "%*stune = %d (%s)\n",
4508 ((ptr->tune < TARGET_CPU_DEFAULT_max)
4509 ? cpu_names[ptr->tune]
4512 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
4513 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
4514 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
4515 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
4519 fprintf (file, "%*s%s\n", indent, "", target_string);
4520 free (target_string);
4525 /* Inner function to process the attribute((target(...))), take an argument and
4526 set the current options from the argument. If we have a list, recursively go
4530 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
4535 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
4536 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
4537 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
4538 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
4553 enum ix86_opt_type type;
4558 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
4559 IX86_ATTR_ISA ("abm", OPT_mabm),
4560 IX86_ATTR_ISA ("bmi", OPT_mbmi),
4561 IX86_ATTR_ISA ("tbm", OPT_mtbm),
4562 IX86_ATTR_ISA ("aes", OPT_maes),
4563 IX86_ATTR_ISA ("avx", OPT_mavx),
4564 IX86_ATTR_ISA ("mmx", OPT_mmmx),
4565 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
4566 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
4567 IX86_ATTR_ISA ("sse", OPT_msse),
4568 IX86_ATTR_ISA ("sse2", OPT_msse2),
4569 IX86_ATTR_ISA ("sse3", OPT_msse3),
4570 IX86_ATTR_ISA ("sse4", OPT_msse4),
4571 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
4572 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
4573 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
4574 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
4575 IX86_ATTR_ISA ("fma4", OPT_mfma4),
4576 IX86_ATTR_ISA ("xop", OPT_mxop),
4577 IX86_ATTR_ISA ("lwp", OPT_mlwp),
4578 IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
4579 IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
4580 IX86_ATTR_ISA ("f16c", OPT_mf16c),
4582 /* string options */
4583 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
4584 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
4585 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
4588 IX86_ATTR_YES ("cld",
4592 IX86_ATTR_NO ("fancy-math-387",
4593 OPT_mfancy_math_387,
4594 MASK_NO_FANCY_MATH_387),
4596 IX86_ATTR_YES ("ieee-fp",
4600 IX86_ATTR_YES ("inline-all-stringops",
4601 OPT_minline_all_stringops,
4602 MASK_INLINE_ALL_STRINGOPS),
4604 IX86_ATTR_YES ("inline-stringops-dynamically",
4605 OPT_minline_stringops_dynamically,
4606 MASK_INLINE_STRINGOPS_DYNAMICALLY),
4608 IX86_ATTR_NO ("align-stringops",
4609 OPT_mno_align_stringops,
4610 MASK_NO_ALIGN_STRINGOPS),
4612 IX86_ATTR_YES ("recip",
4618 /* If this is a list, recurse to get the options. */
4619 if (TREE_CODE (args) == TREE_LIST)
4623 for (; args; args = TREE_CHAIN (args))
4624 if (TREE_VALUE (args)
4625 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
4631 else if (TREE_CODE (args) != STRING_CST)
4634 /* Handle multiple arguments separated by commas. */
4635 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4637 while (next_optstr && *next_optstr != '\0')
4639 char *p = next_optstr;
4641 char *comma = strchr (next_optstr, ',');
4642 const char *opt_string;
4643 size_t len, opt_len;
4648 enum ix86_opt_type type = ix86_opt_unknown;
4654 len = comma - next_optstr;
4655 next_optstr = comma + 1;
4663 /* Recognize no-xxx. */
4664 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4673 /* Find the option. */
4676 for (i = 0; i < ARRAY_SIZE (attrs); i++)
4678 type = attrs[i].type;
4679 opt_len = attrs[i].len;
4680 if (ch == attrs[i].string[0]
4681 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
4682 && memcmp (p, attrs[i].string, opt_len) == 0)
4685 mask = attrs[i].mask;
4686 opt_string = attrs[i].string;
4691 /* Process the option. */
4694 error ("attribute(target(\"%s\")) is unknown", orig_p);
4698 else if (type == ix86_opt_isa)
4699 ix86_handle_option (opt, p, opt_set_p);
4701 else if (type == ix86_opt_yes || type == ix86_opt_no)
4703 if (type == ix86_opt_no)
4704 opt_set_p = !opt_set_p;
4707 target_flags |= mask;
4709 target_flags &= ~mask;
4712 else if (type == ix86_opt_str)
4716 error ("option(\"%s\") was already specified", opt_string);
4720 p_strings[opt] = xstrdup (p + opt_len);
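/* Example of attribute strings this parser accepts (illustrative
   user code, not part of GCC):

     int fast_popcount (int x) __attribute__((target("popcnt,no-3dnow")));
     int tuned (void) __attribute__((target("arch=core2,fpmath=sse")));

   Comma-separated items are split above, a leading "no-" flips
   opt_set_p, and the "arch="/"tune="/"fpmath=" strings are saved in
   p_strings[] for later processing.  */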
4730 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
4733 ix86_valid_target_attribute_tree (tree args)
4735 const char *orig_arch_string = ix86_arch_string;
4736 const char *orig_tune_string = ix86_tune_string;
4737 const char *orig_fpmath_string = ix86_fpmath_string;
4738 int orig_tune_defaulted = ix86_tune_defaulted;
4739 int orig_arch_specified = ix86_arch_specified;
4740 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
4743 struct cl_target_option *def
4744 = TREE_TARGET_OPTION (target_option_default_node);
4746 /* Process each of the options on the chain. */
4747 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
4750 /* If the changed options are different from the default, rerun
4751 ix86_option_override_internal, and then save the options away.
4752 The string options are attribute options, and will be undone
4753 when we copy the save structure. */
4754 if (ix86_isa_flags != def->x_ix86_isa_flags
4755 || target_flags != def->x_target_flags
4756 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4757 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4758 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4760 /* If we are using the default tune= or arch=, undo the string assigned,
4761 and use the default. */
4762 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4763 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4764 else if (!orig_arch_specified)
4765 ix86_arch_string = NULL;
4767 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4768 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4769 else if (orig_tune_defaulted)
4770 ix86_tune_string = NULL;
4772 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4773 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4774 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
4775 else if (!TARGET_64BIT && TARGET_SSE)
4776 ix86_fpmath_string = "sse,387";
4778 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
4779 ix86_option_override_internal (false);
4781 /* Add any builtin functions with the new isa if any. */
4782 ix86_add_new_builtins (ix86_isa_flags);
4784 /* Save the current options unless we are validating options for
4786 t = build_target_option_node ();
4788 ix86_arch_string = orig_arch_string;
4789 ix86_tune_string = orig_tune_string;
4790 ix86_fpmath_string = orig_fpmath_string;
4792 /* Free up memory allocated to hold the strings */
4793 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4794 if (option_strings[i])
4795 free (option_strings[i]);
4801 /* Hook to validate attribute((target("string"))). */
4804 ix86_valid_target_attribute_p (tree fndecl,
4805 tree ARG_UNUSED (name),
4807 int ARG_UNUSED (flags))
4809 struct cl_target_option cur_target;
4811 tree old_optimize = build_optimization_node ();
4812 tree new_target, new_optimize;
4813 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4815 /* If the function changed the optimization levels as well as setting target
4816 options, start with the optimizations specified. */
4817 if (func_optimize && func_optimize != old_optimize)
4818 cl_optimization_restore (&global_options,
4819 TREE_OPTIMIZATION (func_optimize));
4821 /* The target attributes may also change some optimization flags, so update
4822 the optimization options if necessary. */
4823 cl_target_option_save (&cur_target, &global_options);
4824 new_target = ix86_valid_target_attribute_tree (args);
4825 new_optimize = build_optimization_node ();
4832 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4834 if (old_optimize != new_optimize)
4835 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4838 cl_target_option_restore (&global_options, &cur_target);
4840 if (old_optimize != new_optimize)
4841 cl_optimization_restore (&global_options,
4842 TREE_OPTIMIZATION (old_optimize));
4848 /* Hook to determine if one function can safely inline another. */
4851 ix86_can_inline_p (tree caller, tree callee)
4854 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4855 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4857 /* If callee has no option attributes, then it is ok to inline. */
4861 /* If caller has no option attributes, but callee does then it is not ok to
4863 else if (!caller_tree)
4868 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4869 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4871 /* Callee's isa options should be a subset of the caller's, i.e. an SSE4 function
4872 can inline an SSE2 function but an SSE2 function can't inline an SSE4
4874 if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
4875 != callee_opts->x_ix86_isa_flags)
4878 /* See if we have the same non-isa options. */
4879 else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
4882 /* See if arch, tune, etc. are the same. */
4883 else if (caller_opts->arch != callee_opts->arch)
4886 else if (caller_opts->tune != callee_opts->tune)
4889 else if (caller_opts->fpmath != callee_opts->fpmath)
4892 else if (caller_opts->branch_cost != callee_opts->branch_cost)
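/* Example (illustrative): a caller compiled with -msse4.2 may inline
   a callee declared __attribute__((target("sse2"))) because the
   callee's ISA bits are a subset of the caller's, while the reverse
   combination fails the subset test above.  */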
4903 /* Remember the last target of ix86_set_current_function. */
4904 static GTY(()) tree ix86_previous_fndecl;
4906 /* Establish appropriate back-end context for processing the function
4907 FNDECL. The argument might be NULL to indicate processing at top
4908 level, outside of any function scope. */
4910 ix86_set_current_function (tree fndecl)
4912 /* Only change the context if the function changes. This hook is called
4913 several times in the course of compiling a function, and we don't want to
4914 slow things down too much or call target_reinit when it isn't safe. */
4915 if (fndecl && fndecl != ix86_previous_fndecl)
4917 tree old_tree = (ix86_previous_fndecl
4918 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4921 tree new_tree = (fndecl
4922 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4925 ix86_previous_fndecl = fndecl;
4926 if (old_tree == new_tree)
4931 cl_target_option_restore (&global_options,
4932 TREE_TARGET_OPTION (new_tree));
4938 struct cl_target_option *def
4939 = TREE_TARGET_OPTION (target_option_current_node);
4941 cl_target_option_restore (&global_options, def);
4948 /* Return true if this goes in large data/bss. */
4951 ix86_in_large_data_p (tree exp)
4953 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4956 /* Functions are never large data. */
4957 if (TREE_CODE (exp) == FUNCTION_DECL)
4960 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4962 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4963 if (strcmp (section, ".ldata") == 0
4964 || strcmp (section, ".lbss") == 0)
4970 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4972 /* If this is an incomplete type with size 0, then we can't put it
4973 in data because it might be too big when completed. */
4974 if (!size || size > ix86_section_threshold)
4981 /* Switch to the appropriate section for output of DECL.
4982 DECL is either a `VAR_DECL' node or a constant of some sort.
4983 RELOC indicates whether forming the initial value of DECL requires
4984 link-time relocations. */
4986 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4990 x86_64_elf_select_section (tree decl, int reloc,
4991 unsigned HOST_WIDE_INT align)
4993 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4994 && ix86_in_large_data_p (decl))
4996 const char *sname = NULL;
4997 unsigned int flags = SECTION_WRITE;
4998 switch (categorize_decl_for_section (decl, reloc))
5003 case SECCAT_DATA_REL:
5004 sname = ".ldata.rel";
5006 case SECCAT_DATA_REL_LOCAL:
5007 sname = ".ldata.rel.local";
5009 case SECCAT_DATA_REL_RO:
5010 sname = ".ldata.rel.ro";
5012 case SECCAT_DATA_REL_RO_LOCAL:
5013 sname = ".ldata.rel.ro.local";
5017 flags |= SECTION_BSS;
5020 case SECCAT_RODATA_MERGE_STR:
5021 case SECCAT_RODATA_MERGE_STR_INIT:
5022 case SECCAT_RODATA_MERGE_CONST:
5026 case SECCAT_SRODATA:
5033 /* We don't split these for the medium model. Place them into
5034 default sections and hope for the best. */
5039 /* We might get called with string constants, but get_named_section
5040 doesn't like them as they are not DECLs. Also, we need to set
5041 flags in that case. */
5043 return get_section (sname, flags, NULL);
5044 return get_named_section (decl, sname, reloc);
5047 return default_elf_select_section (decl, reloc, align);
5050 /* Build up a unique section name, expressed as a
5051 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
5052 RELOC indicates whether the initial value of EXP requires
5053 link-time relocations. */
5055 static void ATTRIBUTE_UNUSED
5056 x86_64_elf_unique_section (tree decl, int reloc)
5058 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5059 && ix86_in_large_data_p (decl))
5061 const char *prefix = NULL;
5062 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
5063 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
5065 switch (categorize_decl_for_section (decl, reloc))
5068 case SECCAT_DATA_REL:
5069 case SECCAT_DATA_REL_LOCAL:
5070 case SECCAT_DATA_REL_RO:
5071 case SECCAT_DATA_REL_RO_LOCAL:
5072 prefix = one_only ? ".ld" : ".ldata";
5075 prefix = one_only ? ".lb" : ".lbss";
5078 case SECCAT_RODATA_MERGE_STR:
5079 case SECCAT_RODATA_MERGE_STR_INIT:
5080 case SECCAT_RODATA_MERGE_CONST:
5081 prefix = one_only ? ".lr" : ".lrodata";
5083 case SECCAT_SRODATA:
5090 /* We don't split these for the medium model. Place them into
5091 default sections and hope for the best. */
5096 const char *name, *linkonce;
5099 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
5100 name = targetm.strip_name_encoding (name);
5102 /* If we're using one_only, then there needs to be a .gnu.linkonce
5103 prefix to the section name. */
5104 linkonce = one_only ? ".gnu.linkonce" : "";
5106 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
5108 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
5112 default_unique_section (decl, reloc);
5115 #ifdef COMMON_ASM_OP
5116 /* This says how to output assembler code to declare an
5117 uninitialized external linkage data object.
5119 For medium model x86-64 we need to use the .largecomm opcode for large objects.
5122 x86_elf_aligned_common (FILE *file,
5123 const char *name, unsigned HOST_WIDE_INT size,
5126 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5127 && size > (unsigned int)ix86_section_threshold)
5128 fputs (".largecomm\t", file);
5130 fputs (COMMON_ASM_OP, file);
5131 assemble_name (file, name);
5132 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5133 size, align / BITS_PER_UNIT);
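/* Example output (illustrative, assuming SIZE exceeds the
   -mlarge-data-threshold value under -mcmodel=medium):

     .largecomm  big_table,1048576,32

   Smaller objects use the regular COMMON_ASM_OP form instead.  */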
5137 /* Utility function for targets to use in implementing
5138 ASM_OUTPUT_ALIGNED_BSS. */
5141 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
5142 const char *name, unsigned HOST_WIDE_INT size,
5145 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5146 && size > (unsigned int)ix86_section_threshold)
5147 switch_to_section (get_named_section (decl, ".lbss", 0));
5149 switch_to_section (bss_section);
5150 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
5151 #ifdef ASM_DECLARE_OBJECT_NAME
5152 last_assemble_variable_decl = decl;
5153 ASM_DECLARE_OBJECT_NAME (file, name, decl);
5155 /* The standard thing is just to output a label for the object. */
5156 ASM_OUTPUT_LABEL (file, name);
5157 #endif /* ASM_DECLARE_OBJECT_NAME */
5158 ASM_OUTPUT_SKIP (file, size ? size : 1);
5161 static const struct default_options ix86_option_optimization_table[] =
5163 /* Turn off -fschedule-insns by default. It tends to make the
5164 problem with not enough registers even worse. */
5165 #ifdef INSN_SCHEDULING
5166 { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
5169 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
5170 SUBTARGET_OPTIMIZATION_OPTIONS,
5172 { OPT_LEVELS_NONE, 0, NULL, 0 }
5175 /* Implement TARGET_OPTION_INIT_STRUCT. */
5178 ix86_option_init_struct (struct gcc_options *opts)
5181 /* The Darwin libraries never set errno, so we might as well
5182 avoid calling them when that's the only reason we would. */
5183 opts->x_flag_errno_math = 0;
5185 opts->x_flag_pcc_struct_return = 2;
5186 opts->x_flag_asynchronous_unwind_tables = 2;
5187 opts->x_flag_vect_cost_model = 1;
5190 /* Decide whether we must probe the stack before any space allocation
5191 on this target. It's essentially TARGET_STACK_PROBE except when
5192 -fstack-check causes the stack to be already probed differently. */
5195 ix86_target_stack_probe (void)
5197 /* Do not probe the stack twice if static stack checking is enabled. */
5198 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5201 return TARGET_STACK_PROBE;
5204 /* Decide whether we can make a sibling call to a function. DECL is the
5205 declaration of the function being targeted by the call and EXP is the
5206 CALL_EXPR representing the call. */
5209 ix86_function_ok_for_sibcall (tree decl, tree exp)
5211 tree type, decl_or_type;
5214 /* If we are generating position-independent code, we cannot sibcall
5215 optimize any indirect call, or a direct call to a global function,
5216 as the PLT requires %ebx be live. (Darwin does not have a PLT.) */
5220 && (!decl || !targetm.binds_local_p (decl)))
5223 /* If we need to align the outgoing stack, then sibcalling would
5224 unalign the stack, which may break the called function. */
5225 if (ix86_minimum_incoming_stack_boundary (true)
5226 < PREFERRED_STACK_BOUNDARY)
5231 decl_or_type = decl;
5232 type = TREE_TYPE (decl);
5236 /* We're looking at the CALL_EXPR, we need the type of the function. */
5237 type = CALL_EXPR_FN (exp); /* pointer expression */
5238 type = TREE_TYPE (type); /* pointer type */
5239 type = TREE_TYPE (type); /* function type */
5240 decl_or_type = type;
5243 /* Check that the return value locations are the same. For example,
5244 if we are returning floats on the 80387 register stack, we cannot
5245 make a sibcall from a function that doesn't return a float to a
5246 function that does or, conversely, from a function that does return
5247 a float to a function that doesn't; the necessary stack adjustment
5248 would not be executed. This is also the place we notice
5249 differences in the return value ABI. Note that it is ok for one
5250 of the functions to have void return type as long as the return
5251 value of the other is passed in a register. */
5252 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
5253 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
5255 if (STACK_REG_P (a) || STACK_REG_P (b))
5257 if (!rtx_equal_p (a, b))
5260 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
5262 /* Disable sibcall if we need to generate vzeroupper after
5264 if (TARGET_VZEROUPPER
5265 && cfun->machine->callee_return_avx256_p
5266 && !cfun->machine->caller_return_avx256_p)
5269 else if (!rtx_equal_p (a, b))
5274 /* The SYSV ABI has more call-clobbered registers;
5275 disallow sibcalls from MS to SYSV. */
5276 if (cfun->machine->call_abi == MS_ABI
5277 && ix86_function_type_abi (type) == SYSV_ABI)
5282 /* If this call is indirect, we'll need to be able to use a
5283 call-clobbered register for the address of the target function.
5284 Make sure that all such registers are not used for passing
5285 parameters. Note that DLLIMPORT functions are indirect. */
5287 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
5289 if (ix86_function_regparm (type, NULL) >= 3)
5291 /* ??? Need to count the actual number of registers to be used,
5292 not the possible number of registers. Fix later. */
5298 /* Otherwise okay. That also includes certain types of indirect calls. */
5302 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
5303 and "sseregparm" calling convention attributes;
5304 arguments as in struct attribute_spec.handler. */
5307 ix86_handle_cconv_attribute (tree *node, tree name,
5309 int flags ATTRIBUTE_UNUSED,
5312 if (TREE_CODE (*node) != FUNCTION_TYPE
5313 && TREE_CODE (*node) != METHOD_TYPE
5314 && TREE_CODE (*node) != FIELD_DECL
5315 && TREE_CODE (*node) != TYPE_DECL)
5317 warning (OPT_Wattributes, "%qE attribute only applies to functions",
5319 *no_add_attrs = true;
5323 /* Can combine regparm with all attributes but fastcall. */
5324 if (is_attribute_p ("regparm", name))
5328 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5330 error ("fastcall and regparm attributes are not compatible");
5333 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5335 error ("regparm and thiscall attributes are not compatible");
5338 cst = TREE_VALUE (args);
5339 if (TREE_CODE (cst) != INTEGER_CST)
5341 warning (OPT_Wattributes,
5342 "%qE attribute requires an integer constant argument",
5344 *no_add_attrs = true;
5346 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
5348 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
5350 *no_add_attrs = true;
5358 /* Do not warn when emulating the MS ABI. */
5359 if ((TREE_CODE (*node) != FUNCTION_TYPE
5360 && TREE_CODE (*node) != METHOD_TYPE)
5361 || ix86_function_type_abi (*node) != MS_ABI)
5362 warning (OPT_Wattributes, "%qE attribute ignored",
5364 *no_add_attrs = true;
5368 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
5369 if (is_attribute_p ("fastcall", name))
5371 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5373 error ("fastcall and cdecl attributes are not compatible");
5375 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5377 error ("fastcall and stdcall attributes are not compatible");
5379 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
5381 error ("fastcall and regparm attributes are not compatible");
5383 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5385 error ("fastcall and thiscall attributes are not compatible");
5389 /* Can combine stdcall with fastcall (redundant), regparm and
5391 else if (is_attribute_p ("stdcall", name))
5393 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5395 error ("stdcall and cdecl attributes are not compatible");
5397 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5399 error ("stdcall and fastcall attributes are not compatible");
5401 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5403 error ("stdcall and thiscall attributes are not compatible");
5407 /* Can combine cdecl with regparm and sseregparm. */
5408 else if (is_attribute_p ("cdecl", name))
5410 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5412 error ("stdcall and cdecl attributes are not compatible");
5414 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5416 error ("fastcall and cdecl attributes are not compatible");
5418 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5420 error ("cdecl and thiscall attributes are not compatible");
5423 else if (is_attribute_p ("thiscall", name))
5425 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
5426 warning (OPT_Wattributes, "%qE attribute is used for a non-class method",
5428 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5430 error ("stdcall and thiscall attributes are not compatible");
5432 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5434 error ("fastcall and thiscall attributes are not compatible");
5436 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5438 error ("cdecl and thiscall attributes are not compatible");
5442 /* Can combine sseregparm with all attributes. */
5447 /* Return 0 if the attributes for two types are incompatible, 1 if they
5448 are compatible, and 2 if they are nearly compatible (which causes a
5449 warning to be generated). */
5452 ix86_comp_type_attributes (const_tree type1, const_tree type2)
5454 /* Check for mismatch of non-default calling convention. */
5455 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
5457 if (TREE_CODE (type1) != FUNCTION_TYPE
5458 && TREE_CODE (type1) != METHOD_TYPE)
5461 /* Check for mismatched fastcall/regparm types. */
5462 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
5463 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
5464 || (ix86_function_regparm (type1, NULL)
5465 != ix86_function_regparm (type2, NULL)))
5468 /* Check for mismatched sseregparm types. */
5469 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
5470 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
5473 /* Check for mismatched thiscall types. */
5474 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
5475 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
5478 /* Check for mismatched return types (cdecl vs stdcall). */
5479 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
5480 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
5486 /* Return the regparm value for a function with the indicated TYPE and DECL.
5487 DECL may be NULL when calling function indirectly
5488 or considering a libcall. */
5491 ix86_function_regparm (const_tree type, const_tree decl)
5497 return (ix86_function_type_abi (type) == SYSV_ABI
5498 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5500 regparm = ix86_regparm;
5501 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
5504 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
5508 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
5511 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
5514 /* Use register calling convention for local functions when possible. */
5516 && TREE_CODE (decl) == FUNCTION_DECL
5518 && !(profile_flag && !flag_fentry))
5520 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5521 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
5522 if (i && i->local && i->can_change_signature)
5524 int local_regparm, globals = 0, regno;
5526 /* Make sure no regparm register is taken by a
5527 fixed register variable. */
5528 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
5529 if (fixed_regs[local_regparm])
5532 /* We don't want to use regparm(3) for nested functions as
5533 these use a static chain pointer in the third argument. */
5534 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
5537 /* In 32-bit mode save a register for the split stack. */
5538 if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
5541 /* Each fixed register usage increases register pressure,
5542 so fewer registers should be used for argument passing.
5543 This functionality can be overridden by an explicit
5545 for (regno = 0; regno <= DI_REG; regno++)
5546 if (fixed_regs[regno])
5550 = globals < local_regparm ? local_regparm - globals : 0;
5552 if (local_regparm > regparm)
5553 regparm = local_regparm;
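/* Example (illustrative): for

     int __attribute__((regparm(2))) f (int a, int b, int c);

   A and B are passed in EAX and EDX and only C goes on the stack.
   The block above can raise regparm automatically for local
   functions whose signature the compiler is free to change.  */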
5560 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
5561 DFmode (2) arguments in SSE registers for a function with the
5562 indicated TYPE and DECL. DECL may be NULL when calling function
5563 indirectly or considering a libcall. Otherwise return 0. */
5566 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
5568 gcc_assert (!TARGET_64BIT);
5570 /* Use SSE registers to pass SFmode and DFmode arguments if requested
5571 by the sseregparm attribute. */
5572 if (TARGET_SSEREGPARM
5573 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
5580 error ("calling %qD with attribute sseregparm without "
5581 "SSE/SSE2 enabled", decl);
5583 error ("calling %qT with attribute sseregparm without "
5584 "SSE/SSE2 enabled", type);
5592 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
5593 (and DFmode for SSE2) arguments in SSE registers. */
5594 if (decl && TARGET_SSE_MATH && optimize
5595 && !(profile_flag && !flag_fentry))
5597 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5598 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
5599 if (i && i->local && i->can_change_signature)
5600 return TARGET_SSE2 ? 2 : 1;
5606 /* Return true if EAX is live at the start of the function. Used by
5607 ix86_expand_prologue to determine if we need special help before
5608 calling allocate_stack_worker. */
5611 ix86_eax_live_at_start_p (void)
5613 /* Cheat. Don't bother working forward from ix86_function_regparm
5614 to the function type to whether an actual argument is located in
5615 eax. Instead just look at cfg info, which is still close enough
5616 to correct at this point. This gives false positives for broken
5617 functions that might use uninitialized data that happens to be
5618 allocated in eax, but who cares? */
5619 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
5623 ix86_keep_aggregate_return_pointer (tree fntype)
5627 attr = lookup_attribute ("callee_pop_aggregate_return",
5628 TYPE_ATTRIBUTES (fntype));
5630 return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);
5632 return KEEP_AGGREGATE_RETURN_POINTER != 0;
5635 /* Value is the number of bytes of arguments automatically
5636 popped when returning from a subroutine call.
5637 FUNDECL is the declaration node of the function (as a tree),
5638 FUNTYPE is the data type of the function (as a tree),
5639 or for a library call it is an identifier node for the subroutine name.
5640 SIZE is the number of bytes of arguments passed on the stack.
5642 On the 80386, the RTD insn may be used to pop them if the number
5643 of args is fixed, but if the number is variable then the caller
5644 must pop them all. RTD can't be used for library calls now
5645 because the library is compiled with the Unix compiler.
5646 Use of RTD is a selectable option, since it is incompatible with
5647 standard Unix calling sequences. If the option is not selected,
5648 the caller must always pop the args.
The attribute stdcall is equivalent to RTD on a per-module basis.  */
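/* For example (illustrative): a non-variadic

     int __attribute__ ((stdcall)) f (int a, int b);

   pops its own 8 bytes of arguments with "ret $8", so this function
   returns 8 for it, while a cdecl function always leaves the pop to
   the caller and returns 0.  */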
5653 ix86_return_pops_args (tree fundecl, tree funtype, int size)
5657 /* None of the 64-bit ABIs pop arguments. */
5661 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
5663 /* Cdecl functions override -mrtd, and never pop the stack. */
5664 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
/* Stdcall and fastcall functions will pop the stack if not
   variable args.  */
5668 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
5669 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
5670 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
5673 if (rtd && ! stdarg_p (funtype))
5677 /* Lose any fake structure return argument if it is passed on the stack. */
5678 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
5679 && !ix86_keep_aggregate_return_pointer (funtype))
5681 int nregs = ix86_function_regparm (funtype, fundecl);
5683 return GET_MODE_SIZE (Pmode);
5689 /* Argument support functions. */
/* Return true when a register may be used to pass function parameters.  */
5693 ix86_function_arg_regno_p (int regno)
5696 const int *parm_regs;
5701 return (regno < REGPARM_MAX
5702 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
5704 return (regno < REGPARM_MAX
5705 || (TARGET_MMX && MMX_REGNO_P (regno)
5706 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
5707 || (TARGET_SSE && SSE_REGNO_P (regno)
5708 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
5713 if (SSE_REGNO_P (regno) && TARGET_SSE)
5718 if (TARGET_SSE && SSE_REGNO_P (regno)
5719 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
/* TODO: The function should depend on the current function's ABI, but
   builtins.c would need updating then.  Therefore we use the
   default ABI.  */
/* RAX is used as a hidden argument to va_arg functions.  */
5728 if (ix86_abi == SYSV_ABI && regno == AX_REG)
5731 if (ix86_abi == MS_ABI)
5732 parm_regs = x86_64_ms_abi_int_parameter_registers;
5734 parm_regs = x86_64_int_parameter_registers;
5735 for (i = 0; i < (ix86_abi == MS_ABI
5736 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
5737 if (regno == parm_regs[i])
/* Return true if we do not know how to pass TYPE solely in registers.  */
5745 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
5747 if (must_pass_in_stack_var_size_or_pad (mode, type))
5750 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
5751 The layout_type routine is crafty and tries to trick us into passing
5752 currently unsupported vector types on the stack by using TImode. */
5753 return (!TARGET_64BIT && mode == TImode
5754 && type && TREE_CODE (type) != VECTOR_TYPE);
/* Return the size, in bytes, of the area reserved for arguments passed
   in registers for the function represented by FNDECL, depending on
   the ABI used.  */
5761 ix86_reg_parm_stack_space (const_tree fndecl)
5763 enum calling_abi call_abi = SYSV_ABI;
5764 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
5765 call_abi = ix86_function_abi (fndecl);
5767 call_abi = ix86_function_type_abi (fndecl);
5768 if (call_abi == MS_ABI)
/* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
   calling ABI used.  */
5776 ix86_function_type_abi (const_tree fntype)
5778 if (TARGET_64BIT && fntype != NULL)
5780 enum calling_abi abi = ix86_abi;
5781 if (abi == SYSV_ABI)
5783 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5786 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5794 ix86_function_ms_hook_prologue (const_tree fn)
5796 if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
5798 if (decl_function_context (fn) != NULL_TREE)
5799 error_at (DECL_SOURCE_LOCATION (fn),
5800 "ms_hook_prologue is not compatible with nested function");
5807 static enum calling_abi
5808 ix86_function_abi (const_tree fndecl)
5812 return ix86_function_type_abi (TREE_TYPE (fndecl));
/* Return SYSV_ABI or MS_ABI, depending on CFUN, specifying the
   calling ABI used.  */
5818 ix86_cfun_abi (void)
5820 if (! cfun || ! TARGET_64BIT)
5822 return cfun->machine->call_abi;
5825 /* Write the extra assembler code needed to declare a function properly. */
5828 ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
5831 bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
5835 int i, filler_count = (TARGET_64BIT ? 32 : 16);
5836 unsigned int filler_cc = 0xcccccccc;
5838 for (i = 0; i < filler_count; i += 4)
5839 fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
5842 #ifdef SUBTARGET_ASM_UNWIND_INIT
5843 SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
5846 ASM_OUTPUT_LABEL (asm_out_file, fname);
5848 /* Output magic byte marker, if hot-patch attribute is set. */
5853 /* leaq [%rsp + 0], %rsp */
5854 asm_fprintf (asm_out_file, ASM_BYTE
5855 "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
5859 /* movl.s %edi, %edi
5861 movl.s %esp, %ebp */
5862 asm_fprintf (asm_out_file, ASM_BYTE
5863 "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
5869 extern void init_regs (void);
/* Implementation of the call ABI switching target hook.  Set the
   call register sets specific to FNDECL.  See also
   ix86_conditional_register_usage for more details.  */
5875 ix86_call_abi_override (const_tree fndecl)
5877 if (fndecl == NULL_TREE)
5878 cfun->machine->call_abi = ix86_abi;
5880 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
/* The MS and SYSV ABIs have different sets of call-used registers.  Avoid
   expensive re-initialization via init_regs each time we switch function
   context, since this is needed only during RTL expansion.  */
5887 ix86_maybe_switch_abi (void)
5890 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5894 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5895 for a call to a function whose data type is FNTYPE.
5896 For a library call, FNTYPE is 0. */
5899 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5900 tree fntype, /* tree ptr for function decl */
5901 rtx libname, /* SYMBOL_REF of library name or 0 */
5905 struct cgraph_local_info *i;
5908 memset (cum, 0, sizeof (*cum));
5910 /* Initialize for the current callee. */
5913 cfun->machine->callee_pass_avx256_p = false;
5914 cfun->machine->callee_return_avx256_p = false;
5919 i = cgraph_local_info (fndecl);
5920 cum->call_abi = ix86_function_abi (fndecl);
5921 fnret_type = TREE_TYPE (TREE_TYPE (fndecl));
5926 cum->call_abi = ix86_function_type_abi (fntype);
5928 fnret_type = TREE_TYPE (fntype);
5933 if (TARGET_VZEROUPPER && fnret_type)
5935 rtx fnret_value = ix86_function_value (fnret_type, fntype,
5937 if (function_pass_avx256_p (fnret_value))
5939 /* The return value of this function uses 256bit AVX modes. */
5942 cfun->machine->callee_return_avx256_p = true;
5943 cum->callee_return_avx256_p = true;
5946 cfun->machine->caller_return_avx256_p = true;
5950 cum->caller = caller;
5952 /* Set up the number of registers to use for passing arguments. */
5954 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5955 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5956 "or subtarget optimization implying it");
5957 cum->nregs = ix86_regparm;
5960 cum->nregs = (cum->call_abi == SYSV_ABI
5961 ? X86_64_REGPARM_MAX
5962 : X86_64_MS_REGPARM_MAX);
5966 cum->sse_nregs = SSE_REGPARM_MAX;
5969 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5970 ? X86_64_SSE_REGPARM_MAX
5971 : X86_64_MS_SSE_REGPARM_MAX);
5975 cum->mmx_nregs = MMX_REGPARM_MAX;
5976 cum->warn_avx = true;
5977 cum->warn_sse = true;
5978 cum->warn_mmx = true;
/* Because the type might mismatch between caller and callee, we need to
   use the actual function type for local calls.
   FIXME: cgraph_analyze can be told to actually record whether a function
   uses va_start, so for local functions maybe_vaarg can be made more
   aggressive.
   FIXME: once the type system is fixed, we won't need this code anymore.  */
5986 if (i && i->local && i->can_change_signature)
5987 fntype = TREE_TYPE (fndecl);
5988 cum->maybe_vaarg = (fntype
5989 ? (!prototype_p (fntype) || stdarg_p (fntype))
5994 /* If there are variable arguments, then we won't pass anything
5995 in registers in 32-bit mode. */
5996 if (stdarg_p (fntype))
6007 /* Use ecx and edx registers if function has fastcall attribute,
6008 else look for regparm information. */
6011 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
6014 cum->fastcall = 1; /* Same first register as in fastcall. */
6016 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
6022 cum->nregs = ix86_function_regparm (fntype, fndecl);
6025 /* Set up the number of SSE registers used for passing SFmode
6026 and DFmode arguments. Warn for mismatching ABI. */
6027 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
6031 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
6032 But in the case of vector types, it is some vector mode.
6034 When we have only some of our vector isa extensions enabled, then there
6035 are some modes for which vector_mode_supported_p is false. For these
6036 modes, the generic vector support in gcc will choose some non-vector mode
6037 in order to implement the type. By computing the natural mode, we'll
6038 select the proper ABI location for the operand and not depend on whatever
6039 the middle-end decides to do with these vector types.
The middle-end can't deal with vector types > 16 bytes.  In this
   case, we return the original mode and warn about the ABI change if
   CUM isn't NULL.  */
6045 static enum machine_mode
6046 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
6048 enum machine_mode mode = TYPE_MODE (type);
6050 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
6052 HOST_WIDE_INT size = int_size_in_bytes (type);
6053 if ((size == 8 || size == 16 || size == 32)
6054 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
6055 && TYPE_VECTOR_SUBPARTS (type) > 1)
6057 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
6059 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6060 mode = MIN_MODE_VECTOR_FLOAT;
6062 mode = MIN_MODE_VECTOR_INT;
6064 /* Get the mode which has this inner mode and number of units. */
6065 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
6066 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
6067 && GET_MODE_INNER (mode) == innermode)
6069 if (size == 32 && !TARGET_AVX)
6071 static bool warnedavx;
6078 warning (0, "AVX vector argument without AVX "
6079 "enabled changes the ABI");
6081 return TYPE_MODE (type);
6094 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
6095 this may not agree with the mode that the type system has chosen for the
6096 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
6097 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
6100 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
6105 if (orig_mode != BLKmode)
6106 tmp = gen_rtx_REG (orig_mode, regno);
6109 tmp = gen_rtx_REG (mode, regno);
6110 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
6111 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
/* x86-64 register passing implementation.  See the x86-64 psABI for
   details.  The goal of this code is to classify each eightbyte of the
   incoming argument by register class and assign registers accordingly.  */
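/* Worked example (illustrative, following the SysV psABI): for

     struct s { double d; int i; };

   the first eightbyte (D) classifies as X86_64_SSEDF_CLASS and the
   second (I plus padding) as X86_64_INTEGERSI_CLASS, so the struct
   is passed in one SSE register and one integer register.  */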
6121 /* Return the union class of CLASS1 and CLASS2.
6122 See the x86-64 PS ABI for details. */
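/* For instance (illustrative):

     union u { int i; float f; };

   classifies I as X86_64_INTEGERSI_CLASS and F as X86_64_SSESF_CLASS;
   rule #4 below merges these to X86_64_INTEGERSI_CLASS, so the union
   travels in an integer register.  */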
6124 static enum x86_64_reg_class
6125 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
6127 /* Rule #1: If both classes are equal, this is the resulting class. */
6128 if (class1 == class2)
6131 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
6133 if (class1 == X86_64_NO_CLASS)
6135 if (class2 == X86_64_NO_CLASS)
6138 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
6139 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
6140 return X86_64_MEMORY_CLASS;
6142 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
6143 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
6144 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
6145 return X86_64_INTEGERSI_CLASS;
6146 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
6147 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
6148 return X86_64_INTEGER_CLASS;
6150 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
6152 if (class1 == X86_64_X87_CLASS
6153 || class1 == X86_64_X87UP_CLASS
6154 || class1 == X86_64_COMPLEX_X87_CLASS
6155 || class2 == X86_64_X87_CLASS
6156 || class2 == X86_64_X87UP_CLASS
6157 || class2 == X86_64_COMPLEX_X87_CLASS)
6158 return X86_64_MEMORY_CLASS;
6160 /* Rule #6: Otherwise class SSE is used. */
6161 return X86_64_SSE_CLASS;
6164 /* Classify the argument of type TYPE and mode MODE.
6165 CLASSES will be filled by the register class used to pass each word
6166 of the operand. The number of words is returned. In case the parameter
should be passed in memory, 0 is returned.  As a special case for
   zero-sized containers, classes[0] will be NO_CLASS and 1 is returned.

   BIT_OFFSET is used internally for handling records; it specifies the
   offset in bits, modulo 256, to avoid overflow cases.
See the x86-64 PS ABI for details.  */
6177 classify_argument (enum machine_mode mode, const_tree type,
6178 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
6180 HOST_WIDE_INT bytes =
6181 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6182 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6184 /* Variable sized entities are always passed/returned in memory. */
6188 if (mode != VOIDmode
6189 && targetm.calls.must_pass_in_stack (mode, type))
6192 if (type && AGGREGATE_TYPE_P (type))
6196 enum x86_64_reg_class subclasses[MAX_CLASSES];
6198 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
6202 for (i = 0; i < words; i++)
6203 classes[i] = X86_64_NO_CLASS;
/* Zero-sized arrays or structures are NO_CLASS.  We return 0 to
   signal the memory class, so handle it as a special case.  */
6209 classes[0] = X86_64_NO_CLASS;
6213 /* Classify each field of record and merge classes. */
6214 switch (TREE_CODE (type))
6217 /* And now merge the fields of structure. */
6218 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6220 if (TREE_CODE (field) == FIELD_DECL)
6224 if (TREE_TYPE (field) == error_mark_node)
6227 /* Bitfields are always classified as integer. Handle them
6228 early, since later code would consider them to be
6229 misaligned integers. */
6230 if (DECL_BIT_FIELD (field))
6232 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6233 i < ((int_bit_position (field) + (bit_offset % 64))
6234 + tree_low_cst (DECL_SIZE (field), 0)
6237 merge_classes (X86_64_INTEGER_CLASS,
6244 type = TREE_TYPE (field);
/* A flexible array member is ignored.  */
6247 if (TYPE_MODE (type) == BLKmode
6248 && TREE_CODE (type) == ARRAY_TYPE
6249 && TYPE_SIZE (type) == NULL_TREE
6250 && TYPE_DOMAIN (type) != NULL_TREE
6251 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
6256 if (!warned && warn_psabi)
6259 inform (input_location,
6260 "the ABI of passing struct with"
6261 " a flexible array member has"
6262 " changed in GCC 4.4");
6266 num = classify_argument (TYPE_MODE (type), type,
6268 (int_bit_position (field)
6269 + bit_offset) % 256);
6272 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6273 for (i = 0; i < num && (i + pos) < words; i++)
6275 merge_classes (subclasses[i], classes[i + pos]);
6282 /* Arrays are handled as small records. */
6285 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
6286 TREE_TYPE (type), subclasses, bit_offset);
6290 /* The partial classes are now full classes. */
6291 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
6292 subclasses[0] = X86_64_SSE_CLASS;
6293 if (subclasses[0] == X86_64_INTEGERSI_CLASS
6294 && !((bit_offset % 64) == 0 && bytes == 4))
6295 subclasses[0] = X86_64_INTEGER_CLASS;
6297 for (i = 0; i < words; i++)
6298 classes[i] = subclasses[i % num];
6303 case QUAL_UNION_TYPE:
/* Unions are similar to RECORD_TYPE but the offset is always 0.  */
6306 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6308 if (TREE_CODE (field) == FIELD_DECL)
6312 if (TREE_TYPE (field) == error_mark_node)
6315 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
6316 TREE_TYPE (field), subclasses,
6320 for (i = 0; i < num; i++)
6321 classes[i] = merge_classes (subclasses[i], classes[i]);
/* When size > 16 bytes, if the first one isn't
   X86_64_SSE_CLASS or any other ones aren't
   X86_64_SSEUP_CLASS, everything should be passed in
   memory.  */
6336 if (classes[0] != X86_64_SSE_CLASS)
6339 for (i = 1; i < words; i++)
6340 if (classes[i] != X86_64_SSEUP_CLASS)
6344 /* Final merger cleanup. */
6345 for (i = 0; i < words; i++)
/* If one class is MEMORY, everything should be passed in
   memory.  */
6349 if (classes[i] == X86_64_MEMORY_CLASS)
/* The X86_64_SSEUP_CLASS should always be preceded by
   X86_64_SSE_CLASS or X86_64_SSEUP_CLASS.  */
6354 if (classes[i] == X86_64_SSEUP_CLASS
6355 && classes[i - 1] != X86_64_SSE_CLASS
6356 && classes[i - 1] != X86_64_SSEUP_CLASS)
6358 /* The first one should never be X86_64_SSEUP_CLASS. */
6359 gcc_assert (i != 0);
6360 classes[i] = X86_64_SSE_CLASS;
6363 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
6364 everything should be passed in memory. */
6365 if (classes[i] == X86_64_X87UP_CLASS
6366 && (classes[i - 1] != X86_64_X87_CLASS))
6370 /* The first one should never be X86_64_X87UP_CLASS. */
6371 gcc_assert (i != 0);
6372 if (!warned && warn_psabi)
6375 inform (input_location,
6376 "the ABI of passing union with long double"
6377 " has changed in GCC 4.4");
/* Compute the alignment needed.  We align all types to their natural
   boundaries, with the exception of XFmode, which is aligned to 64 bits.  */
6387 if (mode != VOIDmode && mode != BLKmode)
6389 int mode_alignment = GET_MODE_BITSIZE (mode);
6392 mode_alignment = 128;
6393 else if (mode == XCmode)
6394 mode_alignment = 256;
6395 if (COMPLEX_MODE_P (mode))
6396 mode_alignment /= 2;
6397 /* Misaligned fields are always returned in memory. */
6398 if (bit_offset % mode_alignment)
/* For V1xx modes, just use the base mode.  */
6403 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
6404 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
6405 mode = GET_MODE_INNER (mode);
6407 /* Classification of atomic types. */
6412 classes[0] = X86_64_SSE_CLASS;
6415 classes[0] = X86_64_SSE_CLASS;
6416 classes[1] = X86_64_SSEUP_CLASS;
int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
6430 classes[0] = X86_64_INTEGERSI_CLASS;
6433 else if (size <= 64)
6435 classes[0] = X86_64_INTEGER_CLASS;
6438 else if (size <= 64+32)
6440 classes[0] = X86_64_INTEGER_CLASS;
6441 classes[1] = X86_64_INTEGERSI_CLASS;
6444 else if (size <= 64+64)
6446 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6454 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6458 /* OImode shouldn't be used directly. */
6463 if (!(bit_offset % 64))
6464 classes[0] = X86_64_SSESF_CLASS;
6466 classes[0] = X86_64_SSE_CLASS;
6469 classes[0] = X86_64_SSEDF_CLASS;
6472 classes[0] = X86_64_X87_CLASS;
6473 classes[1] = X86_64_X87UP_CLASS;
6476 classes[0] = X86_64_SSE_CLASS;
6477 classes[1] = X86_64_SSEUP_CLASS;
6480 classes[0] = X86_64_SSE_CLASS;
6481 if (!(bit_offset % 64))
6487 if (!warned && warn_psabi)
6490 inform (input_location,
6491 "the ABI of passing structure with complex float"
6492 " member has changed in GCC 4.4");
6494 classes[1] = X86_64_SSESF_CLASS;
6498 classes[0] = X86_64_SSEDF_CLASS;
6499 classes[1] = X86_64_SSEDF_CLASS;
6502 classes[0] = X86_64_COMPLEX_X87_CLASS;
/* These modes are larger than 16 bytes.  */
6513 classes[0] = X86_64_SSE_CLASS;
6514 classes[1] = X86_64_SSEUP_CLASS;
6515 classes[2] = X86_64_SSEUP_CLASS;
6516 classes[3] = X86_64_SSEUP_CLASS;
6524 classes[0] = X86_64_SSE_CLASS;
6525 classes[1] = X86_64_SSEUP_CLASS;
6533 classes[0] = X86_64_SSE_CLASS;
6539 gcc_assert (VECTOR_MODE_P (mode));
6544 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
6546 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
6547 classes[0] = X86_64_INTEGERSI_CLASS;
6549 classes[0] = X86_64_INTEGER_CLASS;
6550 classes[1] = X86_64_INTEGER_CLASS;
6551 return 1 + (bytes > 8);
/* Examine the argument and set the number of registers required in each
   class.  Return 0 iff the parameter should be passed in memory.  */
6558 examine_argument (enum machine_mode mode, const_tree type, int in_return,
6559 int *int_nregs, int *sse_nregs)
6561 enum x86_64_reg_class regclass[MAX_CLASSES];
6562 int n = classify_argument (mode, type, regclass, 0);
6568 for (n--; n >= 0; n--)
6569 switch (regclass[n])
6571 case X86_64_INTEGER_CLASS:
6572 case X86_64_INTEGERSI_CLASS:
6575 case X86_64_SSE_CLASS:
6576 case X86_64_SSESF_CLASS:
6577 case X86_64_SSEDF_CLASS:
6580 case X86_64_NO_CLASS:
6581 case X86_64_SSEUP_CLASS:
6583 case X86_64_X87_CLASS:
6584 case X86_64_X87UP_CLASS:
6588 case X86_64_COMPLEX_X87_CLASS:
6589 return in_return ? 2 : 0;
6590 case X86_64_MEMORY_CLASS:
6596 /* Construct container for the argument used by GCC interface. See
6597 FUNCTION_ARG for the detailed description. */
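/* Sketch of the result (illustrative): for a 16-byte struct whose
   eightbytes classify as { INTEGER, SSEDF }, the returned rtx is
   roughly

     (parallel [(expr_list (reg:DI di) (const_int 0))
                (expr_list (reg:DF xmm0) (const_int 8))])

   where each EXPR_LIST pairs a hard register with the byte offset of
   the eightbyte it carries.  */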
6600 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
6601 const_tree type, int in_return, int nintregs, int nsseregs,
6602 const int *intreg, int sse_regno)
6604 /* The following variables hold the static issued_error state. */
6605 static bool issued_sse_arg_error;
6606 static bool issued_sse_ret_error;
6607 static bool issued_x87_ret_error;
6609 enum machine_mode tmpmode;
6611 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6612 enum x86_64_reg_class regclass[MAX_CLASSES];
6616 int needed_sseregs, needed_intregs;
6617 rtx exp[MAX_CLASSES];
6620 n = classify_argument (mode, type, regclass, 0);
6623 if (!examine_argument (mode, type, in_return, &needed_intregs,
6626 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
6629 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
6630 some less clueful developer tries to use floating-point anyway. */
6631 if (needed_sseregs && !TARGET_SSE)
6635 if (!issued_sse_ret_error)
6637 error ("SSE register return with SSE disabled");
6638 issued_sse_ret_error = true;
6641 else if (!issued_sse_arg_error)
6643 error ("SSE register argument with SSE disabled");
6644 issued_sse_arg_error = true;
6649 /* Likewise, error if the ABI requires us to return values in the
6650 x87 registers and the user specified -mno-80387. */
6651 if (!TARGET_80387 && in_return)
6652 for (i = 0; i < n; i++)
6653 if (regclass[i] == X86_64_X87_CLASS
6654 || regclass[i] == X86_64_X87UP_CLASS
6655 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
6657 if (!issued_x87_ret_error)
6659 error ("x87 register return with x87 disabled");
6660 issued_x87_ret_error = true;
/* First construct the simple cases.  Avoid SCmode, since we want to use
   a single register to pass this type.  */
6667 if (n == 1 && mode != SCmode)
6668 switch (regclass[0])
6670 case X86_64_INTEGER_CLASS:
6671 case X86_64_INTEGERSI_CLASS:
6672 return gen_rtx_REG (mode, intreg[0]);
6673 case X86_64_SSE_CLASS:
6674 case X86_64_SSESF_CLASS:
6675 case X86_64_SSEDF_CLASS:
6676 if (mode != BLKmode)
6677 return gen_reg_or_parallel (mode, orig_mode,
6678 SSE_REGNO (sse_regno));
6680 case X86_64_X87_CLASS:
6681 case X86_64_COMPLEX_X87_CLASS:
6682 return gen_rtx_REG (mode, FIRST_STACK_REG);
6683 case X86_64_NO_CLASS:
6684 /* Zero sized array, struct or class. */
6689 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
6690 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
6691 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6693 && regclass[0] == X86_64_SSE_CLASS
6694 && regclass[1] == X86_64_SSEUP_CLASS
6695 && regclass[2] == X86_64_SSEUP_CLASS
6696 && regclass[3] == X86_64_SSEUP_CLASS
6698 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6701 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
6702 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
6703 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
6704 && regclass[1] == X86_64_INTEGER_CLASS
6705 && (mode == CDImode || mode == TImode || mode == TFmode)
6706 && intreg[0] + 1 == intreg[1])
6707 return gen_rtx_REG (mode, intreg[0]);
6709 /* Otherwise figure out the entries of the PARALLEL. */
6710 for (i = 0; i < n; i++)
6714 switch (regclass[i])
6716 case X86_64_NO_CLASS:
6718 case X86_64_INTEGER_CLASS:
6719 case X86_64_INTEGERSI_CLASS:
6720 /* Merge TImodes on aligned occasions here too. */
6721 if (i * 8 + 8 > bytes)
6722 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
6723 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
/* We've requested 24 bytes for which we don't have a mode.  Use DImode.  */
6728 if (tmpmode == BLKmode)
6730 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6731 gen_rtx_REG (tmpmode, *intreg),
6735 case X86_64_SSESF_CLASS:
6736 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6737 gen_rtx_REG (SFmode,
6738 SSE_REGNO (sse_regno)),
6742 case X86_64_SSEDF_CLASS:
6743 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6744 gen_rtx_REG (DFmode,
6745 SSE_REGNO (sse_regno)),
6749 case X86_64_SSE_CLASS:
6757 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
6767 && regclass[1] == X86_64_SSEUP_CLASS
6768 && regclass[2] == X86_64_SSEUP_CLASS
6769 && regclass[3] == X86_64_SSEUP_CLASS);
6776 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6777 gen_rtx_REG (tmpmode,
6778 SSE_REGNO (sse_regno)),
6787 /* Empty aligned struct, union or class. */
6791 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
6792 for (i = 0; i < nexps; i++)
6793 XVECEXP (ret, 0, i) = exp [i];
6797 /* Update the data in CUM to advance over an argument of mode MODE
6798 and data type TYPE. (TYPE is null for libcalls where that information
6799 may not be available.) */
6802 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6803 const_tree type, HOST_WIDE_INT bytes,
6804 HOST_WIDE_INT words)
6820 cum->words += words;
6821 cum->nregs -= words;
6822 cum->regno += words;
6824 if (cum->nregs <= 0)
6832 /* OImode shouldn't be used directly. */
6836 if (cum->float_in_sse < 2)
6839 if (cum->float_in_sse < 1)
6856 if (!type || !AGGREGATE_TYPE_P (type))
6858 cum->sse_words += words;
6859 cum->sse_nregs -= 1;
6860 cum->sse_regno += 1;
6861 if (cum->sse_nregs <= 0)
6875 if (!type || !AGGREGATE_TYPE_P (type))
6877 cum->mmx_words += words;
6878 cum->mmx_nregs -= 1;
6879 cum->mmx_regno += 1;
6880 if (cum->mmx_nregs <= 0)
6891 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6892 const_tree type, HOST_WIDE_INT words, bool named)
6894 int int_nregs, sse_nregs;
/* Unnamed 256-bit vector mode parameters are passed on the stack.  */
6897 if (!named && VALID_AVX256_REG_MODE (mode))
6900 if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
6901 && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6903 cum->nregs -= int_nregs;
6904 cum->sse_nregs -= sse_nregs;
6905 cum->regno += int_nregs;
6906 cum->sse_regno += sse_nregs;
6910 int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
6911 cum->words = (cum->words + align - 1) & ~(align - 1);
6912 cum->words += words;
6917 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6918 HOST_WIDE_INT words)
/* Otherwise, this should be passed indirectly.  */
6921 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6923 cum->words += words;
6931 /* Update the data in CUM to advance over an argument of mode MODE and
6932 data type TYPE. (TYPE is null for libcalls where that information
6933 may not be available.) */
6936 ix86_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6937 const_tree type, bool named)
6939 HOST_WIDE_INT bytes, words;
6941 if (mode == BLKmode)
6942 bytes = int_size_in_bytes (type);
6944 bytes = GET_MODE_SIZE (mode);
6945 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6948 mode = type_natural_mode (type, NULL);
6950 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6951 function_arg_advance_ms_64 (cum, bytes, words);
6952 else if (TARGET_64BIT)
6953 function_arg_advance_64 (cum, mode, type, words, named);
6955 function_arg_advance_32 (cum, mode, type, bytes, words);
6958 /* Define where to put the arguments to a function.
6959 Value is zero to push the argument on the stack,
6960 or a hard register in which to store the argument.
6962 MODE is the argument's machine mode.
6963 TYPE is the data type of the argument (as a tree).
6964 This is null for libcalls where that information may
6966 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6967 the preceding args and about the function being called.
6968 NAMED is nonzero if this argument is a named parameter
6969 (otherwise it is an extra parameter matching an ellipsis). */
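/* For example (illustrative):

     void __attribute__ ((fastcall)) f (int a, int b, int c);

   passes A in ECX, B in EDX, and C on the stack, since fastcall only
   allocates the first two DWORD-or-smaller arguments to registers.  */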
6972 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6973 enum machine_mode orig_mode, const_tree type,
6974 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6976 static bool warnedsse, warnedmmx;
6978 /* Avoid the AL settings for the Unix64 ABI. */
6979 if (mode == VOIDmode)
6995 if (words <= cum->nregs)
6997 int regno = cum->regno;
/* Fastcall allocates the first two DWORD (SImode) or
   smaller arguments to ECX and EDX if it isn't an
   aggregate type.  */
7006 || (type && AGGREGATE_TYPE_P (type)))
/* ECX, not EAX, is the first allocated register.  */
7010 if (regno == AX_REG)
7013 return gen_rtx_REG (mode, regno);
7018 if (cum->float_in_sse < 2)
7021 if (cum->float_in_sse < 1)
/* In 32-bit mode, we pass TImode in XMM registers.  */
7032 if (!type || !AGGREGATE_TYPE_P (type))
7034 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
7037 warning (0, "SSE vector argument without SSE enabled "
7041 return gen_reg_or_parallel (mode, orig_mode,
7042 cum->sse_regno + FIRST_SSE_REG);
7047 /* OImode shouldn't be used directly. */
7056 if (!type || !AGGREGATE_TYPE_P (type))
7059 return gen_reg_or_parallel (mode, orig_mode,
7060 cum->sse_regno + FIRST_SSE_REG);
7070 if (!type || !AGGREGATE_TYPE_P (type))
7072 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
7075 warning (0, "MMX vector argument without MMX enabled "
7079 return gen_reg_or_parallel (mode, orig_mode,
7080 cum->mmx_regno + FIRST_MMX_REG);
7089 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7090 enum machine_mode orig_mode, const_tree type, bool named)
7092 /* Handle a hidden AL argument containing number of registers
7093 for varargs x86-64 functions. */
7094 if (mode == VOIDmode)
7095 return GEN_INT (cum->maybe_vaarg
7096 ? (cum->sse_nregs < 0
7097 ? X86_64_SSE_REGPARM_MAX
/* Unnamed 256-bit vector mode parameters are passed on the stack.  */
7118 return construct_container (mode, orig_mode, type, 0, cum->nregs,
7120 &x86_64_int_parameter_registers [cum->regno],
7125 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7126 enum machine_mode orig_mode, bool named,
7127 HOST_WIDE_INT bytes)
7131 /* We need to add clobber for MS_ABI->SYSV ABI calls in expand_call.
We use the value -2 to specify that the current function call is MS_ABI.  */
7133 if (mode == VOIDmode)
7134 return GEN_INT (-2);
7136 /* If we've run out of registers, it goes on the stack. */
7137 if (cum->nregs == 0)
7140 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
7142 /* Only floating point modes are passed in anything but integer regs. */
7143 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
7146 regno = cum->regno + FIRST_SSE_REG;
7151 /* Unnamed floating parameters are passed in both the
7152 SSE and integer registers. */
7153 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
7154 t2 = gen_rtx_REG (mode, regno);
7155 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
7156 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
7157 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
/* Handle aggregate types passed in a register.  */
7161 if (orig_mode == BLKmode)
7163 if (bytes > 0 && bytes <= 8)
7164 mode = (bytes > 4 ? DImode : SImode);
7165 if (mode == BLKmode)
7169 return gen_reg_or_parallel (mode, orig_mode, regno);
7172 /* Return where to put the arguments to a function.
Return zero to push the argument on the stack, or a hard register in
   which to store the argument.
7175 MODE is the argument's machine mode. TYPE is the data type of the
7176 argument. It is null for libcalls where that information may not be
7177 available. CUM gives information about the preceding args and about
7178 the function being called. NAMED is nonzero if this argument is a
7179 named parameter (otherwise it is an extra parameter matching an
7183 ix86_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
7184 const_tree type, bool named)
7186 enum machine_mode mode = omode;
7187 HOST_WIDE_INT bytes, words;
7190 if (mode == BLKmode)
7191 bytes = int_size_in_bytes (type);
7193 bytes = GET_MODE_SIZE (mode);
7194 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7196 /* To simplify the code below, represent vector types with a vector mode
7197 even if MMX/SSE are not active. */
7198 if (type && TREE_CODE (type) == VECTOR_TYPE)
7199 mode = type_natural_mode (type, cum);
7201 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
7202 arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
7203 else if (TARGET_64BIT)
7204 arg = function_arg_64 (cum, mode, omode, type, named);
7206 arg = function_arg_32 (cum, mode, omode, type, bytes, words);
7208 if (TARGET_VZEROUPPER && function_pass_avx256_p (arg))
7210 /* This argument uses 256bit AVX modes. */
7212 cum->callee_pass_avx256_p = true;
7214 cfun->machine->caller_pass_avx256_p = true;
7217 if (cum->caller && mode == VOIDmode)
7219 /* This function is called with MODE == VOIDmode immediately
7220 before the call instruction is emitted. We copy callee 256bit
7221 AVX info from the current CUM here. */
7222 cfun->machine->callee_return_avx256_p = cum->callee_return_avx256_p;
7223 cfun->machine->callee_pass_avx256_p = cum->callee_pass_avx256_p;
7229 /* A C expression that indicates when an argument must be passed by
7230 reference. If nonzero for an argument, a copy of that argument is
7231 made in memory and a pointer to the argument is passed instead of
7232 the argument itself. The pointer is passed in whatever way is
7233 appropriate for passing a pointer to that type. */
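/* For example (illustrative, per the Windows x64 convention): an
   8-byte struct is passed by value in a register, while a 24-byte
   struct is passed by reference, because only sizes 1, 2, 4 and 8
   are passed directly.  */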
7236 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7237 enum machine_mode mode ATTRIBUTE_UNUSED,
7238 const_tree type, bool named ATTRIBUTE_UNUSED)
7240 /* See Windows x64 Software Convention. */
7241 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
7243 int msize = (int) GET_MODE_SIZE (mode);
7246 /* Arrays are passed by reference. */
7247 if (TREE_CODE (type) == ARRAY_TYPE)
7250 if (AGGREGATE_TYPE_P (type))
7252 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
7253 are passed by reference. */
7254 msize = int_size_in_bytes (type);
7258 /* __m128 is passed by reference. */
7260 case 1: case 2: case 4: case 8:
7266 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
/* Return true when TYPE should be 128-bit aligned for the 32-bit
   argument passing ABI.  XXX: This function is obsolete and is only
   used for checking psABI compatibility with previous versions of GCC.  */
7277 ix86_compat_aligned_value_p (const_tree type)
7279 enum machine_mode mode = TYPE_MODE (type);
7280 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
7284 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
7286 if (TYPE_ALIGN (type) < 128)
7289 if (AGGREGATE_TYPE_P (type))
7291 /* Walk the aggregates recursively. */
7292 switch (TREE_CODE (type))
7296 case QUAL_UNION_TYPE:
7300 /* Walk all the structure fields. */
7301 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
7303 if (TREE_CODE (field) == FIELD_DECL
7304 && ix86_compat_aligned_value_p (TREE_TYPE (field)))
/* Just for use if some languages pass arrays by value.  */
7312 if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
7323 /* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
7324 XXX: This function is obsolete and is only used for checking psABI
7325 compatibility with previous versions of GCC. */
7328 ix86_compat_function_arg_boundary (enum machine_mode mode,
7329 const_tree type, unsigned int align)
/* In 32-bit mode, only _Decimal128 and __float128 are aligned to their
7332 natural boundaries. */
7333 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
7335 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
make an exception for SSE modes since these require 128-bit
   alignment.
7339 The handling here differs from field_alignment. ICC aligns MMX
7340 arguments to 4 byte boundaries, while structure fields are aligned
7341 to 8 byte boundaries. */
7344 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
7345 align = PARM_BOUNDARY;
7349 if (!ix86_compat_aligned_value_p (type))
7350 align = PARM_BOUNDARY;
7353 if (align > BIGGEST_ALIGNMENT)
7354 align = BIGGEST_ALIGNMENT;
/* Return true when TYPE should be 128-bit aligned for the 32-bit
   argument passing ABI.  */
7362 ix86_contains_aligned_value_p (const_tree type)
7364 enum machine_mode mode = TYPE_MODE (type);
7366 if (mode == XFmode || mode == XCmode)
7369 if (TYPE_ALIGN (type) < 128)
7372 if (AGGREGATE_TYPE_P (type))
7374 /* Walk the aggregates recursively. */
7375 switch (TREE_CODE (type))
7379 case QUAL_UNION_TYPE:
7383 /* Walk all the structure fields. */
7384 for (field = TYPE_FIELDS (type);
7386 field = DECL_CHAIN (field))
7388 if (TREE_CODE (field) == FIELD_DECL
7389 && ix86_contains_aligned_value_p (TREE_TYPE (field)))
/* Just for use if some languages pass arrays by value.  */
7397 if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
7406 return TYPE_ALIGN (type) >= 128;
7411 /* Gives the alignment boundary, in bits, of an argument with the
7412 specified mode and type. */
7415 ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
/* Since the main variant type is used for the call, convert the
   type to its main variant.  */
7422 type = TYPE_MAIN_VARIANT (type);
7423 align = TYPE_ALIGN (type);
7426 align = GET_MODE_ALIGNMENT (mode);
7427 if (align < PARM_BOUNDARY)
7428 align = PARM_BOUNDARY;
7432 unsigned int saved_align = align;
7436 /* i386 ABI defines XFmode arguments to be 4 byte aligned. */
7439 if (mode == XFmode || mode == XCmode)
7440 align = PARM_BOUNDARY;
7442 else if (!ix86_contains_aligned_value_p (type))
7443 align = PARM_BOUNDARY;
7446 align = PARM_BOUNDARY;
7451 && align != ix86_compat_function_arg_boundary (mode, type,
7455 inform (input_location,
7456 "The ABI for passing parameters with %d-byte"
7457 " alignment has changed in GCC 4.6",
7458 align / BITS_PER_UNIT);
7465 /* Return true if N is a possible register number of function value. */
7468 ix86_function_value_regno_p (const unsigned int regno)
7475 case FIRST_FLOAT_REG:
/* TODO: The function should depend on the current function's ABI, but
   builtins.c would need updating then.  Therefore we use the
   default ABI.  */
7479 if (TARGET_64BIT && ix86_abi == MS_ABI)
7481 return TARGET_FLOAT_RETURNS_IN_80387;
7487 if (TARGET_MACHO || TARGET_64BIT)
7495 /* Define how to find the value returned by a function.
7496 VALTYPE is the data type of the value (as a tree).
7497 If the precise function being called is known, FUNC is its FUNCTION_DECL;
7498 otherwise, FUNC is 0. */
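/* For example (illustrative): on ia32 a plain int is returned in
   %eax, a float or double in %st(0) by default, and in %xmm0 instead
   when the sseregparm attribute or local SSE math applies, as
   handled below.  */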
7501 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
7502 const_tree fntype, const_tree fn)
7506 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
7507 we normally prevent this case when mmx is not available. However
7508 some ABIs may require the result to be returned like DImode. */
7509 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7510 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
7512 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
7513 we prevent this case when sse is not available. However some ABIs
7514 may require the result to be returned like integer TImode. */
7515 else if (mode == TImode
7516 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7517 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
7519 /* 32-byte vector modes in %ymm0. */
7520 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
7521 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
7523 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
7524 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
7525 regno = FIRST_FLOAT_REG;
7527 /* Most things go in %eax. */
7530 /* Override FP return register with %xmm0 for local functions when
7531 SSE math is enabled or for functions with sseregparm attribute. */
7532 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
7534 int sse_level = ix86_function_sseregparm (fntype, fn, false);
7535 if ((sse_level >= 1 && mode == SFmode)
7536 || (sse_level == 2 && mode == DFmode))
7537 regno = FIRST_SSE_REG;
7540 /* OImode shouldn't be used directly. */
7541 gcc_assert (mode != OImode);
7543 return gen_rtx_REG (orig_mode, regno);
7547 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
7552 /* Handle libcalls, which don't provide a type node. */
7553 if (valtype == NULL)
7565 return gen_rtx_REG (mode, FIRST_SSE_REG);
7568 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
7572 return gen_rtx_REG (mode, AX_REG);
7576 ret = construct_container (mode, orig_mode, valtype, 1,
7577 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
7578 x86_64_int_return_registers, 0);
7580 /* For zero sized structures, construct_container returns NULL, but we
need to keep the rest of the compiler happy by returning a meaningful value.  */
7583 ret = gen_rtx_REG (orig_mode, AX_REG);
7589 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
7591 unsigned int regno = AX_REG;
7595 switch (GET_MODE_SIZE (mode))
7598 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7599 && !COMPLEX_MODE_P (mode))
7600 regno = FIRST_SSE_REG;
7604 if (mode == SFmode || mode == DFmode)
7605 regno = FIRST_SSE_REG;
7611 return gen_rtx_REG (orig_mode, regno);
7615 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
7616 enum machine_mode orig_mode, enum machine_mode mode)
7618 const_tree fn, fntype;
7621 if (fntype_or_decl && DECL_P (fntype_or_decl))
7622 fn = fntype_or_decl;
7623 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
7625 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
7626 return function_value_ms_64 (orig_mode, mode);
7627 else if (TARGET_64BIT)
7628 return function_value_64 (orig_mode, mode, valtype);
7630 return function_value_32 (orig_mode, mode, fntype, fn);
7634 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
7635 bool outgoing ATTRIBUTE_UNUSED)
7637 enum machine_mode mode, orig_mode;
7639 orig_mode = TYPE_MODE (valtype);
7640 mode = type_natural_mode (valtype, NULL);
7641 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
7645 ix86_libcall_value (enum machine_mode mode)
7647 return ix86_function_value_1 (NULL, NULL, mode, mode);
7650 /* Return true iff type is returned in memory. */
7652 static bool ATTRIBUTE_UNUSED
7653 return_in_memory_32 (const_tree type, enum machine_mode mode)
7657 if (mode == BLKmode)
7660 size = int_size_in_bytes (type);
7662 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
7665 if (VECTOR_MODE_P (mode) || mode == TImode)
7667 /* User-created vectors small enough to fit in EAX. */
7671 /* MMX/3dNow values are returned in MM0,
except when it doesn't exist or the ABI prescribes otherwise.  */
7674 return !TARGET_MMX || TARGET_VECT8_RETURNS;
7676 /* SSE values are returned in XMM0, except when it doesn't exist. */
7680 /* AVX values are returned in YMM0, except when it doesn't exist. */
7691 /* OImode shouldn't be used directly. */
7692 gcc_assert (mode != OImode);
7697 static bool ATTRIBUTE_UNUSED
7698 return_in_memory_64 (const_tree type, enum machine_mode mode)
7700 int needed_intregs, needed_sseregs;
7701 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
7704 static bool ATTRIBUTE_UNUSED
7705 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
7707 HOST_WIDE_INT size = int_size_in_bytes (type);
7709 /* __m128 is returned in xmm0. */
7710 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7711 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
/* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes.  */
7715 return size != 1 && size != 2 && size != 4 && size != 8;
7719 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7721 #ifdef SUBTARGET_RETURN_IN_MEMORY
7722 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
7724 const enum machine_mode mode = type_natural_mode (type, NULL);
7728 if (ix86_function_type_abi (fntype) == MS_ABI)
7729 return return_in_memory_ms_64 (type, mode);
7731 return return_in_memory_64 (type, mode);
7734 return return_in_memory_32 (type, mode);
7738 /* When returning SSE vector types, we have a choice of either
7739 (1) being abi incompatible with a -march switch, or
7740 (2) generating an error.
7741 Given no good solution, I think the safest thing is one warning.
7742 The user won't be able to use -Werror, but....
7744 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
7745 called in response to actually generating a caller or callee that
7746 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
7747 via aggregate_value_p for general type probing from tree-ssa. */
7750 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
7752 static bool warnedsse, warnedmmx;
7754 if (!TARGET_64BIT && type)
7756 /* Look at the return type of the function, not the function type. */
7757 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
7759 if (!TARGET_SSE && !warnedsse)
7762 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7765 warning (0, "SSE vector return without SSE enabled "
7770 if (!TARGET_MMX && !warnedmmx)
7772 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7775 warning (0, "MMX vector return without MMX enabled "
7785 /* Create the va_list data type. */
/* Return the calling-convention-specific va_list data type.
7788 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
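/* The SysV x86-64 record built below corresponds roughly to the
   following (illustrative):

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag;

   with va_list itself being an array of one such element.  */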
7791 ix86_build_builtin_va_list_abi (enum calling_abi abi)
7793 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
/* For i386 we use a plain pointer to the argument area.  */
7796 if (!TARGET_64BIT || abi == MS_ABI)
7797 return build_pointer_type (char_type_node);
7799 record = lang_hooks.types.make_type (RECORD_TYPE);
7800 type_decl = build_decl (BUILTINS_LOCATION,
7801 TYPE_DECL, get_identifier ("__va_list_tag"), record);
7803 f_gpr = build_decl (BUILTINS_LOCATION,
7804 FIELD_DECL, get_identifier ("gp_offset"),
7805 unsigned_type_node);
7806 f_fpr = build_decl (BUILTINS_LOCATION,
7807 FIELD_DECL, get_identifier ("fp_offset"),
7808 unsigned_type_node);
7809 f_ovf = build_decl (BUILTINS_LOCATION,
7810 FIELD_DECL, get_identifier ("overflow_arg_area"),
7812 f_sav = build_decl (BUILTINS_LOCATION,
7813 FIELD_DECL, get_identifier ("reg_save_area"),
7816 va_list_gpr_counter_field = f_gpr;
7817 va_list_fpr_counter_field = f_fpr;
7819 DECL_FIELD_CONTEXT (f_gpr) = record;
7820 DECL_FIELD_CONTEXT (f_fpr) = record;
7821 DECL_FIELD_CONTEXT (f_ovf) = record;
7822 DECL_FIELD_CONTEXT (f_sav) = record;
7824 TYPE_STUB_DECL (record) = type_decl;
7825 TYPE_NAME (record) = type_decl;
7826 TYPE_FIELDS (record) = f_gpr;
7827 DECL_CHAIN (f_gpr) = f_fpr;
7828 DECL_CHAIN (f_fpr) = f_ovf;
7829 DECL_CHAIN (f_ovf) = f_sav;
7831 layout_type (record);
7833 /* The correct type is an array type of one element. */
7834 return build_array_type (record, build_index_type (size_zero_node));
/* Set up the builtin va_list data type and, for 64-bit, the additional
7838 calling convention specific va_list data types. */
7841 ix86_build_builtin_va_list (void)
7843 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
/* Initialize the ABI-specific va_list builtin types.  */
7849 if (ix86_abi == MS_ABI)
7851 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
7852 if (TREE_CODE (t) != RECORD_TYPE)
7853 t = build_variant_type_copy (t);
7854 sysv_va_list_type_node = t;
7859 if (TREE_CODE (t) != RECORD_TYPE)
7860 t = build_variant_type_copy (t);
7861 sysv_va_list_type_node = t;
7863 if (ix86_abi != MS_ABI)
7865 t = ix86_build_builtin_va_list_abi (MS_ABI);
7866 if (TREE_CODE (t) != RECORD_TYPE)
7867 t = build_variant_type_copy (t);
7868 ms_va_list_type_node = t;
7873 if (TREE_CODE (t) != RECORD_TYPE)
7874 t = build_variant_type_copy (t);
7875 ms_va_list_type_node = t;
7882 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
7885 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
7891 /* GPR size of varargs save area. */
7892 if (cfun->va_list_gpr_size)
7893 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
7895 ix86_varargs_gpr_size = 0;
7897 /* FPR size of varargs save area. We don't need it if we don't pass
7898 anything in SSE registers. */
7899 if (TARGET_SSE && cfun->va_list_fpr_size)
7900 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
7902 ix86_varargs_fpr_size = 0;
7904 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
7907 save_area = frame_pointer_rtx;
7908 set = get_varargs_alias_set ();
7910 max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
7911 if (max > X86_64_REGPARM_MAX)
7912 max = X86_64_REGPARM_MAX;
7914 for (i = cum->regno; i < max; i++)
7916 mem = gen_rtx_MEM (Pmode,
7917 plus_constant (save_area, i * UNITS_PER_WORD));
7918 MEM_NOTRAP_P (mem) = 1;
7919 set_mem_alias_set (mem, set);
7920 emit_move_insn (mem, gen_rtx_REG (Pmode,
7921 x86_64_int_parameter_registers[i]));
7924 if (ix86_varargs_fpr_size)
7926 enum machine_mode smode;
/* Now emit code to save SSE registers.  The AX parameter contains the number
7930 of SSE parameter registers used to call this function, though all we
7931 actually check here is the zero/non-zero status. */
7933 label = gen_label_rtx ();
7934 test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
7935 emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
7938 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
7939 we used movdqa (i.e. TImode) instead? Perhaps even better would
7940 be if we could determine the real mode of the data, via a hook
7941 into pass_stdarg. Ignore all that for now. */
7943 if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
7944 crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
7946 max = cum->sse_regno + cfun->va_list_fpr_size / 16;
7947 if (max > X86_64_SSE_REGPARM_MAX)
7948 max = X86_64_SSE_REGPARM_MAX;
7950 for (i = cum->sse_regno; i < max; ++i)
7952 mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
7953 mem = gen_rtx_MEM (smode, mem);
7954 MEM_NOTRAP_P (mem) = 1;
7955 set_mem_alias_set (mem, set);
7956 set_mem_align (mem, GET_MODE_ALIGNMENT (smode));
7958 emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
7966 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7968 alias_set_type set = get_varargs_alias_set ();
7971 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7975 mem = gen_rtx_MEM (Pmode,
7976 plus_constant (virtual_incoming_args_rtx,
7977 i * UNITS_PER_WORD));
7978 MEM_NOTRAP_P (mem) = 1;
7979 set_mem_alias_set (mem, set);
7981 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7982 emit_move_insn (mem, reg);
7987 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7988 tree type, int *pretend_size ATTRIBUTE_UNUSED,
7991 CUMULATIVE_ARGS next_cum;
7994 /* This argument doesn't appear to be used anymore. Which is good,
7995 because the old code here didn't suppress rtl generation. */
7996 gcc_assert (!no_rtl);
8001 fntype = TREE_TYPE (current_function_decl);
8003 /* For varargs, we do not want to skip the dummy va_dcl argument.
8004 For stdargs, we do want to skip the last named argument. */
8006 if (stdarg_p (fntype))
8007 ix86_function_arg_advance (&next_cum, mode, type, true);
8009 if (cum->call_abi == MS_ABI)
8010 setup_incoming_varargs_ms_64 (&next_cum);
8012 setup_incoming_varargs_64 (&next_cum);
/* Check if TYPE is a va_list of the char * kind.  */
8018 is_va_list_char_pointer (tree type)
8022 /* For 32-bit it is always true. */
8025 canonic = ix86_canonical_va_list_type (type);
8026 return (canonic == ms_va_list_type_node
8027 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
8030 /* Implement va_start. */
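/* Roughly (illustrative), for the SysV x86-64 ABI the expansion
   below amounts to:

     ap->gp_offset = <named GP regs used> * 8;
     ap->fp_offset = 8 * X86_64_REGPARM_MAX + <named SSE regs used> * 16;
     ap->overflow_arg_area = <address of the first stack argument>;
     ap->reg_save_area = <start of the register save area>;  */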
8033 ix86_va_start (tree valist, rtx nextarg)
8035 HOST_WIDE_INT words, n_gpr, n_fpr;
8036 tree f_gpr, f_fpr, f_ovf, f_sav;
8037 tree gpr, fpr, ovf, sav, t;
8041 if (flag_split_stack
8042 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8044 unsigned int scratch_regno;
8046 /* When we are splitting the stack, we can't refer to the stack
8047 arguments using internal_arg_pointer, because they may be on
8048 the old stack. The split stack prologue will arrange to
8049 leave a pointer to the old stack arguments in a scratch
8050 register, which we here copy to a pseudo-register. The split
8051 stack prologue can't set the pseudo-register directly because
8052 it (the prologue) runs before any registers have been saved. */
8054 scratch_regno = split_stack_prologue_scratch_regno ();
8055 if (scratch_regno != INVALID_REGNUM)
8059 reg = gen_reg_rtx (Pmode);
8060 cfun->machine->split_stack_varargs_pointer = reg;
8063 emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
8067 push_topmost_sequence ();
8068 emit_insn_after (seq, entry_of_function ());
8069 pop_topmost_sequence ();
/* Only the 64-bit target needs something special.  */
8074 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
8076 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8077 std_expand_builtin_va_start (valist, nextarg);
8082 va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
8083 next = expand_binop (ptr_mode, add_optab,
8084 cfun->machine->split_stack_varargs_pointer,
8085 crtl->args.arg_offset_rtx,
8086 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8087 convert_move (va_r, next, 0);
8092 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
8093 f_fpr = DECL_CHAIN (f_gpr);
8094 f_ovf = DECL_CHAIN (f_fpr);
8095 f_sav = DECL_CHAIN (f_ovf);
8097 valist = build_simple_mem_ref (valist);
8098 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
8099 /* The following should be folded into the MEM_REF offset. */
8100 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
8102 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
8104 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
8106 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
/* Count the number of GP and FP argument registers used.  */
8110 words = crtl->args.info.words;
8111 n_gpr = crtl->args.info.regno;
8112 n_fpr = crtl->args.info.sse_regno;
8114 if (cfun->va_list_gpr_size)
8116 type = TREE_TYPE (gpr);
8117 t = build2 (MODIFY_EXPR, type,
8118 gpr, build_int_cst (type, n_gpr * 8));
8119 TREE_SIDE_EFFECTS (t) = 1;
8120 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8123 if (TARGET_SSE && cfun->va_list_fpr_size)
8125 type = TREE_TYPE (fpr);
8126 t = build2 (MODIFY_EXPR, type, fpr,
8127 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
8128 TREE_SIDE_EFFECTS (t) = 1;
8129 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
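/* Illustrative note (not part of the original source): gp_offset counts
   8-byte GP save slots and fp_offset counts 16-byte SSE save slots that
   sit after the X86_64_REGPARM_MAX (6) GP slots.  So for a prototype
   such as  int f (int a, double b, ...)  with one named GP and one
   named SSE argument, va_start records gp_offset = 1 * 8 = 8 and
   fp_offset = 1 * 16 + 8 * 6 = 64.  */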
8132 /* Find the overflow area. */
8133 type = TREE_TYPE (ovf);
8134 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8135 ovf_rtx = crtl->args.internal_arg_pointer;
8137 ovf_rtx = cfun->machine->split_stack_varargs_pointer;
8138 t = make_tree (type, ovf_rtx);
8140 t = build2 (POINTER_PLUS_EXPR, type, t,
8141 size_int (words * UNITS_PER_WORD));
8142 t = build2 (MODIFY_EXPR, type, ovf, t);
8143 TREE_SIDE_EFFECTS (t) = 1;
8144 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8146 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
8148 /* Find the register save area.
8149 The prologue of the function saves it right above the stack frame. */
8150 type = TREE_TYPE (sav);
8151 t = make_tree (type, frame_pointer_rtx);
8152 if (!ix86_varargs_gpr_size)
8153 t = build2 (POINTER_PLUS_EXPR, type, t,
8154 size_int (-8 * X86_64_REGPARM_MAX));
8155 t = build2 (MODIFY_EXPR, type, sav, t);
8156 TREE_SIDE_EFFECTS (t) = 1;
8157 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8161 /* Implement va_arg. */
8164 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8167 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
8168 tree f_gpr, f_fpr, f_ovf, f_sav;
8169 tree gpr, fpr, ovf, sav, t;
8171 tree lab_false, lab_over = NULL_TREE;
8176 enum machine_mode nat_mode;
8177 unsigned int arg_boundary;
8179 /* Only 64-bit targets need something special. */
8180 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
8181 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
8183 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
8184 f_fpr = DECL_CHAIN (f_gpr);
8185 f_ovf = DECL_CHAIN (f_fpr);
8186 f_sav = DECL_CHAIN (f_ovf);
8188 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
8189 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
8190 valist = build_va_arg_indirect_ref (valist);
8191 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8192 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8193 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8195 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
8197 type = build_pointer_type (type);
8198 size = int_size_in_bytes (type);
8199 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
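/* Worked example (illustrative): for a 12-byte struct not passed by
   reference, size = 12 and, with UNITS_PER_WORD == 8,
   rsize = (12 + 7) / 8 = 2 words.  */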
8201 nat_mode = type_natural_mode (type, NULL);
8210 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
8211 if (ix86_cfun_abi () == SYSV_ABI)
8218 container = construct_container (nat_mode, TYPE_MODE (type),
8219 type, 0, X86_64_REGPARM_MAX,
8220 X86_64_SSE_REGPARM_MAX, intreg,
8225 /* Pull the value out of the saved registers. */
8227 addr = create_tmp_var (ptr_type_node, "addr");
8231 int needed_intregs, needed_sseregs;
8233 tree int_addr, sse_addr;
8235 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8236 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8238 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
8240 need_temp = (!REG_P (container)
8241 && ((needed_intregs && TYPE_ALIGN (type) > 64)
8242 || TYPE_ALIGN (type) > 128));
8244 /* In case we are passing a structure, verify that it is a consecutive
8245 block in the register save area. If not, we need to do moves. */
8246 if (!need_temp && !REG_P (container))
8248 /* Verify that all registers are strictly consecutive */
8249 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
8253 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8255 rtx slot = XVECEXP (container, 0, i);
8256 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
8257 || INTVAL (XEXP (slot, 1)) != i * 16)
8265 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8267 rtx slot = XVECEXP (container, 0, i);
8268 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
8269 || INTVAL (XEXP (slot, 1)) != i * 8)
8281 int_addr = create_tmp_var (ptr_type_node, "int_addr");
8282 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
8285 /* First ensure that we fit completely in registers. */
8288 t = build_int_cst (TREE_TYPE (gpr),
8289 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
8290 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
8291 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8292 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8293 gimplify_and_add (t, pre_p);
8297 t = build_int_cst (TREE_TYPE (fpr),
8298 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
8299 + X86_64_REGPARM_MAX * 8);
8300 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
8301 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8302 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8303 gimplify_and_add (t, pre_p);
8306 /* Compute index to start of area used for integer regs. */
8309 /* int_addr = gpr + sav; */
8310 t = fold_convert (sizetype, gpr);
8311 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8312 gimplify_assign (int_addr, t, pre_p);
8316 /* sse_addr = fpr + sav; */
8317 t = fold_convert (sizetype, fpr);
8318 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8319 gimplify_assign (sse_addr, t, pre_p);
8323 int i, prev_size = 0;
8324 tree temp = create_tmp_var (type, "va_arg_tmp");
8327 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
8328 gimplify_assign (addr, t, pre_p);
8330 for (i = 0; i < XVECLEN (container, 0); i++)
8332 rtx slot = XVECEXP (container, 0, i);
8333 rtx reg = XEXP (slot, 0);
8334 enum machine_mode mode = GET_MODE (reg);
8340 tree dest_addr, dest;
8341 int cur_size = GET_MODE_SIZE (mode);
8343 gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
8344 prev_size = INTVAL (XEXP (slot, 1));
8345 if (prev_size + cur_size > size)
8347 cur_size = size - prev_size;
8348 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
8349 if (mode == BLKmode)
8352 piece_type = lang_hooks.types.type_for_mode (mode, 1);
8353 if (mode == GET_MODE (reg))
8354 addr_type = build_pointer_type (piece_type);
8356 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8358 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8361 if (SSE_REGNO_P (REGNO (reg)))
8363 src_addr = sse_addr;
8364 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
8368 src_addr = int_addr;
8369 src_offset = REGNO (reg) * 8;
8371 src_addr = fold_convert (addr_type, src_addr);
8372 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
8373 size_int (src_offset));
8375 dest_addr = fold_convert (daddr_type, addr);
8376 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
8377 size_int (prev_size));
8378 if (cur_size == GET_MODE_SIZE (mode))
8380 src = build_va_arg_indirect_ref (src_addr);
8381 dest = build_va_arg_indirect_ref (dest_addr);
8383 gimplify_assign (dest, src, pre_p);
8388 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
8389 3, dest_addr, src_addr,
8390 size_int (cur_size));
8391 gimplify_and_add (copy, pre_p);
8393 prev_size += cur_size;
8399 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
8400 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
8401 gimplify_assign (gpr, t, pre_p);
8406 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
8407 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
8408 gimplify_assign (fpr, t, pre_p);
8411 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8413 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8416 /* ... otherwise out of the overflow area. */
8418 /* When we align a parameter on the stack for the caller, if the
8419 parameter alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will
8420 be aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We match the callee
8421 here with the caller. */
8422 arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
8423 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
8424 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
8426 /* Care for on-stack alignment if needed. */
8427 if (arg_boundary <= 64 || size == 0)
8431 HOST_WIDE_INT align = arg_boundary / 8;
8432 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
8433 size_int (align - 1));
8434 t = fold_convert (sizetype, t);
8435 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
8437 t = fold_convert (TREE_TYPE (ovf), t);
8440 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8441 gimplify_assign (addr, t, pre_p);
8443 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
8444 size_int (rsize * UNITS_PER_WORD));
8445 gimplify_assign (unshare_expr (ovf), t, pre_p);
8448 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8450 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
8451 addr = fold_convert (ptrtype, addr);
8454 addr = build_va_arg_indirect_ref (addr);
8455 return build_va_arg_indirect_ref (addr);
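/* A rough sketch (not authoritative) of the gimple produced above for
   va_arg (ap, int) on x86-64:

       if (ap.gp_offset >= 48) goto lab_false;
       addr = ap.reg_save_area + ap.gp_offset;
       ap.gp_offset += 8;
       goto lab_over;
     lab_false:
       addr = ap.overflow_arg_area;      (plus any alignment fixup)
       ap.overflow_arg_area += 8;
     lab_over:
       result = *(int *) addr;  */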
8458 /* Return true if OPNUM's MEM should be matched
8459 in movabs* patterns. */
8462 ix86_check_movabs (rtx insn, int opnum)
8466 set = PATTERN (insn);
8467 if (GET_CODE (set) == PARALLEL)
8468 set = XVECEXP (set, 0, 0);
8469 gcc_assert (GET_CODE (set) == SET);
8470 mem = XEXP (set, opnum);
8471 while (GET_CODE (mem) == SUBREG)
8472 mem = SUBREG_REG (mem);
8473 gcc_assert (MEM_P (mem));
8474 return volatile_ok || !MEM_VOLATILE_P (mem);
8477 /* Initialize the table of extra 80387 mathematical constants. */
8480 init_ext_80387_constants (void)
8482 static const char * cst[5] =
8484 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
8485 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
8486 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
8487 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
8488 "3.1415926535897932385128089594061862044", /* 4: fldpi */
8492 for (i = 0; i < 5; i++)
8494 real_from_string (&ext_80387_constants_table[i], cst[i]);
8495 /* Ensure each constant is rounded to XFmode precision. */
8496 real_convert (&ext_80387_constants_table[i],
8497 XFmode, &ext_80387_constants_table[i]);
8500 ext_80387_constants_init = 1;
8503 /* Return non-zero if the constant is something that
8504 can be loaded with a special instruction. */
8507 standard_80387_constant_p (rtx x)
8509 enum machine_mode mode = GET_MODE (x);
8513 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
8516 if (x == CONST0_RTX (mode))
8518 if (x == CONST1_RTX (mode))
8521 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8523 /* For XFmode constants, try to find a special 80387 instruction when
8524 optimizing for size or on those CPUs that benefit from them. */
8526 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
8530 if (! ext_80387_constants_init)
8531 init_ext_80387_constants ();
8533 for (i = 0; i < 5; i++)
8534 if (real_identical (&r, &ext_80387_constants_table[i]))
8538 /* A load of the constant -0.0 or -1.0 will be split into an
8539 fldz;fchs or fld1;fchs sequence. */
8540 if (real_isnegzero (&r))
8542 if (real_identical (&r, &dconstm1))
8548 /* Return the opcode of the special instruction to be used to load
8552 standard_80387_constant_opcode (rtx x)
8554 switch (standard_80387_constant_p (x))
8578 /* Return the CONST_DOUBLE representing the 80387 constant that is
8579 loaded by the specified special instruction. The argument IDX
8580 matches the return value from standard_80387_constant_p. */
8583 standard_80387_constant_rtx (int idx)
8587 if (! ext_80387_constants_init)
8588 init_ext_80387_constants ();
8604 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
8608 /* Return 1 if X is all 0s and 2 if X is all 1s
8609 in a supported SSE vector mode. */
8612 standard_sse_constant_p (rtx x)
8614 enum machine_mode mode = GET_MODE (x);
8616 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
8618 if (vector_all_ones_operand (x, mode))
8634 /* Return the opcode of the special instruction to be used to load
8638 standard_sse_constant_opcode (rtx insn, rtx x)
8640 switch (standard_sse_constant_p (x))
8643 switch (get_attr_mode (insn))
8646 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8648 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8649 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8651 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
8653 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8654 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8656 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
8658 return "vxorps\t%x0, %x0, %x0";
8660 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8661 return "vxorps\t%x0, %x0, %x0";
8663 return "vxorpd\t%x0, %x0, %x0";
8665 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8666 return "vxorps\t%x0, %x0, %x0";
8668 return "vpxor\t%x0, %x0, %x0";
8673 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
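/* Note (added): pcmpeqd with identical source and destination compares
   each element with itself, so every comparison succeeds and the
   destination becomes all 1s -- a dependency-free way of materializing
   the all-ones constant without a load.  */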
8680 /* Returns true if OP contains a symbol reference. */
8683 symbolic_reference_mentioned_p (rtx op)
8688 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
8691 fmt = GET_RTX_FORMAT (GET_CODE (op));
8692 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
8698 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
8699 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
8703 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
8710 /* Return true if it is appropriate to emit `ret' instructions in the
8711 body of a function. Do this only if the epilogue is simple, needing a
8712 couple of insns. Prior to reloading, we can't tell how many registers
8713 must be saved, so return false then. Return false if there is no frame
8714 marker to de-allocate. */
8717 ix86_can_use_return_insn_p (void)
8719 struct ix86_frame frame;
8721 if (! reload_completed || frame_pointer_needed)
8724 /* Don't allow more than 32k pop, since that's all we can do
8725 with one instruction. */
8726 if (crtl->args.pops_args && crtl->args.size >= 32768)
8729 ix86_compute_frame_layout (&frame);
8730 return (frame.stack_pointer_offset == UNITS_PER_WORD
8731 && (frame.nregs + frame.nsseregs) == 0);
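/* Informally: a bare `ret' is usable only when the epilogue would have
   nothing to do -- no registers to restore and nothing left on the
   stack above the return address.  */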
8734 /* Value should be nonzero if functions must have frame pointers.
8735 Zero means the frame pointer need not be set up (and parms may
8736 be accessed via the stack pointer) in functions that seem suitable. */
8739 ix86_frame_pointer_required (void)
8741 /* If we accessed previous frames, then the generated code expects
8742 to be able to access the saved ebp value in our frame. */
8743 if (cfun->machine->accesses_prev_frame)
8746 /* Several x86 OSes need a frame pointer for other reasons,
8747 usually pertaining to setjmp. */
8748 if (SUBTARGET_FRAME_POINTER_REQUIRED)
8751 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
8752 turns off the frame pointer by default. Turn it back on now if
8753 we've not got a leaf function. */
8754 if (TARGET_OMIT_LEAF_FRAME_POINTER
8755 && (!current_function_is_leaf
8756 || ix86_current_function_calls_tls_descriptor))
8759 if (crtl->profile && !flag_fentry)
8765 /* Record that the current function accesses previous call frames. */
8768 ix86_setup_frame_addresses (void)
8770 cfun->machine->accesses_prev_frame = 1;
8773 #ifndef USE_HIDDEN_LINKONCE
8774 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
8775 # define USE_HIDDEN_LINKONCE 1
8777 # define USE_HIDDEN_LINKONCE 0
8781 static int pic_labels_used;
8783 /* Fills in the label name that should be used for a pc thunk for
8784 the given register. */
8787 get_pc_thunk_name (char name[32], unsigned int regno)
8789 gcc_assert (!TARGET_64BIT);
8791 if (USE_HIDDEN_LINKONCE)
8792 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
8794 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
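/* For example, with USE_HIDDEN_LINKONCE the thunk for %ebx is named
   "__i686.get_pc_thunk.bx".  */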
8798 /* This function generates code for -fpic that loads %ebx with
8799 the return address of the caller and then returns. */
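/* A minimal sketch of the emitted thunk (illustrative; the exact
   sections and directives depend on USE_HIDDEN_LINKONCE and the
   target):

       __i686.get_pc_thunk.bx:
           movl (%esp), %ebx
           ret                                                        */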
8802 ix86_code_end (void)
8807 for (regno = AX_REG; regno <= SP_REG; regno++)
8812 if (!(pic_labels_used & (1 << regno)))
8815 get_pc_thunk_name (name, regno);
8817 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
8818 get_identifier (name),
8819 build_function_type (void_type_node, void_list_node));
8820 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
8821 NULL_TREE, void_type_node);
8822 TREE_PUBLIC (decl) = 1;
8823 TREE_STATIC (decl) = 1;
8828 switch_to_section (darwin_sections[text_coal_section]);
8829 fputs ("\t.weak_definition\t", asm_out_file);
8830 assemble_name (asm_out_file, name);
8831 fputs ("\n\t.private_extern\t", asm_out_file);
8832 assemble_name (asm_out_file, name);
8833 putc ('\n', asm_out_file);
8834 ASM_OUTPUT_LABEL (asm_out_file, name);
8835 DECL_WEAK (decl) = 1;
8839 if (USE_HIDDEN_LINKONCE)
8841 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
8843 targetm.asm_out.unique_section (decl, 0);
8844 switch_to_section (get_named_section (decl, NULL, 0));
8846 targetm.asm_out.globalize_label (asm_out_file, name);
8847 fputs ("\t.hidden\t", asm_out_file);
8848 assemble_name (asm_out_file, name);
8849 putc ('\n', asm_out_file);
8850 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
8854 switch_to_section (text_section);
8855 ASM_OUTPUT_LABEL (asm_out_file, name);
8858 DECL_INITIAL (decl) = make_node (BLOCK);
8859 current_function_decl = decl;
8860 init_function_start (decl);
8861 first_function_block_is_cold = false;
8862 /* Make sure unwind info is emitted for the thunk if needed. */
8863 final_start_function (emit_barrier (), asm_out_file, 1);
8865 /* Pad stack IP move with 4 instructions (two NOPs count
8866 as one instruction). */
8867 if (TARGET_PAD_SHORT_FUNCTION)
8872 fputs ("\tnop\n", asm_out_file);
8875 xops[0] = gen_rtx_REG (Pmode, regno);
8876 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8877 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
8878 fputs ("\tret\n", asm_out_file);
8879 final_end_function ();
8880 init_insn_lengths ();
8881 free_after_compilation (cfun);
8883 current_function_decl = NULL;
8886 if (flag_split_stack)
8887 file_end_indicate_split_stack ();
8890 /* Emit code for the SET_GOT patterns. */
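/* Illustrative example (AT&T syntax) of the classic sequence emitted
   below for 32-bit PIC when deep branch prediction is not used:

       call .L2
   .L2: popl %ebx
        addl $_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx                     */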
8893 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
8899 if (TARGET_VXWORKS_RTP && flag_pic)
8901 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
8902 xops[2] = gen_rtx_MEM (Pmode,
8903 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
8904 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
8906 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
8907 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
8908 an unadorned address. */
8909 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
8910 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
8911 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
8915 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
8917 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
8919 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
8922 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
8925 output_asm_insn ("call\t%a2", xops);
8926 #ifdef DWARF2_UNWIND_INFO
8927 /* The call to the next label acts as a push. */
8928 if (dwarf2out_do_frame ())
8932 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8933 gen_rtx_PLUS (Pmode,
8936 RTX_FRAME_RELATED_P (insn) = 1;
8937 dwarf2out_frame_debug (insn, true);
8944 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8945 is what will be referenced by the Mach-O PIC subsystem. */
8947 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8950 targetm.asm_out.internal_label (asm_out_file, "L",
8951 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
8955 output_asm_insn ("pop%z0\t%0", xops);
8956 #ifdef DWARF2_UNWIND_INFO
8957 /* The pop is a pop and clobbers dest, but doesn't restore it
8958 for unwind info purposes. */
8959 if (dwarf2out_do_frame ())
8963 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
8964 dwarf2out_frame_debug (insn, true);
8965 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8966 gen_rtx_PLUS (Pmode,
8969 RTX_FRAME_RELATED_P (insn) = 1;
8970 dwarf2out_frame_debug (insn, true);
8979 get_pc_thunk_name (name, REGNO (dest));
8980 pic_labels_used |= 1 << REGNO (dest);
8982 #ifdef DWARF2_UNWIND_INFO
8983 /* Ensure all queued register saves are flushed before the
8985 if (dwarf2out_do_frame ())
8986 dwarf2out_flush_queued_reg_saves ();
8988 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
8989 xops[2] = gen_rtx_MEM (QImode, xops[2]);
8990 output_asm_insn ("call\t%X2", xops);
8991 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8992 is what will be referenced by the Mach-O PIC subsystem. */
8995 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8997 targetm.asm_out.internal_label (asm_out_file, "L",
8998 CODE_LABEL_NUMBER (label));
9005 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
9006 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
9008 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
9013 /* Generate a "push" pattern for input ARG. */
9018 struct machine_function *m = cfun->machine;
9020 if (m->fs.cfa_reg == stack_pointer_rtx)
9021 m->fs.cfa_offset += UNITS_PER_WORD;
9022 m->fs.sp_offset += UNITS_PER_WORD;
9024 return gen_rtx_SET (VOIDmode,
9026 gen_rtx_PRE_DEC (Pmode,
9027 stack_pointer_rtx)),
9031 /* Generate a "pop" pattern for input ARG. */
9036 return gen_rtx_SET (VOIDmode,
9039 gen_rtx_POST_INC (Pmode,
9040 stack_pointer_rtx)));
9043 /* Return >= 0 if there is an unused call-clobbered register available
9044 for the entire function. */
9047 ix86_select_alt_pic_regnum (void)
9049 if (current_function_is_leaf
9051 && !ix86_current_function_calls_tls_descriptor)
9054 /* Can't use the same register for both PIC and DRAP. */
9056 drap = REGNO (crtl->drap_reg);
9059 for (i = 2; i >= 0; --i)
9060 if (i != drap && !df_regs_ever_live_p (i))
9064 return INVALID_REGNUM;
9067 /* Return 1 if we need to save REGNO. */
9069 ix86_save_reg (unsigned int regno, int maybe_eh_return)
9071 if (pic_offset_table_rtx
9072 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
9073 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
9075 || crtl->calls_eh_return
9076 || crtl->uses_const_pool))
9078 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
9083 if (crtl->calls_eh_return && maybe_eh_return)
9088 unsigned test = EH_RETURN_DATA_REGNO (i);
9089 if (test == INVALID_REGNUM)
9096 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
9099 return (df_regs_ever_live_p (regno)
9100 && !call_used_regs[regno]
9101 && !fixed_regs[regno]
9102 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
9105 /* Return the number of saved general purpose registers. */
9108 ix86_nsaved_regs (void)
9113 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9114 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9119 /* Return the number of saved SSE registers. */
9122 ix86_nsaved_sseregs (void)
9127 if (ix86_cfun_abi () != MS_ABI)
9129 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9130 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9135 /* Given FROM and TO register numbers, say whether this elimination is
9136 allowed. If stack alignment is needed, we can only replace argument
9137 pointer with hard frame pointer, or replace frame pointer with stack
9138 pointer. Otherwise, frame pointer elimination is automatically
9139 handled and all other eliminations are valid. */
9142 ix86_can_eliminate (const int from, const int to)
9144 if (stack_realign_fp)
9145 return ((from == ARG_POINTER_REGNUM
9146 && to == HARD_FRAME_POINTER_REGNUM)
9147 || (from == FRAME_POINTER_REGNUM
9148 && to == STACK_POINTER_REGNUM));
9150 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
9153 /* Return the offset between two registers, one to be eliminated, and the other
9154 its replacement, at the start of a routine. */
9157 ix86_initial_elimination_offset (int from, int to)
9159 struct ix86_frame frame;
9160 ix86_compute_frame_layout (&frame);
9162 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9163 return frame.hard_frame_pointer_offset;
9164 else if (from == FRAME_POINTER_REGNUM
9165 && to == HARD_FRAME_POINTER_REGNUM)
9166 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
9169 gcc_assert (to == STACK_POINTER_REGNUM);
9171 if (from == ARG_POINTER_REGNUM)
9172 return frame.stack_pointer_offset;
9174 gcc_assert (from == FRAME_POINTER_REGNUM);
9175 return frame.stack_pointer_offset - frame.frame_pointer_offset;
9179 /* In a dynamically-aligned function, we can't know the offset from
9180 stack pointer to frame pointer, so we must ensure that setjmp
9181 eliminates fp against the hard fp (%ebp) rather than trying to
9182 index from %esp up to the top of the frame across a gap that is
9183 of unknown (at compile-time) size. */
9185 ix86_builtin_setjmp_frame_value (void)
9187 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
9190 /* On the x86 -fsplit-stack and -fstack-protector both use the same
9191 field in the TCB, so they cannot be used together. */
9194 ix86_supports_split_stack (bool report ATTRIBUTE_UNUSED,
9195 struct gcc_options *opts ATTRIBUTE_UNUSED)
9199 #ifndef TARGET_THREAD_SPLIT_STACK_OFFSET
9201 error ("%<-fsplit-stack%> currently only supported on GNU/Linux");
9204 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
9207 error ("%<-fsplit-stack%> requires "
9208 "assembler support for CFI directives");
9216 /* When using -fsplit-stack, the allocation routines set a field in
9217 the TCB to the bottom of the stack plus this much space, measured
9220 #define SPLIT_STACK_AVAILABLE 256
9222 /* Fill in the ix86_frame structure describing the frame of the current function. */
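/* Added sketch of the layout computed below, from the CFA downward
   (offsets are relative to the CFA; illustrative only, details vary
   with target and options):

       return address              <- offset UNITS_PER_WORD
       [pushed static chain]
       saved frame pointer         <- hard_frame_pointer_offset
       GP register save area       <- reg_save_offset
       SSE register save area      (16-byte aligned)
       va_arg register save area
       local stack frame           <- frame_pointer_offset
       outgoing arguments
                                   <- stack_pointer_offset            */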
9225 ix86_compute_frame_layout (struct ix86_frame *frame)
9227 unsigned int stack_alignment_needed;
9228 HOST_WIDE_INT offset;
9229 unsigned int preferred_alignment;
9230 HOST_WIDE_INT size = get_frame_size ();
9231 HOST_WIDE_INT to_allocate;
9233 frame->nregs = ix86_nsaved_regs ();
9234 frame->nsseregs = ix86_nsaved_sseregs ();
9236 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
9237 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
9239 /* The MS ABI seems to require the stack alignment to always be 16, except
9240 for function prologues and leaf functions. */
9241 if ((ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
9242 && (!current_function_is_leaf || cfun->calls_alloca != 0
9243 || ix86_current_function_calls_tls_descriptor))
9245 preferred_alignment = 16;
9246 stack_alignment_needed = 16;
9247 crtl->preferred_stack_boundary = 128;
9248 crtl->stack_alignment_needed = 128;
9251 gcc_assert (!size || stack_alignment_needed);
9252 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
9253 gcc_assert (preferred_alignment <= stack_alignment_needed);
9255 /* For SEH we have to limit the amount of code movement into the prologue.
9256 At present we do this via a BLOCKAGE, at which point there's very little
9257 scheduling that can be done, which means that there's very little point
9258 in doing anything except PUSHs. */
9260 cfun->machine->use_fast_prologue_epilogue = false;
9262 /* During the reload iteration the number of registers saved can change.
9263 Recompute the value as needed. Do not recompute when the number of
9264 registers didn't change, as reload makes multiple calls to this function
9265 and does not expect the decision to change within a single iteration. */
9266 else if (!optimize_function_for_size_p (cfun)
9267 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
9269 int count = frame->nregs;
9270 struct cgraph_node *node = cgraph_node (current_function_decl);
9272 cfun->machine->use_fast_prologue_epilogue_nregs = count;
9274 /* The fast prologue uses move instead of push to save registers. This
9275 is significantly longer, but also executes faster, as modern hardware
9276 can execute the moves in parallel but can't do that for push/pop.
9278 Be careful about choosing which prologue to emit: when a function takes
9279 many instructions to execute, we may use the slow version, as well as
9280 when the function is known to be outside a hot spot (this is known
9281 with feedback only). Weight the size of the function by the number of
9282 registers to save, as it is cheap to use one or two push instructions
9283 but very slow to use many of them. */
9285 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
9286 if (node->frequency < NODE_FREQUENCY_NORMAL
9287 || (flag_branch_probabilities
9288 && node->frequency < NODE_FREQUENCY_HOT))
9289 cfun->machine->use_fast_prologue_epilogue = false;
9291 cfun->machine->use_fast_prologue_epilogue
9292 = !expensive_function_p (count);
9294 if (TARGET_PROLOGUE_USING_MOVE
9295 && cfun->machine->use_fast_prologue_epilogue)
9296 frame->save_regs_using_mov = true;
9298 frame->save_regs_using_mov = false;
9300 /* If static stack checking is enabled and done with probes, the registers
9301 need to be saved before allocating the frame. */
9302 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
9303 frame->save_regs_using_mov = false;
9305 /* Skip return address. */
9306 offset = UNITS_PER_WORD;
9308 /* Skip pushed static chain. */
9309 if (ix86_static_chain_on_stack)
9310 offset += UNITS_PER_WORD;
9312 /* Skip saved base pointer. */
9313 if (frame_pointer_needed)
9314 offset += UNITS_PER_WORD;
9315 frame->hfp_save_offset = offset;
9317 /* The traditional frame pointer location is at the top of the frame. */
9318 frame->hard_frame_pointer_offset = offset;
9320 /* Register save area */
9321 offset += frame->nregs * UNITS_PER_WORD;
9322 frame->reg_save_offset = offset;
9324 /* Align and set SSE register save area. */
9325 if (frame->nsseregs)
9327 /* The only ABI that has saved SSE registers (Win64) also has a
9328 16-byte aligned default stack, and thus we don't need to be
9329 within the re-aligned local stack frame to save them. */
9330 gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
9331 offset = (offset + 16 - 1) & -16;
9332 offset += frame->nsseregs * 16;
9334 frame->sse_reg_save_offset = offset;
9336 /* The re-aligned stack starts here. Values before this point are not
9337 directly comparable with values below this point. In order to make
9338 sure that no value happens to be the same before and after, force
9339 the alignment computation below to add a non-zero value. */
9340 if (stack_realign_fp)
9341 offset = (offset + stack_alignment_needed) & -stack_alignment_needed;
9344 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
9345 offset += frame->va_arg_size;
9347 /* Align start of frame for local function. */
9348 if (stack_realign_fp
9349 || offset != frame->sse_reg_save_offset
9351 || !current_function_is_leaf
9352 || cfun->calls_alloca
9353 || ix86_current_function_calls_tls_descriptor)
9354 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
9356 /* Frame pointer points here. */
9357 frame->frame_pointer_offset = offset;
9361 /* Add the outgoing arguments area. It can be skipped if we eliminated
9362 all the function calls as dead code.
9363 Skipping is however impossible when the function calls alloca, as the
9364 alloca expander assumes that the last crtl->outgoing_args_size
9365 bytes of the stack frame are unused. */
9366 if (ACCUMULATE_OUTGOING_ARGS
9367 && (!current_function_is_leaf || cfun->calls_alloca
9368 || ix86_current_function_calls_tls_descriptor))
9370 offset += crtl->outgoing_args_size;
9371 frame->outgoing_arguments_size = crtl->outgoing_args_size;
9374 frame->outgoing_arguments_size = 0;
9376 /* Align stack boundary. Only needed if we're calling another function
9378 if (!current_function_is_leaf || cfun->calls_alloca
9379 || ix86_current_function_calls_tls_descriptor)
9380 offset = (offset + preferred_alignment - 1) & -preferred_alignment;
9382 /* We've reached the end of the stack frame. */
9383 frame->stack_pointer_offset = offset;
9385 /* Size the prologue needs to allocate. */
9386 to_allocate = offset - frame->sse_reg_save_offset;
9388 if ((!to_allocate && frame->nregs <= 1)
9389 || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
9390 frame->save_regs_using_mov = false;
9392 if (ix86_using_red_zone ()
9393 && current_function_sp_is_unchanging
9394 && current_function_is_leaf
9395 && !ix86_current_function_calls_tls_descriptor)
9397 frame->red_zone_size = to_allocate;
9398 if (frame->save_regs_using_mov)
9399 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
9400 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
9401 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
9404 frame->red_zone_size = 0;
9405 frame->stack_pointer_offset -= frame->red_zone_size;
9407 /* The SEH frame pointer location is near the bottom of the frame.
9408 This is enforced by the fact that the difference between the
9409 stack pointer and the frame pointer is limited to 240 bytes in
9410 the unwind data structure. */
9415 /* If we can leave the frame pointer where it is, do so. */
9416 diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
9417 if (diff > 240 || (diff & 15) != 0)
9419 /* Ideally we'd determine what portion of the local stack frame
9420 (within the constraint of the lowest 240) is most heavily used.
9421 But without that complication, simply bias the frame pointer
9422 by 128 bytes so as to maximize the amount of the local stack
9423 frame that is addressable with 8-bit offsets. */
9424 frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
9429 /* This is semi-inlined memory_address_length, but simplified
9430 since we know that we're always dealing with reg+offset, and
9431 to avoid having to create and discard all that rtl. */
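/* For illustration: the length counts bytes needed beyond the opcode
   and ModRM byte.  (%rax) with offset 0 costs 0; (%rbp) and (%r13)
   always need a displacement byte; an offset in [-128, 127] costs 1
   byte and larger offsets cost 4; (%rsp) and (%r12) add one SIB
   byte.  */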
9434 choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
9440 /* EBP and R13 cannot be encoded without an offset. */
9441 len = (regno == BP_REG || regno == R13_REG);
9443 else if (IN_RANGE (offset, -128, 127))
9446 /* ESP and R12 must be encoded with a SIB byte. */
9447 if (regno == SP_REG || regno == R12_REG)
9453 /* Return an RTX that points to CFA_OFFSET within the stack frame.
9454 The valid base registers are taken from CFUN->MACHINE->FS. */
9457 choose_baseaddr (HOST_WIDE_INT cfa_offset)
9459 const struct machine_function *m = cfun->machine;
9460 rtx base_reg = NULL;
9461 HOST_WIDE_INT base_offset = 0;
9463 if (m->use_fast_prologue_epilogue)
9465 /* Choose the base register most likely to allow the most scheduling
9466 opportunities. Generally FP is valid throughout the function,
9467 while DRAP must be reloaded within the epilogue. But choose either
9468 over the SP due to the increased encoding size. */
9472 base_reg = hard_frame_pointer_rtx;
9473 base_offset = m->fs.fp_offset - cfa_offset;
9475 else if (m->fs.drap_valid)
9477 base_reg = crtl->drap_reg;
9478 base_offset = 0 - cfa_offset;
9480 else if (m->fs.sp_valid)
9482 base_reg = stack_pointer_rtx;
9483 base_offset = m->fs.sp_offset - cfa_offset;
9488 HOST_WIDE_INT toffset;
9491 /* Choose the base register with the smallest address encoding.
9492 With a tie, choose FP > DRAP > SP. */
9495 base_reg = stack_pointer_rtx;
9496 base_offset = m->fs.sp_offset - cfa_offset;
9497 len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
9499 if (m->fs.drap_valid)
9501 toffset = 0 - cfa_offset;
9502 tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
9505 base_reg = crtl->drap_reg;
9506 base_offset = toffset;
9512 toffset = m->fs.fp_offset - cfa_offset;
9513 tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
9516 base_reg = hard_frame_pointer_rtx;
9517 base_offset = toffset;
9522 gcc_assert (base_reg != NULL);
9524 return plus_constant (base_reg, base_offset);
9527 /* Emit code to save registers in the prologue. */
9530 ix86_emit_save_regs (void)
9535 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
9536 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9538 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
9539 RTX_FRAME_RELATED_P (insn) = 1;
9543 /* Emit a single register save at CFA - CFA_OFFSET. */
9546 ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
9547 HOST_WIDE_INT cfa_offset)
9549 struct machine_function *m = cfun->machine;
9550 rtx reg = gen_rtx_REG (mode, regno);
9551 rtx mem, addr, base, insn;
9553 addr = choose_baseaddr (cfa_offset);
9554 mem = gen_frame_mem (mode, addr);
9556 /* For SSE saves, we need to indicate the 128-bit alignment. */
9557 set_mem_align (mem, GET_MODE_ALIGNMENT (mode));
9559 insn = emit_move_insn (mem, reg);
9560 RTX_FRAME_RELATED_P (insn) = 1;
9563 if (GET_CODE (base) == PLUS)
9564 base = XEXP (base, 0);
9565 gcc_checking_assert (REG_P (base));
9567 /* When saving registers into a re-aligned local stack frame, avoid
9568 any tricky guessing by dwarf2out. */
9569 if (m->fs.realigned)
9571 gcc_checking_assert (stack_realign_drap);
9573 if (regno == REGNO (crtl->drap_reg))
9575 /* A bit of a hack. We force the DRAP register to be saved in
9576 the re-aligned stack frame, which provides us with a copy
9577 of the CFA that will last past the prologue. Install it. */
9578 gcc_checking_assert (cfun->machine->fs.fp_valid);
9579 addr = plus_constant (hard_frame_pointer_rtx,
9580 cfun->machine->fs.fp_offset - cfa_offset);
9581 mem = gen_rtx_MEM (mode, addr);
9582 add_reg_note (insn, REG_CFA_DEF_CFA, mem);
9586 /* The frame pointer is a stable reference within the
9587 aligned frame. Use it. */
9588 gcc_checking_assert (cfun->machine->fs.fp_valid);
9589 addr = plus_constant (hard_frame_pointer_rtx,
9590 cfun->machine->fs.fp_offset - cfa_offset);
9591 mem = gen_rtx_MEM (mode, addr);
9592 add_reg_note (insn, REG_CFA_EXPRESSION,
9593 gen_rtx_SET (VOIDmode, mem, reg));
9597 /* The memory may not be relative to the current CFA register,
9598 which means that we may need to generate a new pattern for
9599 use by the unwind info. */
9600 else if (base != m->fs.cfa_reg)
9602 addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
9603 mem = gen_rtx_MEM (mode, addr);
9604 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
9608 /* Emit code to save registers using MOV insns.
9609 First register is stored at CFA - CFA_OFFSET. */
9611 ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
9615 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9616 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9618 ix86_emit_save_reg_using_mov (Pmode, regno, cfa_offset);
9619 cfa_offset -= UNITS_PER_WORD;
9623 /* Emit code to save SSE registers using MOV insns.
9624 First register is stored at CFA - CFA_OFFSET. */
9626 ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
9630 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9631 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9633 ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
9638 static GTY(()) rtx queued_cfa_restores;
9640 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
9641 stack manipulation insn. The value is on the stack at CFA - CFA_OFFSET.
9642 Don't add the note if the previously saved value will be left untouched
9643 within the stack red zone until return, as unwinders can find the same
9644 value in the register and on the stack. */
9647 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
9649 if (cfa_offset <= cfun->machine->fs.red_zone_offset)
9654 add_reg_note (insn, REG_CFA_RESTORE, reg);
9655 RTX_FRAME_RELATED_P (insn) = 1;
9659 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
9662 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
9665 ix86_add_queued_cfa_restore_notes (rtx insn)
9668 if (!queued_cfa_restores)
9670 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
9672 XEXP (last, 1) = REG_NOTES (insn);
9673 REG_NOTES (insn) = queued_cfa_restores;
9674 queued_cfa_restores = NULL_RTX;
9675 RTX_FRAME_RELATED_P (insn) = 1;
9678 /* Expand prologue or epilogue stack adjustment.
9679 The pattern exists to put a dependency on all ebp-based memory accesses.
9680 STYLE should be negative if instructions should be marked as frame related,
9681 zero if %r11 register is live and cannot be freely used and positive
9685 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
9686 int style, bool set_cfa)
9688 struct machine_function *m = cfun->machine;
9690 bool add_frame_related_expr = false;
9693 insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
9694 else if (x86_64_immediate_operand (offset, DImode))
9695 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
9699 /* r11 is used by indirect sibcall return as well, set before the
9700 epilogue and used after the epilogue. */
9702 tmp = gen_rtx_REG (DImode, R11_REG);
9705 gcc_assert (src != hard_frame_pointer_rtx
9706 && dest != hard_frame_pointer_rtx);
9707 tmp = hard_frame_pointer_rtx;
9709 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
9711 add_frame_related_expr = true;
9713 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
9716 insn = emit_insn (insn);
9718 ix86_add_queued_cfa_restore_notes (insn);
9724 gcc_assert (m->fs.cfa_reg == src);
9725 m->fs.cfa_offset += INTVAL (offset);
9726 m->fs.cfa_reg = dest;
9728 r = gen_rtx_PLUS (Pmode, src, offset);
9729 r = gen_rtx_SET (VOIDmode, dest, r);
9730 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9731 RTX_FRAME_RELATED_P (insn) = 1;
9735 RTX_FRAME_RELATED_P (insn) = 1;
9736 if (add_frame_related_expr)
9738 rtx r = gen_rtx_PLUS (Pmode, src, offset);
9739 r = gen_rtx_SET (VOIDmode, dest, r);
9740 add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
9744 if (dest == stack_pointer_rtx)
9746 HOST_WIDE_INT ooffset = m->fs.sp_offset;
9747 bool valid = m->fs.sp_valid;
9749 if (src == hard_frame_pointer_rtx)
9751 valid = m->fs.fp_valid;
9752 ooffset = m->fs.fp_offset;
9754 else if (src == crtl->drap_reg)
9756 valid = m->fs.drap_valid;
9761 /* Else there are two possibilities: SP itself, which we set
9762 up as the default above, or EH_RETURN_STACKADJ_RTX, which is
9763 taken care of by hand along the eh_return path. */
9764 gcc_checking_assert (src == stack_pointer_rtx
9765 || offset == const0_rtx);
9768 m->fs.sp_offset = ooffset - INTVAL (offset);
9769 m->fs.sp_valid = valid;
9773 /* Find an available register to be used as the dynamic realign argument
9774 pointer register. Such a register will be written in the prologue and
9775 used at the beginning of the body, so it must not be
9776 1. a parameter passing register.
9778 We reuse the static-chain register if it is available. Otherwise, we
9779 use DI for i386 and R13 for x86-64. We chose R13 since it has
9782 Return: the regno of the chosen register. */
9785 find_drap_reg (void)
9787 tree decl = cfun->decl;
9791 /* Use R13 for a nested function or a function that needs a static
9792 chain. Since a function with a tail call may use any caller-saved
9793 register in the epilogue, DRAP must not use a caller-saved
9794 register in such a case. */
9795 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9802 /* Use DI for a nested function or a function that needs a static
9803 chain. Since a function with a tail call may use any caller-saved
9804 register in the epilogue, DRAP must not use a caller-saved
9805 register in such a case. */
9806 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9809 /* Reuse the static chain register if it isn't used for parameter
9811 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
9812 && !lookup_attribute ("fastcall",
9813 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
9814 && !lookup_attribute ("thiscall",
9815 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
9822 /* Return minimum incoming stack alignment. */
9825 ix86_minimum_incoming_stack_boundary (bool sibcall)
9827 unsigned int incoming_stack_boundary;
9829 /* Prefer the one specified at command line. */
9830 if (ix86_user_incoming_stack_boundary)
9831 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
9832 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack
9833 boundary if -mstackrealign is used, this isn't for a sibcall check,
9834 and the estimated stack alignment is 128 bits. */
9837 && ix86_force_align_arg_pointer
9838 && crtl->stack_alignment_estimated == 128)
9839 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9841 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
9843 /* Incoming stack alignment can be changed on individual functions
9844 via force_align_arg_pointer attribute. We use the smallest
9845 incoming stack boundary. */
9846 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
9847 && lookup_attribute (ix86_force_align_arg_pointer_string,
9848 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
9849 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9851 /* The incoming stack frame has to be aligned at least at
9852 parm_stack_boundary. */
9853 if (incoming_stack_boundary < crtl->parm_stack_boundary)
9854 incoming_stack_boundary = crtl->parm_stack_boundary;
9856 /* The stack at the entry of main is aligned by the runtime. We use
9857 the smallest incoming stack boundary. */
9858 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
9859 && DECL_NAME (current_function_decl)
9860 && MAIN_NAME_P (DECL_NAME (current_function_decl))
9861 && DECL_FILE_SCOPE_P (current_function_decl))
9862 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
9864 return incoming_stack_boundary;
9867 /* Update incoming stack boundary and estimated stack alignment. */
9870 ix86_update_stack_boundary (void)
9872 ix86_incoming_stack_boundary
9873 = ix86_minimum_incoming_stack_boundary (false);
9875 /* x86_64 vararg needs 16byte stack alignment for register save
9879 && crtl->stack_alignment_estimated < 128)
9880 crtl->stack_alignment_estimated = 128;
9883 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
9884 needed or an rtx for DRAP otherwise. */
9887 ix86_get_drap_rtx (void)
9889 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
9890 crtl->need_drap = true;
9892 if (stack_realign_drap)
9894 /* Assign DRAP to vDRAP and return vDRAP. */
9895 unsigned int regno = find_drap_reg ();
9900 arg_ptr = gen_rtx_REG (Pmode, regno);
9901 crtl->drap_reg = arg_ptr;
9904 drap_vreg = copy_to_reg (arg_ptr);
9908 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
9911 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
9912 RTX_FRAME_RELATED_P (insn) = 1;
9920 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
9923 ix86_internal_arg_pointer (void)
9925 return virtual_incoming_args_rtx;
9928 struct scratch_reg {
9933 /* Return a short-lived scratch register for use on function entry.
9934 In 32-bit mode, it is valid only after the registers are saved
9935 in the prologue. This register must be released by means of
9936 release_scratch_register_on_entry once it is dead. */
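/* Illustrative example (added): for a plain 32-bit cdecl function with
   no regparm, no fastcall and no static chain, the logic below simply
   picks %eax; as the calling conventions consume eax/edx/ecx, it falls
   back to callee-saved registers that the prologue saves anyway.  */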
9939 get_scratch_register_on_entry (struct scratch_reg *sr)
9947 /* We always use R11 in 64-bit mode. */
9952 tree decl = current_function_decl, fntype = TREE_TYPE (decl);
9954 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
9955 bool static_chain_p = DECL_STATIC_CHAIN (decl);
9956 int regparm = ix86_function_regparm (fntype, decl);
9958 = crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;
9960 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
9961 for the static chain register. */
9962 if ((regparm < 1 || (fastcall_p && !static_chain_p))
9963 && drap_regno != AX_REG)
9965 else if (regparm < 2 && drap_regno != DX_REG)
9967 /* ecx is the static chain register. */
9968 else if (regparm < 3 && !fastcall_p && !static_chain_p
9969 && drap_regno != CX_REG)
9971 else if (ix86_save_reg (BX_REG, true))
9973 /* esi is the static chain register. */
9974 else if (!(regparm == 3 && static_chain_p)
9975 && ix86_save_reg (SI_REG, true))
9977 else if (ix86_save_reg (DI_REG, true))
9981 regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
9986 sr->reg = gen_rtx_REG (Pmode, regno);
9989 rtx insn = emit_insn (gen_push (sr->reg));
9990 RTX_FRAME_RELATED_P (insn) = 1;
9994 /* Release a scratch register obtained from the preceding function. */
9997 release_scratch_register_on_entry (struct scratch_reg *sr)
10001 rtx x, insn = emit_insn (gen_pop (sr->reg));
10003 /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
10004 RTX_FRAME_RELATED_P (insn) = 1;
10005 x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
10006 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10007 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
10011 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
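/* For example, with the usual STACK_CHECK_PROBE_INTERVAL_EXP of 12,
   PROBE_INTERVAL is 1 << 12 = 4096 bytes, i.e. one probe per page.  */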
10013 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
10016 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
10018 /* We skip the probe for the first interval + a small dope of 4 words and
10019 probe that many bytes past the specified size to maintain a protection
10020 area at the bottom of the stack. */
10021 const int dope = 4 * UNITS_PER_WORD;
10022 rtx size_rtx = GEN_INT (size), last;
10024 /* See if we have a constant small number of probes to generate. If so,
10025 that's the easy case. The run-time loop is made up of 11 insns in the
10026 generic case while the compile-time loop is made up of 3+2*(n-1) insns
10027 for n # of intervals. */
10028 if (size <= 5 * PROBE_INTERVAL)
10030 HOST_WIDE_INT i, adjust;
10031 bool first_probe = true;
10033 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
10034 values of N from 1 until it exceeds SIZE. If only one probe is
10035 needed, this will not generate any code. Then adjust and probe
10036 to PROBE_INTERVAL + SIZE. */
10037 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10041 adjust = 2 * PROBE_INTERVAL + dope;
10042 first_probe = false;
10045 adjust = PROBE_INTERVAL;
10047 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10048 plus_constant (stack_pointer_rtx, -adjust)));
10049 emit_stack_probe (stack_pointer_rtx);
10053 adjust = size + PROBE_INTERVAL + dope;
10055 adjust = size + PROBE_INTERVAL - i;
10057 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10058 plus_constant (stack_pointer_rtx, -adjust)));
10059 emit_stack_probe (stack_pointer_rtx);
10061 /* Adjust back to account for the additional first interval. */
10062 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10063 plus_constant (stack_pointer_rtx,
10064 PROBE_INTERVAL + dope)));
10067 /* Otherwise, do the same as above, but in a loop. Note that we must be
10068 extra careful with variables wrapping around because we might be at
10069 the very top (or the very bottom) of the address space and we have
10070 to be able to handle this case properly; in particular, we use an
10071 equality test for the loop condition. */
10074 HOST_WIDE_INT rounded_size;
10075 struct scratch_reg sr;
10077 get_scratch_register_on_entry (&sr);
10080 /* Step 1: round SIZE to the previous multiple of the interval. */
10082 rounded_size = size & -PROBE_INTERVAL;
10085 /* Step 2: compute initial and final value of the loop counter. */
10087 /* SP = SP_0 + PROBE_INTERVAL. */
10088 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10089 plus_constant (stack_pointer_rtx,
10090 - (PROBE_INTERVAL + dope))));
10092 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
10093 emit_move_insn (sr.reg, GEN_INT (-rounded_size));
10094 emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
10095 gen_rtx_PLUS (Pmode, sr.reg,
10096 stack_pointer_rtx)));
10099 /* Step 3: the loop
10101 while (SP != LAST_ADDR)
10103 SP = SP + PROBE_INTERVAL
10107 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
10108 values of N from 1 until it is equal to ROUNDED_SIZE. */
10110 emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));
10113 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
10114 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
10116 if (size != rounded_size)
10118 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10119 plus_constant (stack_pointer_rtx,
10120 rounded_size - size)));
10121 emit_stack_probe (stack_pointer_rtx);
10124 /* Adjust back to account for the additional first interval. */
10125 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10126 plus_constant (stack_pointer_rtx,
10127 PROBE_INTERVAL + dope)));
10129 release_scratch_register_on_entry (&sr);
10132 gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
10134 /* Even if the stack pointer isn't the CFA register, we need to correctly
10135 describe the adjustments made to it, in particular differentiate the
10136 frame-related ones from the frame-unrelated ones. */
10139 rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
10140 XVECEXP (expr, 0, 0)
10141 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10142 plus_constant (stack_pointer_rtx, -size));
10143 XVECEXP (expr, 0, 1)
10144 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10145 plus_constant (stack_pointer_rtx,
10146 PROBE_INTERVAL + dope + size));
10147 add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
10148 RTX_FRAME_RELATED_P (last) = 1;
10150 cfun->machine->fs.sp_offset += size;
10153 /* Make sure nothing is scheduled before we are done. */
10154 emit_insn (gen_blockage ());
10157 /* Adjust the stack pointer up to REG while probing it. */
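/* A sketch (AT&T syntax, 64-bit, illustrative only) of the loop this
   routine emits, assuming PROBE_INTERVAL == 4096 and %r11 as the
   scratch register holding LAST_ADDR:

   .LPSRL0:
       cmpq %r11, %rsp
       je   .LPSRE0
       subq $4096, %rsp
       orq  $0, (%rsp)
       jmp  .LPSRL0
   .LPSRE0:                                                           */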
10160 output_adjust_stack_and_probe (rtx reg)
10162 static int labelno = 0;
10163 char loop_lab[32], end_lab[32];
10166 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10167 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10169 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10171 /* Jump to END_LAB if SP == LAST_ADDR. */
10172 xops[0] = stack_pointer_rtx;
10174 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
10175 fputs ("\tje\t", asm_out_file);
10176 assemble_name_raw (asm_out_file, end_lab);
10177 fputc ('\n', asm_out_file);
10179 /* SP = SP + PROBE_INTERVAL. */
10180 xops[1] = GEN_INT (PROBE_INTERVAL);
10181 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
10184 xops[1] = const0_rtx;
10185 output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);
10187 fprintf (asm_out_file, "\tjmp\t");
10188 assemble_name_raw (asm_out_file, loop_lab);
10189 fputc ('\n', asm_out_file);
10191 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
10196 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
10197 inclusive. These are offsets from the current stack pointer. */
10200 ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
10202 /* See if we have a constant small number of probes to generate. If so,
10203 that's the easy case. The run-time loop is made up of 7 insns in the
10204 generic case while the compile-time loop is made up of n insns for n #
10206 if (size <= 7 * PROBE_INTERVAL)
10210 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
10211 it exceeds SIZE. If only one probe is needed, this will not
10212 generate any code. Then probe at FIRST + SIZE. */
10213 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10214 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
10216 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
10219 /* Otherwise, do the same as above, but in a loop. Note that we must be
10220 extra careful with variables wrapping around because we might be at
10221 the very top (or the very bottom) of the address space and we have
10222 to be able to handle this case properly; in particular, we use an
10223 equality test for the loop condition. */
10226 HOST_WIDE_INT rounded_size, last;
10227 struct scratch_reg sr;
10229 get_scratch_register_on_entry (&sr);
10232 /* Step 1: round SIZE to the previous multiple of the interval. */
10234 rounded_size = size & -PROBE_INTERVAL;
10237 /* Step 2: compute initial and final value of the loop counter. */
10239 /* TEST_OFFSET = FIRST. */
10240 emit_move_insn (sr.reg, GEN_INT (-first));
10242 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
10243 last = first + rounded_size;
10246 /* Step 3: the loop
10248 while (TEST_ADDR != LAST_ADDR)
10250 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
10254 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
10255 until it is equal to ROUNDED_SIZE. */
10257 emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));
10260 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
10261 that SIZE is equal to ROUNDED_SIZE. */
10263 if (size != rounded_size)
10264 emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
10267 rounded_size - size));
10269 release_scratch_register_on_entry (&sr);
10272 /* Make sure nothing is scheduled before we are done. */
10273 emit_insn (gen_blockage ());
/* Probe a range of stack addresses from REG to END, inclusive.  These are
   offsets from the current stack pointer.  */

const char *
output_probe_stack_range (rtx reg, rtx end)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[3];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg;
  xops[1] = end;
  output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
  fputs ("\tje\t", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (PROBE_INTERVAL);
  output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);

  /* Probe at TEST_ADDR.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = reg;
  xops[2] = const0_rtx;
  output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);

  fprintf (asm_out_file, "\tjmp\t");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
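/* Unlike the loop above, this one leaves SP itself untouched: the emitted
   "or" probes at SP plus the (negative) offset register, e.g. roughly
   "orl $0, (%rsp,%rcx)" in AT&T syntax, with %rcx standing in for the
   scratch register chosen by get_scratch_register_on_entry.  */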
/* Finalize the stack_realign_needed flag, which guides the prologue and
   epilogue to be generated in the correct form.  */

static void
ix86_finalize_stack_realign_flags (void)
{
  /* Check if stack realignment is really needed after reload, and
     store the result in cfun.  */
  unsigned int incoming_stack_boundary
    = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
       ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
  unsigned int stack_realign = (incoming_stack_boundary
				< (current_function_is_leaf
				   ? crtl->max_used_stack_slot_alignment
				   : crtl->stack_alignment_needed));

  if (crtl->stack_realign_finalized)
    {
      /* After stack_realign_needed is finalized, we can no longer
	 update it.  */
      gcc_assert (crtl->stack_realign_needed == stack_realign);
    }
  else
    {
      crtl->stack_realign_needed = stack_realign;
      crtl->stack_realign_finalized = true;
    }
}
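/* For instance (an illustrative case, not taken from the sources): a leaf
   function whose largest stack slot needs 128-bit alignment for SSE
   spills, entered under only a 32-bit incoming boundary, yields 32 < 128
   and hence stack_realign true; the prologue must then realign SP.  */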
/* Expand the prologue into a bunch of separate insns.  */

void
ix86_expand_prologue (void)
{
  struct machine_function *m = cfun->machine;
  rtx insn, t;
  bool pic_reg_used;
  struct ix86_frame frame;
  HOST_WIDE_INT allocate;
  bool int_registers_saved;

  ix86_finalize_stack_realign_flags ();

  /* DRAP should not coexist with stack_realign_fp.  */
  gcc_assert (!(crtl->drap_reg && stack_realign_fp));

  memset (&m->fs, 0, sizeof (m->fs));

  /* Initialize CFA state for before the prologue.  */
  m->fs.cfa_reg = stack_pointer_rtx;
  m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;

  /* Track SP offset to the CFA.  We continue tracking this after we've
     swapped the CFA register away from SP.  In the case of re-alignment
     this is fudged; we're interested in offsets within the local frame.  */
  m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
  m->fs.sp_valid = true;

  ix86_compute_frame_layout (&frame);

  if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
    {
      rtx push, mov;

      /* We should have already generated an error for any use of
	 ms_hook on a nested function.  */
      gcc_checking_assert (!ix86_static_chain_on_stack);

      /* Check if profiling is active and we shall use the profiling-
	 before-prologue variant.  If so, issue a sorry.  */
      if (crtl->profile && flag_fentry != 0)
	sorry ("ms_hook_prologue attribute isn%'t compatible "
	       "with -mfentry for 32-bit");

      /* In ix86_asm_output_function_label we emitted:
	 8b ff     movl.s %edi,%edi
	 55        push   %ebp
	 8b ec     movl.s %esp,%ebp

	 This matches the hookable function prologue in Win32 API
	 functions in Microsoft Windows XP Service Pack 2 and newer.
	 Wine uses this to enable Windows apps to hook the Win32 API
	 functions provided by Wine.

	 What that means is that we've already set up the frame pointer.  */

      if (frame_pointer_needed
	  && !(crtl->drap_reg && crtl->stack_realign_needed))
	{
	  /* We've decided to use the frame pointer already set up.
	     Describe this to the unwinder by pretending that both
	     push and mov insns happen right here.

	     Putting the unwind info here at the end of the ms_hook
	     is done so that we can make absolutely certain we get
	     the required byte sequence at the start of the function,
	     rather than relying on an assembler that can produce
	     the exact encoding required.

	     However it does mean (in the unpatched case) that we have
	     a 1-insn window where the asynchronous unwind info is
	     incorrect.  However, if we placed the unwind info at
	     its correct location we would have incorrect unwind info
	     in the patched case.  Which is probably all moot since
	     I don't expect Wine generates dwarf2 unwind info for the
	     system libraries that use this feature.  */

	  insn = emit_insn (gen_blockage ());

	  push = gen_push (hard_frame_pointer_rtx);
	  mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
			     stack_pointer_rtx);
	  RTX_FRAME_RELATED_P (push) = 1;
	  RTX_FRAME_RELATED_P (mov) = 1;

	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));

	  /* Note that gen_push incremented m->fs.cfa_offset, even
	     though we didn't emit the push insn here.  */
	  m->fs.cfa_reg = hard_frame_pointer_rtx;
	  m->fs.fp_offset = m->fs.cfa_offset;
	  m->fs.fp_valid = true;
	}
      else
	{
	  /* The frame pointer is not needed, so pop %ebp again.
	     This leaves us with a pristine state.  */
	  emit_insn (gen_pop (hard_frame_pointer_rtx));
	}
    }

  /* The first insn of a function that accepts its static chain on the
     stack is to push the register that would be filled in by a direct
     call.  This insn will be skipped by the trampoline.  */
  else if (ix86_static_chain_on_stack)
    {
      insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
      emit_insn (gen_blockage ());

      /* We don't want to interpret this push insn as a register save,
	 only as a stack adjustment.  The real copy of the register as
	 a save will be done later, if needed.  */
      t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
      t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Emit prologue code to adjust stack alignment and set up DRAP, in case
     DRAP is needed and stack realignment is really needed after reload.  */
  if (stack_realign_drap)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;

      /* Only need to push the parameter pointer reg if it is caller
	 saved.  */
      if (!call_used_regs[REGNO (crtl->drap_reg)])
	{
	  /* Push the arg pointer reg.  */
	  insn = emit_insn (gen_push (crtl->drap_reg));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Grab the argument pointer.  */
      t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
      insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
      RTX_FRAME_RELATED_P (insn) = 1;
      m->fs.cfa_reg = crtl->drap_reg;
      m->fs.cfa_offset = 0;

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (-align_bytes)));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Replicate the return address on the stack so that the return
	 address can be reached via the (argp - 1) slot.  This is needed
	 to implement macro RETURN_ADDR_RTX and intrinsic function
	 expand_builtin_return_addr etc.  */
      t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
      t = gen_frame_mem (Pmode, t);
      insn = emit_insn (gen_push (t));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* For the purposes of frame and register save area addressing,
	 we've started over with a new frame.  */
      m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
      m->fs.realigned = true;
    }
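  /* A rough picture of the frame right after the DRAP setup above
     (illustrative only; addresses decrease downward):

	  incoming stack arguments
	  return address           <- DRAP - UNITS_PER_WORD
	  [saved DRAP register]       (pushed only if caller-saved)
	  ...alignment padding...
	  copy of return address   <- SP (now aligned)

     DRAP points at the incoming argument area, so argument accesses no
     longer depend on the realigned stack pointer.  */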
  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      /* Note: AT&T enter does NOT have reversed args.  Enter is probably
	 slower on all targets.  Also sdb doesn't like it.  */
      insn = emit_insn (gen_push (hard_frame_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
	{
	  insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  if (m->fs.cfa_reg == stack_pointer_rtx)
	    m->fs.cfa_reg = hard_frame_pointer_rtx;
	  m->fs.fp_offset = m->fs.sp_offset;
	  m->fs.fp_valid = true;
	}
    }

  int_registers_saved = (frame.nregs == 0);

  if (!int_registers_saved)
    {
      /* If saving registers via PUSH, do so now.  */
      if (!frame.save_regs_using_mov)
	{
	  ix86_emit_save_regs ();
	  int_registers_saved = true;
	  gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
	}

      /* When using the red zone we may start saving registers before
	 allocating the stack frame, saving one cycle of the prologue.
	 However, avoid doing this if we have to probe the stack; at
	 least on x86_64 the stack probe can turn into a call that
	 clobbers a red zone location.  */
      else if (ix86_using_red_zone ()
	       && (! TARGET_STACK_PROBE
		   || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
	{
	  ix86_emit_save_regs_using_mov (frame.reg_save_offset);
	  int_registers_saved = true;
	}
    }

  if (stack_realign_fp)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
      gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);

      /* The computation of the size of the re-aligned stack frame means
	 that we must allocate the size of the register save area before
	 performing the actual alignment.  Otherwise we cannot guarantee
	 that there's enough storage above the realignment point.  */
      if (m->fs.sp_offset != frame.sse_reg_save_offset)
	pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				   GEN_INT (m->fs.sp_offset
					    - frame.sse_reg_save_offset),
				   -1, false);

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (-align_bytes)));

      /* For the purposes of register save area addressing, the stack
	 pointer is no longer valid.  As for the value of sp_offset,
	 see ix86_compute_frame_layout, which we need to match in order
	 to pass verification of stack_pointer_offset at the end.  */
      m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
      m->fs.sp_valid = false;
    }
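  /* Example of the rounding above (illustrative numbers only): with
     align_bytes == 32 and sp_offset == 76 on entry to this block,
     (76 + 32) & -32 == 96, i.e. sp_offset is bumped to the next multiple
     of the alignment, matching what ix86_compute_frame_layout assumed.  */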
  allocate = frame.stack_pointer_offset - m->fs.sp_offset;

  if (flag_stack_usage)
    {
      /* We start to count from ARG_POINTER.  */
      HOST_WIDE_INT stack_size = frame.stack_pointer_offset;

      /* If it was realigned, take into account the fake frame.  */
      if (stack_realign_drap)
	{
	  if (ix86_static_chain_on_stack)
	    stack_size += UNITS_PER_WORD;

	  if (!call_used_regs[REGNO (crtl->drap_reg)])
	    stack_size += UNITS_PER_WORD;

	  /* This over-estimates by 1 minimal-stack-alignment-unit but
	     mitigates that by counting in the new return address slot.  */
	  current_function_dynamic_stack_size
	    += crtl->stack_alignment_needed / BITS_PER_UNIT;
	}

      current_function_static_stack_size = stack_size;
    }

  /* The stack has already been decremented by the instruction calling us,
     so probe if the size is non-negative to preserve the protection
     area.  */
  if (allocate >= 0 && flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    {
      /* We expect the registers to be saved when probes are used.  */
      gcc_assert (int_registers_saved);

      if (STACK_CHECK_MOVING_SP)
	{
	  ix86_adjust_stack_and_probe (allocate);
	  allocate = 0;
	}
      else
	{
	  HOST_WIDE_INT size = allocate;

	  if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
	    size = 0x80000000 - STACK_CHECK_PROTECT - 1;

	  if (TARGET_STACK_PROBE)
	    ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
	  else
	    ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
	}
    }

  if (allocate == 0)
    ;
  else if (!ix86_target_stack_probe ()
	   || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
    {
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				 GEN_INT (-allocate), -1,
				 m->fs.cfa_reg == stack_pointer_rtx);
    }
  else
    {
      rtx eax = gen_rtx_REG (Pmode, AX_REG);
      rtx r10 = NULL;
      rtx (*adjust_stack_insn)(rtx, rtx, rtx);

      bool eax_live = false;
      bool r10_live = false;

      if (TARGET_64BIT)
	r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
      if (!TARGET_64BIT_MS_ABI)
	eax_live = ix86_eax_live_at_start_p ();

      if (eax_live)
	{
	  emit_insn (gen_push (eax));
	  allocate -= UNITS_PER_WORD;
	}
      if (r10_live)
	{
	  r10 = gen_rtx_REG (Pmode, R10_REG);
	  emit_insn (gen_push (r10));
	  allocate -= UNITS_PER_WORD;
	}

      emit_move_insn (eax, GEN_INT (allocate));
      emit_insn (ix86_gen_allocate_stack_worker (eax, eax));

      /* Use the fact that AX still contains ALLOCATE.  */
      adjust_stack_insn = (TARGET_64BIT
			   ? gen_pro_epilogue_adjust_stack_di_sub
			   : gen_pro_epilogue_adjust_stack_si_sub);

      insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
					   stack_pointer_rtx, eax));

      /* Note that SEH directives need to continue tracking the stack
	 pointer even after the frame pointer has been set up.  */
      if (m->fs.cfa_reg == stack_pointer_rtx || TARGET_SEH)
	{
	  if (m->fs.cfa_reg == stack_pointer_rtx)
	    m->fs.cfa_offset += allocate;

	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				     plus_constant (stack_pointer_rtx,
						    -allocate)));
	}
      m->fs.sp_offset += allocate;

      if (r10_live && eax_live)
	{
	  t = choose_baseaddr (m->fs.sp_offset - allocate);
	  emit_move_insn (r10, gen_frame_mem (Pmode, t));
	  t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
	  emit_move_insn (eax, gen_frame_mem (Pmode, t));
	}
      else if (eax_live || r10_live)
	{
	  t = choose_baseaddr (m->fs.sp_offset - allocate);
	  emit_move_insn ((eax_live ? eax : r10), gen_frame_mem (Pmode, t));
	}
    }
  gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);
  /* If we haven't already set up the frame pointer, do so now.  */
  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
			    GEN_INT (frame.stack_pointer_offset
				     - frame.hard_frame_pointer_offset));
      insn = emit_insn (insn);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);

      if (m->fs.cfa_reg == stack_pointer_rtx)
	m->fs.cfa_reg = hard_frame_pointer_rtx;
      m->fs.fp_offset = frame.hard_frame_pointer_offset;
      m->fs.fp_valid = true;
    }

  if (!int_registers_saved)
    ix86_emit_save_regs_using_mov (frame.reg_save_offset);
  if (frame.nsseregs)
    ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);

  pic_reg_used = false;
  if (pic_offset_table_rtx
      && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
	  || crtl->profile))
    {
      unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();

      if (alt_pic_reg_used != INVALID_REGNUM)
	SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);

      pic_reg_used = true;
    }

  if (pic_reg_used)
    {
      if (TARGET_64BIT)
	{
	  if (ix86_cmodel == CM_LARGE_PIC)
	    {
	      rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
	      rtx label = gen_label_rtx ();
	      emit_label (label);
	      LABEL_PRESERVE_P (label) = 1;
	      gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
	      insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx,
						   label));
	      insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
	      insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
					    pic_offset_table_rtx, tmp_reg));
	    }
	  else
	    insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
	}
      else
	insn = emit_insn (gen_set_got (pic_offset_table_rtx));
    }

  /* In the pic_reg_used case, make sure that the got load isn't deleted
     when mcount needs it.  Blockage to avoid call movement across mcount
     call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
     note.  */
  if (crtl->profile && !flag_fentry && pic_reg_used)
    emit_insn (gen_prologue_use (pic_offset_table_rtx));

  if (crtl->drap_reg && !crtl->stack_realign_needed)
    {
      /* vDRAP is set up, but after reload it turns out that stack
	 realignment isn't necessary; emit the prologue to set up DRAP
	 without the stack-realignment adjustment.  */
      t = choose_baseaddr (0);
      emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
    }

  /* Prevent instructions from being scheduled into the register save push
     sequence when access to the redzone area is done through the frame
     pointer.  The offset between the frame pointer and the stack pointer
     is calculated relative to the value of the stack pointer at the end
     of the function prologue, and moving instructions that access the
     redzone area via the frame pointer inside the push sequence violates
     this assumption.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  /* Emit cld instruction if stringops are used in the function.  */
  if (TARGET_CLD && ix86_current_function_needs_cld)
    emit_insn (gen_cld ());

  /* SEH requires that the prologue end within 256 bytes of the start of
     the function.  Prevent instruction schedules that would extend
     that.  */
  if (TARGET_SEH)
    emit_insn (gen_blockage ());
}
/* Emit code to restore REG using a POP insn.  */

static void
ix86_emit_restore_reg_using_pop (rtx reg)
{
  struct machine_function *m = cfun->machine;
  rtx insn = emit_insn (gen_pop (reg));

  ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
  m->fs.sp_offset -= UNITS_PER_WORD;

  if (m->fs.cfa_reg == crtl->drap_reg
      && REGNO (reg) == REGNO (crtl->drap_reg))
    {
      /* Previously we'd represented the CFA as an expression
	 like *(%ebp - 8).  We've just popped that value from
	 the stack, which means we need to reset the CFA to
	 the drap register.  This will remain until we restore
	 the stack pointer.  */
      add_reg_note (insn, REG_CFA_DEF_CFA, reg);
      RTX_FRAME_RELATED_P (insn) = 1;

      /* This means that the DRAP register is valid for addressing too.  */
      m->fs.drap_valid = true;
      return;
    }

  if (m->fs.cfa_reg == stack_pointer_rtx)
    {
      rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
      x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
      RTX_FRAME_RELATED_P (insn) = 1;

      m->fs.cfa_offset -= UNITS_PER_WORD;
    }

  /* When the frame pointer is the CFA, and we pop it, we are
     swapping back to the stack pointer as the CFA.  This happens
     for stack frames that don't allocate other data, so we assume
     the stack pointer is now pointing at the return address, i.e.
     the function entry state, which makes the offset be 1 word.  */
  if (reg == hard_frame_pointer_rtx)
    {
      m->fs.fp_valid = false;
      if (m->fs.cfa_reg == hard_frame_pointer_rtx)
	{
	  m->fs.cfa_reg = stack_pointer_rtx;
	  m->fs.cfa_offset -= UNITS_PER_WORD;

	  add_reg_note (insn, REG_CFA_DEF_CFA,
			gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				      GEN_INT (m->fs.cfa_offset)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
}

/* Emit code to restore saved registers using POP insns.  */

static void
ix86_emit_restore_regs_using_pop (void)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
      ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno));
}

/* Emit code and notes for the LEAVE instruction.  */

static void
ix86_emit_leave (void)
{
  struct machine_function *m = cfun->machine;
  rtx insn = emit_insn (ix86_gen_leave ());

  ix86_add_queued_cfa_restore_notes (insn);

  gcc_assert (m->fs.fp_valid);
  m->fs.sp_valid = true;
  m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
  m->fs.fp_valid = false;

  if (m->fs.cfa_reg == hard_frame_pointer_rtx)
    {
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = m->fs.sp_offset;

      add_reg_note (insn, REG_CFA_DEF_CFA,
		    plus_constant (stack_pointer_rtx, m->fs.sp_offset));
      RTX_FRAME_RELATED_P (insn) = 1;
      ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
				 m->fs.fp_offset);
    }
}

/* Emit code to restore saved registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */

static void
ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
				  int maybe_eh_return)
{
  struct machine_function *m = cfun->machine;
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
	rtx reg = gen_rtx_REG (Pmode, regno);
	rtx insn, mem;

	mem = choose_baseaddr (cfa_offset);
	mem = gen_frame_mem (Pmode, mem);
	insn = emit_move_insn (reg, mem);

	if (m->fs.cfa_reg == crtl->drap_reg
	    && regno == REGNO (crtl->drap_reg))
	  {
	    /* Previously we'd represented the CFA as an expression
	       like *(%ebp - 8).  We've just popped that value from
	       the stack, which means we need to reset the CFA to
	       the drap register.  This will remain until we restore
	       the stack pointer.  */
	    add_reg_note (insn, REG_CFA_DEF_CFA, reg);
	    RTX_FRAME_RELATED_P (insn) = 1;

	    /* This means that the DRAP register is valid for
	       addressing.  */
	    m->fs.drap_valid = true;
	  }
	else
	  ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);

	cfa_offset -= UNITS_PER_WORD;
      }
}

/* Emit code to restore saved SSE registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */

static void
ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
				      int maybe_eh_return)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
	rtx reg = gen_rtx_REG (V4SFmode, regno);
	rtx mem;

	mem = choose_baseaddr (cfa_offset);
	mem = gen_rtx_MEM (V4SFmode, mem);
	set_mem_align (mem, 128);
	emit_move_insn (reg, mem);

	ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);

	cfa_offset -= 16;
      }
}
/* Restore function stack, frame, and registers.  */

void
ix86_expand_epilogue (int style)
{
  struct machine_function *m = cfun->machine;
  struct machine_frame_state frame_state_save = m->fs;
  struct ix86_frame frame;
  bool restore_regs_via_mov;
  bool using_drap;

  ix86_finalize_stack_realign_flags ();
  ix86_compute_frame_layout (&frame);

  m->fs.sp_valid = (!frame_pointer_needed
		    || (current_function_sp_is_unchanging
			&& !stack_realign_fp));
  gcc_assert (!m->fs.sp_valid
	      || m->fs.sp_offset == frame.stack_pointer_offset);

  /* The FP must be valid if the frame pointer is present.  */
  gcc_assert (frame_pointer_needed == m->fs.fp_valid);
  gcc_assert (!m->fs.fp_valid
	      || m->fs.fp_offset == frame.hard_frame_pointer_offset);

  /* We must have *some* valid pointer to the stack frame.  */
  gcc_assert (m->fs.sp_valid || m->fs.fp_valid);

  /* The DRAP is never valid at this point.  */
  gcc_assert (!m->fs.drap_valid);

  /* See the comment about red zone and frame
     pointer usage in ix86_expand_prologue.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  using_drap = crtl->drap_reg && crtl->stack_realign_needed;
  gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);

  /* Determine the CFA offset of the end of the red-zone.  */
  m->fs.red_zone_offset = 0;
  if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
    {
      /* The red-zone begins below the return address.  */
      m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;

      /* When the register save area is in the aligned portion of
	 the stack, determine the maximum runtime displacement that
	 matches up with the aligned frame.  */
      if (stack_realign_drap)
	m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
				  + UNITS_PER_WORD);
    }

  /* Special care must be taken for the normal return case of a function
     using eh_return: the eax and edx registers are marked as saved, but
     not restored along this path.  Adjust the save location to match.  */
  if (crtl->calls_eh_return && style != 2)
    frame.reg_save_offset -= 2 * UNITS_PER_WORD;

  /* EH_RETURN requires the use of moves to function properly.  */
  if (crtl->calls_eh_return)
    restore_regs_via_mov = true;
  /* SEH requires the use of pops to identify the epilogue.  */
  else if (TARGET_SEH)
    restore_regs_via_mov = false;
  /* If we're only restoring one register and sp is not valid then
     use a move instruction to restore the register, since it's
     less work than reloading sp and popping the register.  */
  else if (!m->fs.sp_valid && frame.nregs <= 1)
    restore_regs_via_mov = true;
  else if (TARGET_EPILOGUE_USING_MOVE
	   && cfun->machine->use_fast_prologue_epilogue
	   && (frame.nregs > 1
	       || m->fs.sp_offset != frame.reg_save_offset))
    restore_regs_via_mov = true;
  else if (frame_pointer_needed
	   && !frame.nregs
	   && m->fs.sp_offset != frame.reg_save_offset)
    restore_regs_via_mov = true;
  else if (frame_pointer_needed
	   && TARGET_USE_LEAVE
	   && cfun->machine->use_fast_prologue_epilogue
	   && frame.nregs == 1)
    restore_regs_via_mov = true;
  else
    restore_regs_via_mov = false;
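  /* To summarize the chain above (editorial note, not from the sources):
     moves are forced where correctness requires them (eh_return) and pops
     are forced for SEH; otherwise moves are chosen when the save area is
     not reachable by pops alone or when the tuning flags say moves are
     cheaper, and plain pops are the fallback.  */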
  if (restore_regs_via_mov || frame.nsseregs)
    {
      /* Ensure that the entire register save area is addressable via
	 the stack pointer, if we will restore via sp.  */
      if (TARGET_64BIT
	  && m->fs.sp_offset > 0x7fffffff
	  && !(m->fs.fp_valid || m->fs.drap_valid)
	  && (frame.nsseregs + frame.nregs) != 0)
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     GEN_INT (m->fs.sp_offset
					      - frame.sse_reg_save_offset),
				     style,
				     m->fs.cfa_reg == stack_pointer_rtx);
	}
    }

  /* If there are any SSE registers to restore, then we have to do it
     via moves, since there's obviously no pop for SSE regs.  */
  if (frame.nsseregs)
    ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
					  style == 2);

  if (restore_regs_via_mov)
    {
      rtx t;

      if (frame.nregs)
	ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);

      /* eh_return epilogues need %ecx added to the stack pointer.  */
      if (style == 2)
	{
	  rtx insn, sa = EH_RETURN_STACKADJ_RTX;

	  /* Stack realignment doesn't work with eh_return.  */
	  gcc_assert (!stack_realign_drap);
	  /* Neither do regparm nested functions.  */
	  gcc_assert (!ix86_static_chain_on_stack);

	  if (frame_pointer_needed)
	    {
	      t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
	      t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
	      emit_insn (gen_rtx_SET (VOIDmode, sa, t));

	      t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
	      insn = emit_move_insn (hard_frame_pointer_rtx, t);

	      /* Note that we use SA as a temporary CFA, as the return
		 address is at the proper place relative to it.  We
		 pretend this happens at the FP restore insn because
		 prior to this insn the FP would be stored at the wrong
		 offset relative to SA, and after this insn we have no
		 other reasonable register to use for the CFA.  We don't
		 bother resetting the CFA to the SP for the duration of
		 the return insn.  */
	      add_reg_note (insn, REG_CFA_DEF_CFA,
			    plus_constant (sa, UNITS_PER_WORD));
	      ix86_add_queued_cfa_restore_notes (insn);
	      add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
	      RTX_FRAME_RELATED_P (insn) = 1;

	      m->fs.cfa_reg = sa;
	      m->fs.cfa_offset = UNITS_PER_WORD;
	      m->fs.fp_valid = false;

	      pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
					 const0_rtx, style, false);
	    }
	  else
	    {
	      t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
	      t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
	      insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
	      ix86_add_queued_cfa_restore_notes (insn);

	      gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
	      if (m->fs.cfa_offset != UNITS_PER_WORD)
		{
		  m->fs.cfa_offset = UNITS_PER_WORD;
		  add_reg_note (insn, REG_CFA_DEF_CFA,
				plus_constant (stack_pointer_rtx,
					       UNITS_PER_WORD));
		  RTX_FRAME_RELATED_P (insn) = 1;
		}

	      m->fs.sp_offset = UNITS_PER_WORD;
	      m->fs.sp_valid = true;
	    }
	}
    }
  else
    {
      /* SEH requires that the function end with (1) a stack adjustment
	 if necessary, (2) a sequence of pops, and (3) a return or
	 jump instruction.  Prevent insns from the function body from
	 being scheduled into this sequence.  */
      if (TARGET_SEH)
	{
	  /* Prevent a catch region from being adjacent to the standard
	     epilogue sequence.  Unfortunately neither crtl->uses_eh_lsda
	     nor several other flags that would be interesting to test
	     are set up yet.  */
	  if (flag_non_call_exceptions)
	    emit_insn (gen_nops (const1_rtx));
	  else
	    emit_insn (gen_blockage ());
	}

      /* First step is to deallocate the stack frame so that we can
	 pop the registers.  */
      if (!m->fs.sp_valid)
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx,
				     hard_frame_pointer_rtx,
				     GEN_INT (m->fs.fp_offset
					      - frame.reg_save_offset),
				     style, false);
	}
      else if (m->fs.sp_offset != frame.reg_save_offset)
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     GEN_INT (m->fs.sp_offset
					      - frame.reg_save_offset),
				     style,
				     m->fs.cfa_reg == stack_pointer_rtx);
	}
      ix86_emit_restore_regs_using_pop ();
    }

  /* If we used a frame pointer and haven't already got rid of it,
     then do so now.  */
  if (m->fs.fp_valid)
    {
      /* If the stack pointer is valid and pointing at the frame
	 pointer store address, then we only need a pop.  */
      if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
	ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
      /* Leave results in shorter dependency chains on CPUs that are
	 able to grok it fast.  */
      else if (TARGET_USE_LEAVE
	       || optimize_function_for_size_p (cfun)
	       || !cfun->machine->use_fast_prologue_epilogue)
	ix86_emit_leave ();
      else
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx,
				     hard_frame_pointer_rtx,
				     const0_rtx, style, !using_drap);
	  ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
	}
    }

  if (using_drap)
    {
      int param_ptr_offset = UNITS_PER_WORD;
      rtx insn;

      gcc_assert (stack_realign_drap);

      if (ix86_static_chain_on_stack)
	param_ptr_offset += UNITS_PER_WORD;
      if (!call_used_regs[REGNO (crtl->drap_reg)])
	param_ptr_offset += UNITS_PER_WORD;

      insn = emit_insn (gen_rtx_SET
			(VOIDmode, stack_pointer_rtx,
			 gen_rtx_PLUS (Pmode,
				       crtl->drap_reg,
				       GEN_INT (-param_ptr_offset))));
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = param_ptr_offset;
      m->fs.sp_offset = param_ptr_offset;
      m->fs.realigned = false;

      add_reg_note (insn, REG_CFA_DEF_CFA,
		    gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				  GEN_INT (param_ptr_offset)));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!call_used_regs[REGNO (crtl->drap_reg)])
	ix86_emit_restore_reg_using_pop (crtl->drap_reg);
    }

  /* At this point the stack pointer must be valid, and we must have
     restored all of the registers.  We may not have deallocated the
     entire stack frame.  We've delayed this until now because it may
     be possible to merge the local stack deallocation with the
     deallocation forced by ix86_static_chain_on_stack.  */
  gcc_assert (m->fs.sp_valid);
  gcc_assert (!m->fs.fp_valid);
  gcc_assert (!m->fs.realigned);
  if (m->fs.sp_offset != UNITS_PER_WORD)
    {
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
				 style, true);
    }

  /* Sibcall epilogues don't want a return instruction.  */
  if (style == 0)
    {
      m->fs = frame_state_save;
      return;
    }

  /* Emit vzeroupper if needed.  */
  if (TARGET_VZEROUPPER
      && !TREE_THIS_VOLATILE (cfun->decl)
      && !cfun->machine->caller_return_avx256_p)
    emit_insn (gen_avx_vzeroupper (GEN_INT (call_no_avx256)));

  if (crtl->args.pops_args && crtl->args.size)
    {
      rtx popc = GEN_INT (crtl->args.pops_args);

      /* i386 can only pop 64K bytes.  If asked to pop more, pop the
	 return address, do an explicit add, and jump indirectly to
	 the caller.  */
      if (crtl->args.pops_args >= 65536)
	{
	  rtx ecx = gen_rtx_REG (SImode, CX_REG);
	  rtx insn;

	  /* There is no "pascal" calling convention in any 64bit ABI.  */
	  gcc_assert (!TARGET_64BIT);

	  insn = emit_insn (gen_pop (ecx));
	  m->fs.cfa_offset -= UNITS_PER_WORD;
	  m->fs.sp_offset -= UNITS_PER_WORD;

	  add_reg_note (insn, REG_CFA_ADJUST_CFA,
			copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
	  add_reg_note (insn, REG_CFA_REGISTER,
			gen_rtx_SET (VOIDmode, ecx, pc_rtx));
	  RTX_FRAME_RELATED_P (insn) = 1;

	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     popc, -1, true);
	  emit_jump_insn (gen_return_indirect_internal (ecx));
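	  /* For the >= 64K case the emitted epilogue tail is effectively
	     (sketch, AT&T syntax):

		 popl	%ecx		# return address into %ecx
		 addl	$N, %esp	# N == crtl->args.pops_args
		 jmp	*%ecx		# return to caller

	     since "ret $N" itself only encodes a 16-bit immediate.  */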
	}
      else
	emit_jump_insn (gen_return_pop_internal (popc));
    }
  else
    emit_jump_insn (gen_return_internal ());

  /* Restore the state back to the state from the prologue,
     so that it's correct for the next epilogue.  */
  m->fs = frame_state_save;
}

/* Reset from the function's potential modifications.  */

static void
ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
			       HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  if (pic_offset_table_rtx)
    SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
#if TARGET_MACHO
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      insn = PREV_INSN (insn);
    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
  }
#endif
}

/* Return a scratch register to use in the split stack prologue.  The
   split stack prologue is used for -fsplit-stack.  It is the first
   instructions in the function, even before the regular prologue.
   The scratch register can be any caller-saved register which is not
   used for parameters or for the static chain.  */

static unsigned int
split_stack_prologue_scratch_regno (void)
{
  if (TARGET_64BIT)
    return R11_REG;
  else
    {
      bool is_fastcall;
      int regparm;

      is_fastcall = (lookup_attribute ("fastcall",
				       TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
		     != NULL);
      regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);

      if (is_fastcall)
	{
	  if (DECL_STATIC_CHAIN (cfun->decl))
	    {
	      sorry ("-fsplit-stack does not support fastcall with "
		     "nested function");
	      return INVALID_REGNUM;
	    }
	  return AX_REG;
	}
      else if (regparm < 3)
	{
	  if (!DECL_STATIC_CHAIN (cfun->decl))
	    return CX_REG;
	  else
	    {
	      if (regparm >= 2)
		{
		  sorry ("-fsplit-stack does not support 2 register "
			 "parameters for a nested function");
		  return INVALID_REGNUM;
		}
	      return DX_REG;
	    }
	}
      else
	{
	  /* FIXME: We could make this work by pushing a register
	     around the addition and comparison.  */
	  sorry ("-fsplit-stack does not support 3 register parameters");
	  return INVALID_REGNUM;
	}
    }
}

/* A SYMBOL_REF for the function which allocates new stackspace for
   -fsplit-stack.  */

static GTY(()) rtx split_stack_fn;

/* A SYMBOL_REF for the more stack function when using the large
   model.  */

static GTY(()) rtx split_stack_fn_large;

/* Handle -fsplit-stack.  These are the first instructions in the
   function, even before the regular prologue.  */

void
ix86_expand_split_stack_prologue (void)
{
  struct ix86_frame frame;
  HOST_WIDE_INT allocate;
  unsigned HOST_WIDE_INT args_size;
  rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
  rtx scratch_reg = NULL_RTX;
  rtx varargs_label = NULL_RTX;
  rtx fn;

  gcc_assert (flag_split_stack && reload_completed);

  ix86_finalize_stack_realign_flags ();
  ix86_compute_frame_layout (&frame);
  allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;

  /* This is the label we will branch to if we have enough stack
     space.  We expect the basic block reordering pass to reverse this
     branch if optimizing, so that we branch in the unlikely case.  */
  label = gen_label_rtx ();

  /* We need to compare the stack pointer minus the frame size with
     the stack boundary in the TCB.  The stack boundary always gives
     us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
     can compare directly.  Otherwise we need to do an addition.  */

  limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			  UNSPEC_STACK_CHECK);
  limit = gen_rtx_CONST (Pmode, limit);
  limit = gen_rtx_MEM (Pmode, limit);
  if (allocate < SPLIT_STACK_AVAILABLE)
    current = stack_pointer_rtx;
  else
    {
      unsigned int scratch_regno;
      rtx offset;

      /* We need a scratch register to hold the stack pointer minus
	 the required frame size.  Since this is the very start of the
	 function, the scratch register can be any caller-saved
	 register which is not used for parameters.  */
      offset = GEN_INT (- allocate);
      scratch_regno = split_stack_prologue_scratch_regno ();
      if (scratch_regno == INVALID_REGNUM)
	return;
      scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
	{
	  /* We don't use ix86_gen_add3 in this case because it will
	     want to split to lea, but when not optimizing the insn
	     will not be split after this point.  */
	  emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
				  gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						offset)));
	}
      else
	{
	  emit_move_insn (scratch_reg, offset);
	  emit_insn (gen_adddi3 (scratch_reg, scratch_reg,
				 stack_pointer_rtx));
	}
      current = scratch_reg;
    }

  ix86_expand_branch (GEU, current, limit, label);
  jump_insn = get_last_insn ();
  JUMP_LABEL (jump_insn) = label;

  /* Mark the jump as very likely to be taken.  */
  add_reg_note (jump_insn, REG_BR_PROB,
		GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));
  if (split_stack_fn == NULL_RTX)
    split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
  fn = split_stack_fn;

  /* Get more stack space.  We pass in the desired stack space and the
     size of the arguments to copy to the new stack.  In 32-bit mode
     we push the parameters; __morestack will return on a new stack
     anyhow.  In 64-bit mode we pass the parameters in r10 and
     r11.  */
  allocate_rtx = GEN_INT (allocate);
  args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
  call_fusage = NULL_RTX;
  if (TARGET_64BIT)
    {
      rtx reg10, reg11;

      reg10 = gen_rtx_REG (Pmode, R10_REG);
      reg11 = gen_rtx_REG (Pmode, R11_REG);

      /* If this function uses a static chain, it will be in %r10.
	 Preserve it across the call to __morestack.  */
      if (DECL_STATIC_CHAIN (cfun->decl))
	{
	  rtx rax;

	  rax = gen_rtx_REG (Pmode, AX_REG);
	  emit_move_insn (rax, reg10);
	  use_reg (&call_fusage, rax);
	}

      if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
	{
	  HOST_WIDE_INT argval;

	  /* When using the large model we need to load the address
	     into a register, and we've run out of registers.  So we
	     switch to a different calling convention, and we call a
	     different function: __morestack_large.  We pass the
	     argument size in the upper 32 bits of r10 and pass the
	     frame size in the lower 32 bits.  */
	  gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
	  gcc_assert ((args_size & 0xffffffff) == args_size);

	  if (split_stack_fn_large == NULL_RTX)
	    split_stack_fn_large =
	      gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");

	  if (ix86_cmodel == CM_LARGE_PIC)
	    {
	      rtx label, x;

	      label = gen_label_rtx ();
	      emit_label (label);
	      LABEL_PRESERVE_P (label) = 1;
	      emit_insn (gen_set_rip_rex64 (reg10, label));
	      emit_insn (gen_set_got_offset_rex64 (reg11, label));
	      emit_insn (gen_adddi3 (reg10, reg10, reg11));
	      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
				  UNSPEC_GOT);
	      x = gen_rtx_CONST (Pmode, x);
	      emit_move_insn (reg11, x);
	      x = gen_rtx_PLUS (Pmode, reg10, reg11);
	      x = gen_const_mem (Pmode, x);
	      emit_move_insn (reg11, x);
	    }
	  else
	    emit_move_insn (reg11, split_stack_fn_large);

	  fn = reg11;

	  argval = ((args_size << 16) << 16) + allocate;
	  emit_move_insn (reg10, GEN_INT (argval));
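	  /* Illustrative check of the packing above: with args_size ==
	     0x20 and allocate == 0x1000, argval becomes
	     0x0000002000001000 -- argument size in the upper 32 bits of
	     r10, frame size in the lower 32 bits, matching the comment.
	     (The double 16-bit shift, rather than a single << 32,
	     presumably sidesteps undefined behavior on hosts with a
	     32-bit HOST_WIDE_INT.)  */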
	}
      else
	{
	  emit_move_insn (reg10, allocate_rtx);
	  emit_move_insn (reg11, GEN_INT (args_size));
	  use_reg (&call_fusage, reg11);
	}

      use_reg (&call_fusage, reg10);
    }
  else
    {
      emit_insn (gen_push (GEN_INT (args_size)));
      emit_insn (gen_push (allocate_rtx));
    }
  call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
				GEN_INT (UNITS_PER_WORD), constm1_rtx,
				NULL_RTX, false);
  add_function_usage_to (call_insn, call_fusage);

  /* In order to make call/return prediction work right, we now need
     to execute a return instruction.  See
     libgcc/config/i386/morestack.S for the details on how this works.

     For flow purposes gcc must not see this as a return
     instruction--we need control flow to continue at the subsequent
     label.  Therefore, we use an unspec.  */
  gcc_assert (crtl->args.pops_args < 65536);
  emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));

  /* If we are in 64-bit mode and this function uses a static chain,
     we saved %r10 in %rax before calling __morestack.  */
  if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
    emit_move_insn (gen_rtx_REG (Pmode, R10_REG),
		    gen_rtx_REG (Pmode, AX_REG));

  /* If this function calls va_start, we need to store a pointer to
     the arguments on the old stack, because they may not have been
     all copied to the new stack.  At this point the old stack can be
     found at the frame pointer value used by __morestack, because
     __morestack has set that up before calling back to us.  Here we
     store that pointer in a scratch register, and in
     ix86_expand_prologue we store the scratch register in a stack
     slot.  */
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      unsigned int scratch_regno;
      rtx frame_reg;
      int words;

      scratch_regno = split_stack_prologue_scratch_regno ();
      scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      frame_reg = gen_rtx_REG (Pmode, BP_REG);

      /* 64-bit:
	 fp -> old fp value
	       return address within this function
	       return address of caller of this function
	       stack arguments
	 So we add three words to get to the stack arguments.

	 32-bit:
	 fp -> old fp value
	       return address within this function
	       first argument to __morestack
	       second argument to __morestack
	       return address of caller of this function
	       stack arguments
	 So we add five words to get to the stack arguments.  */
      words = TARGET_64BIT ? 3 : 5;
      emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
			      gen_rtx_PLUS (Pmode, frame_reg,
					    GEN_INT (words * UNITS_PER_WORD))));

      varargs_label = gen_label_rtx ();
      emit_jump_insn (gen_jump (varargs_label));
      JUMP_LABEL (get_last_insn ()) = varargs_label;

      emit_barrier ();
    }

  emit_label (label);
  LABEL_NUSES (label) = 1;

  /* If this function calls va_start, we now have to set the scratch
     register for the case where we do not call __morestack.  In this
     case we need to set it based on the stack pointer.  */
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
			      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					    GEN_INT (UNITS_PER_WORD))));

      emit_label (varargs_label);
      LABEL_NUSES (varargs_label) = 1;
    }
}

/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a scratch register.  */

static void
ix86_live_on_entry (bitmap regs)
{
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      gcc_assert (flag_split_stack);
      bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
    }
}

/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return 0 if the structure of the address is
   grossly off.  Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing length of lea
   instruction.  */

static int
ix86_decompose_address (rtx addr, struct ix86_address *out)
{
  rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
  rtx base_reg, index_reg;
  HOST_WIDE_INT scale = 1;
  rtx scale_rtx = NULL_RTX;
  rtx tmp;
  int retval = 1;
  enum ix86_address_seg seg = SEG_DEFAULT;

  if (REG_P (addr) || GET_CODE (addr) == SUBREG)
    base = addr;
  else if (GET_CODE (addr) == PLUS)
    {
      rtx addends[4], op;
      int n = 0, i;

      op = addr;
      do
	{
	  if (n >= 4)
	    return 0;
	  addends[n++] = XEXP (op, 1);
	  op = XEXP (op, 0);
	}
      while (GET_CODE (op) == PLUS);
      if (n >= 4)
	return 0;
      addends[n] = op;

      for (i = n; i >= 0; --i)
	{
	  op = addends[i];
	  switch (GET_CODE (op))
	    {
	    case MULT:
	      if (index)
		return 0;
	      index = XEXP (op, 0);
	      scale_rtx = XEXP (op, 1);
	      break;

	    case ASHIFT:
	      if (index)
		return 0;
	      index = XEXP (op, 0);
	      tmp = XEXP (op, 1);
	      if (!CONST_INT_P (tmp))
		return 0;
	      scale = INTVAL (tmp);
	      if ((unsigned HOST_WIDE_INT) scale > 3)
		return 0;
	      scale = 1 << scale;
	      break;

	    case UNSPEC:
	      if (XINT (op, 1) == UNSPEC_TP
		  && TARGET_TLS_DIRECT_SEG_REFS
		  && seg == SEG_DEFAULT)
		seg = TARGET_64BIT ? SEG_FS : SEG_GS;
	      else
		return 0;
	      break;

	    case REG:
	    case SUBREG:
	      if (!base)
		base = op;
	      else if (!index)
		index = op;
	      else
		return 0;
	      break;

	    case CONST:
	    case CONST_INT:
	    case SYMBOL_REF:
	    case LABEL_REF:
	      if (disp)
		return 0;
	      disp = op;
	      break;

	    default:
	      return 0;
	    }
	}
    }
  else if (GET_CODE (addr) == MULT)
    {
      index = XEXP (addr, 0);		/* index*scale */
      scale_rtx = XEXP (addr, 1);
    }
  else if (GET_CODE (addr) == ASHIFT)
    {
      /* We're called for lea too, which implements ashift on occasion.  */
      index = XEXP (addr, 0);
      tmp = XEXP (addr, 1);
      if (!CONST_INT_P (tmp))
	return 0;
      scale = INTVAL (tmp);
      if ((unsigned HOST_WIDE_INT) scale > 3)
	return 0;
      scale = 1 << scale;
      retval = -1;
    }
  else
    disp = addr;			/* displacement */

  /* Extract the integral value of scale.  */
  if (scale_rtx)
    {
      if (!CONST_INT_P (scale_rtx))
	return 0;
      scale = INTVAL (scale_rtx);
    }

  base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
  index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;

  /* Avoid useless 0 displacement.  */
  if (disp == const0_rtx && (base || index))
    disp = NULL_RTX;

  /* Allow arg pointer and stack pointer as index if there is no
     scaling.  */
  if (base_reg && index_reg && scale == 1
      && (index_reg == arg_pointer_rtx
	  || index_reg == frame_pointer_rtx
	  || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
    {
      /* Swap base and index.  */
      tmp = base, base = index, index = tmp;
      tmp = base_reg, base_reg = index_reg, index_reg = tmp;
    }

  /* Special case: %ebp cannot be encoded as a base without a displacement.
     Similarly %r13.  */
  if (!disp
      && base_reg
      && (base_reg == hard_frame_pointer_rtx
	  || base_reg == frame_pointer_rtx
	  || base_reg == arg_pointer_rtx
	  || (REG_P (base_reg)
	      && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
		  || REGNO (base_reg) == R13_REG))))
    disp = const0_rtx;

  /* Special case: on K6, [%esi] makes the instruction vector decoded.
     Avoid this by transforming to [%esi+0].
     Reload calls address legitimization without cfun defined, so we need
     to test cfun for being non-NULL.  */
  if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
      && base_reg && !index_reg && !disp
      && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
    disp = const0_rtx;

  /* Special case: encode reg+reg instead of reg*2.  */
  if (!base && index && scale == 2)
    base = index, base_reg = index_reg, scale = 1;

  /* Special case: scaling cannot be encoded without base or
     displacement.  */
  if (!base && !disp && index && scale != 1)
    disp = const0_rtx;

  out->base = base;
  out->index = index;
  out->disp = disp;
  out->scale = scale;
  out->seg = seg;

  return retval;
}
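/* Worked example (illustrative): for the canonical address RTL
   (plus:SI (plus:SI (mult:SI (reg:SI %ebx) (const_int 4)) (reg:SI %esi))
	    (const_int 12)),
   i.e. 12(%esi,%ebx,4), decomposition yields base = %esi, index = %ebx,
   scale = 4, disp = 12, seg = SEG_DEFAULT, and the return value is 1.  */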
/* Return the cost of the memory address X.
   For i386, it is better to use a complex address than let gcc copy
   the address into a register and make a new pseudo.  But not if the
   address requires two regs - that would mean more pseudos with longer
   lifetimes.  */

static int
ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
{
  struct ix86_address parts;
  int cost = 1;
  int ok = ix86_decompose_address (x, &parts);

  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  /* Attempt to minimize number of registers in the address.  */
  if ((parts.base
       && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
      || (parts.index
	  && (!REG_P (parts.index)
	      || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
    cost++;

  if (parts.base
      && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
      && parts.index
      && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
      && parts.base != parts.index)
    cost++;

  /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
     and it degenerates to vector decoding.  Increase the cost of such
     addresses here.  The penalty is minimally 2 cycles.  It may be
     worthwhile to split such addresses or even refuse them at all.

     The following addressing modes are affected:
	[base+scale*index]
	[scale*index+disp]
	[base+index]

     The first and last case may be avoidable by explicitly coding the
     zero in the memory address, but I don't have an AMD K6 machine
     handy to check this theory.  */

  if (TARGET_K6
      && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
	  || (parts.disp && !parts.base && parts.index && parts.scale != 1)
	  || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
    cost += 1;

  return cost;
}

/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
   this is used to form addresses to local data when -fPIC is in
   effect.  */

static bool
darwin_local_data_pic (rtx disp)
{
  return (GET_CODE (disp) == UNSPEC
	  && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
}

/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

bool
legitimate_constant_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (!CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      if (TARGET_MACHO && darwin_local_data_pic (x))
	return true;

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	switch (XINT (x, 1))
	  {
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	  case UNSPEC_PLTOFF:
	    return TARGET_64BIT;
	  case UNSPEC_TPOFF:
	  case UNSPEC_NTPOFF:
	    x = XVECEXP (x, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
	  case UNSPEC_DTPOFF:
	    x = XVECEXP (x, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
	  default:
	    return false;
	  }

      /* We must have drilled down to a symbol.  */
      if (GET_CODE (x) == LABEL_REF)
	return true;
      if (GET_CODE (x) != SYMBOL_REF)
	return false;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      if (SYMBOL_REF_TLS_MODEL (x))
	return false;

      /* DLLIMPORT symbols are never valid.  */
      if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
	  && SYMBOL_REF_DLLIMPORT_P (x))
	return false;

#if TARGET_MACHO
      /* mdynamic-no-pic */
      if (MACHO_DYNAMIC_NO_PIC_P)
	return machopic_symbol_defined_p (x);
#endif
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == TImode
	  && x != CONST0_RTX (TImode)
	  && !TARGET_64BIT)
	return false;
      break;

    case CONST_VECTOR:
      if (!standard_sse_constant_p (x))
	return false;

    default:
      break;
    }

  /* Otherwise we handle everything else in the move patterns.  */
  return true;
}
/* Determine if it's legal to put X into the constant pool.  This
   is not possible for the address of thread-local symbols, which
   is checked above.  */

static bool
ix86_cannot_force_const_mem (rtx x)
{
  /* We can always put integral constants and vectors in memory.  */
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      return false;

    default:
      break;
    }
  return !legitimate_constant_p (x);
}

/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

bool
legitimate_pic_operand_p (rtx x)
{
  rtx inner;

  switch (GET_CODE (x))
    {
    case CONST:
      inner = XEXP (x, 0);
      if (GET_CODE (inner) == PLUS
	  && CONST_INT_P (XEXP (inner, 1)))
	inner = XEXP (inner, 0);

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (inner) == UNSPEC)
	switch (XINT (inner, 1))
	  {
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	  case UNSPEC_PLTOFF:
	    return TARGET_64BIT;
	  case UNSPEC_TPOFF:
	    x = XVECEXP (inner, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
	  case UNSPEC_MACHOPIC_OFFSET:
	    return legitimate_pic_address_disp_p (x);
	  default:
	    return false;
	  }
      break;

    case SYMBOL_REF:
    case LABEL_REF:
      return legitimate_pic_address_disp_p (x);

    default:
      return true;
    }
}

/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */

bool
legitimate_pic_address_disp_p (rtx disp)
{
  bool saw_plus;

  /* In 64bit mode we can allow direct addresses of symbols and labels
     when they are not dynamic symbols.  */
  if (TARGET_64BIT)
    {
      rtx op0 = disp, op1;

      switch (GET_CODE (disp))
	{
	case LABEL_REF:
	  return true;

	case CONST:
	  if (GET_CODE (XEXP (disp, 0)) != PLUS)
	    break;
	  op0 = XEXP (XEXP (disp, 0), 0);
	  op1 = XEXP (XEXP (disp, 0), 1);
	  if (!CONST_INT_P (op1)
	      || INTVAL (op1) >= 16*1024*1024
	      || INTVAL (op1) < -16*1024*1024)
	    break;
	  if (GET_CODE (op0) == LABEL_REF)
	    return true;
	  if (GET_CODE (op0) != SYMBOL_REF)
	    break;
	  /* FALLTHRU */

	case SYMBOL_REF:
	  /* TLS references should always be enclosed in UNSPEC.  */
	  if (SYMBOL_REF_TLS_MODEL (op0))
	    return false;
	  if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
	      && ix86_cmodel != CM_LARGE_PIC)
	    return true;
	  break;

	default:
	  break;
	}
    }
  if (GET_CODE (disp) != CONST)
    return false;
  disp = XEXP (disp, 0);

  if (TARGET_64BIT)
    {
      /* It is unsafe to allow PLUS expressions.  This limits the allowed
	 distance of GOT table references.  We should not need these
	 anyway.  */
      if (GET_CODE (disp) != UNSPEC
	  || (XINT (disp, 1) != UNSPEC_GOTPCREL
	      && XINT (disp, 1) != UNSPEC_GOTOFF
	      && XINT (disp, 1) != UNSPEC_PCREL
	      && XINT (disp, 1) != UNSPEC_PLTOFF))
	return false;

      if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
	  && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
	return false;
      return true;
    }

  saw_plus = false;
  if (GET_CODE (disp) == PLUS)
    {
      if (!CONST_INT_P (XEXP (disp, 1)))
	return false;
      disp = XEXP (disp, 0);
      saw_plus = true;
    }

  if (TARGET_MACHO && darwin_local_data_pic (disp))
    return true;

  if (GET_CODE (disp) != UNSPEC)
    return false;

  switch (XINT (disp, 1))
    {
    case UNSPEC_GOT:
      if (saw_plus)
	return false;
      /* We need to check for both symbols and labels because VxWorks loads
	 text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
	 details.  */
      return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
	      || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
    case UNSPEC_GOTOFF:
      /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
	 While the ABI also specifies a 32bit relocation, we don't produce
	 it in the small PIC model at all.  */
      if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
	   || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
	  && !TARGET_64BIT)
	return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
      return false;

    case UNSPEC_GOTTPOFF:
    case UNSPEC_GOTNTPOFF:
    case UNSPEC_INDNTPOFF:
      if (saw_plus)
	return false;
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
    case UNSPEC_NTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
    case UNSPEC_DTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
    }

  return false;
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.  */

bool
ix86_legitimize_reload_address (rtx x,
				enum machine_mode mode ATTRIBUTE_UNUSED,
				int opnum, int type,
				int ind_levels ATTRIBUTE_UNUSED)
{
  /* Reload can generate:

     (plus:DI (plus:DI (unspec:DI [(const_int 0 [0])] UNSPEC_TP)
		       (reg:DI 97))
	      (reg:DI 2 cx))

     This RTX is rejected from ix86_legitimate_address_p due to
     non-strictness of base register 97.  Following this rejection,
     reload pushes all three components into separate registers,
     creating invalid memory address RTX.

     Following code reloads only the invalid part of the
     memory address RTX.  */

  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 1))
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 1)))
    {
      rtx base, index;
      bool something_reloaded = false;

      base = XEXP (XEXP (x, 0), 1);
      if (!REG_OK_FOR_BASE_STRICT_P (base))
	{
	  push_reload (base, NULL_RTX, &XEXP (XEXP (x, 0), 1), NULL,
		       BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  something_reloaded = true;
	}

      index = XEXP (x, 1);
      if (!REG_OK_FOR_INDEX_STRICT_P (index))
	{
	  push_reload (index, NULL_RTX, &XEXP (x, 1), NULL,
		       INDEX_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  something_reloaded = true;
	}

      gcc_assert (something_reloaded);
      return true;
    }

  return false;
}
12236 /* Recognizes RTL expressions that are valid memory addresses for an
12237 instruction. The MODE argument is the machine mode for the MEM
12238 expression that wants to use this address.
12240 It only recognizes address in canonical form. LEGITIMIZE_ADDRESS should
12241 convert common non-canonical forms to canonical form so that they will
12245 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
12246 rtx addr, bool strict)
12248 struct ix86_address parts;
12249 rtx base, index, disp;
12250 HOST_WIDE_INT scale;
12252 if (ix86_decompose_address (addr, &parts) <= 0)
12253 /* Decomposition failed. */
12257 index = parts.index;
12259 scale = parts.scale;
12261 /* Validate base register.
12263 Don't allow SUBREG's that span more than a word here. It can lead to spill
12264 failures when the base is one word out of a two word structure, which is
12265 represented internally as a DImode int. */
12273 else if (GET_CODE (base) == SUBREG
12274 && REG_P (SUBREG_REG (base))
12275 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
12277 reg = SUBREG_REG (base);
12279 /* Base is not a register. */
12282 if (GET_MODE (base) != Pmode)
12283 /* Base is not in Pmode. */
12286 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
12287 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
12288 /* Base is not valid. */
12292 /* Validate index register.
   Don't allow SUBREGs that span more than a word here -- same as above.  */
12302 else if (GET_CODE (index) == SUBREG
12303 && REG_P (SUBREG_REG (index))
12304 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
12306 reg = SUBREG_REG (index);
12308 /* Index is not a register. */
12311 if (GET_MODE (index) != Pmode)
12312 /* Index is not in Pmode. */
12315 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
12316 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
12317 /* Index is not valid. */
12321 /* Validate scale factor. */
12325 /* Scale without index. */
12328 if (scale != 2 && scale != 4 && scale != 8)
12329 /* Scale is not a valid multiplier. */
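/* For reference (illustrative, not part of the original comment): a
   canonical address is disp(base,index,scale) with scale in {1,2,4,8},
   e.g.

     movl 4(%ebx,%ecx,8), %eax

   which loads from EBX + ECX*8 + 4.  */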
12333 /* Validate displacement. */
12336 if (GET_CODE (disp) == CONST
12337 && GET_CODE (XEXP (disp, 0)) == UNSPEC
12338 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
12339 switch (XINT (XEXP (disp, 0), 1))
/* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
   used.  While the ABI also specifies 32bit relocations, we don't
   produce them at all and use IP relative addressing instead.  */
12345 case UNSPEC_GOTOFF:
12346 gcc_assert (flag_pic);
12348 goto is_legitimate_pic;
12350 /* 64bit address unspec. */
12353 case UNSPEC_GOTPCREL:
12355 gcc_assert (flag_pic);
12356 goto is_legitimate_pic;
12358 case UNSPEC_GOTTPOFF:
12359 case UNSPEC_GOTNTPOFF:
12360 case UNSPEC_INDNTPOFF:
12361 case UNSPEC_NTPOFF:
12362 case UNSPEC_DTPOFF:
12365 case UNSPEC_STACK_CHECK:
12366 gcc_assert (flag_split_stack);
12370 /* Invalid address unspec. */
12374 else if (SYMBOLIC_CONST (disp)
12378 && MACHOPIC_INDIRECT
12379 && !machopic_operand_p (disp)
12385 if (TARGET_64BIT && (index || base))
12387 /* foo@dtpoff(%rX) is ok. */
12388 if (GET_CODE (disp) != CONST
12389 || GET_CODE (XEXP (disp, 0)) != PLUS
12390 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
12391 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
12392 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
12393 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
12394 /* Non-constant pic memory reference. */
12397 else if ((!TARGET_MACHO || flag_pic)
12398 && ! legitimate_pic_address_disp_p (disp))
12399 /* Displacement is an invalid pic construct. */
12402 else if (MACHO_DYNAMIC_NO_PIC_P && !legitimate_constant_p (disp))
/* Displacement must be referenced via non_lazy_pointer.  */
12407 /* This code used to verify that a symbolic pic displacement
12408 includes the pic_offset_table_rtx register.
   While this is a good idea, unfortunately these constructs may
   be created by "adds using lea" optimization for incorrect

   This code is nonsensical, but results in addressing the
   GOT table with the pic_offset_table_rtx base.  We can't
   just refuse it easily, since it gets matched by the
   "addsi3" pattern, which later gets split to lea in the
   case the output register differs from the input.  While this
   could be handled by a separate addsi pattern for this case,
   one that never results in lea, disabling this test seems to
   be the easier and correct fix for the crash.  */
12429 else if (GET_CODE (disp) != LABEL_REF
12430 && !CONST_INT_P (disp)
12431 && (GET_CODE (disp) != CONST
12432 || !legitimate_constant_p (disp))
12433 && (GET_CODE (disp) != SYMBOL_REF
12434 || !legitimate_constant_p (disp)))
12435 /* Displacement is not constant. */
12437 else if (TARGET_64BIT
12438 && !x86_64_immediate_operand (disp, VOIDmode))
12439 /* Displacement is out of range. */
12443 /* Everything looks valid. */
12447 /* Determine if a given RTX is a valid constant address. */
12450 constant_address_p (rtx x)
12452 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
12455 /* Return a unique alias set for the GOT. */
12457 static alias_set_type
12458 ix86_GOT_alias_set (void)
12460 static alias_set_type set = -1;
12462 set = new_alias_set ();
12466 /* Return a legitimate reference for ORIG (an address) using the
12467 register REG. If REG is 0, a new pseudo is generated.
12469 There are two types of references that must be handled:
12471 1. Global data references must load the address from the GOT, via
12472 the PIC reg. An insn is emitted to do this load, and the reg is
12475 2. Static data references, constant pool addresses, and code labels
12476 compute the address as an offset from the GOT, whose base is in
12477 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
12478 differentiate them from global data objects. The returned
12479 address is the PIC reg + an unspec constant.
12481 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
12482 reg also appears in the address. */
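/* For illustration (a sketch assuming 32bit PIC with the GOT pointer
   in %ebx; "foo" and "bar" are hypothetical symbols):

     movl foo@GOT(%ebx), %eax     # case 1: load address from the GOT
     leal bar@GOTOFF(%ebx), %eax  # case 2: GOT-relative offset  */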
12485 legitimize_pic_address (rtx orig, rtx reg)
12488 rtx new_rtx = orig;
12492 if (TARGET_MACHO && !TARGET_64BIT)
12495 reg = gen_reg_rtx (Pmode);
12496 /* Use the generic Mach-O PIC machinery. */
12497 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
12501 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
12503 else if (TARGET_64BIT
12504 && ix86_cmodel != CM_SMALL_PIC
12505 && gotoff_operand (addr, Pmode))
12508 /* This symbol may be referenced via a displacement from the PIC
12509 base address (@GOTOFF). */
12511 if (reload_in_progress)
12512 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12513 if (GET_CODE (addr) == CONST)
12514 addr = XEXP (addr, 0);
12515 if (GET_CODE (addr) == PLUS)
12517 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12519 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12522 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12523 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12525 tmpreg = gen_reg_rtx (Pmode);
12528 emit_move_insn (tmpreg, new_rtx);
12532 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
12533 tmpreg, 1, OPTAB_DIRECT);
12536 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
12538 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
12540 /* This symbol may be referenced via a displacement from the PIC
12541 base address (@GOTOFF). */
12543 if (reload_in_progress)
12544 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12545 if (GET_CODE (addr) == CONST)
12546 addr = XEXP (addr, 0);
12547 if (GET_CODE (addr) == PLUS)
12549 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12551 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12554 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12555 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12556 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12560 emit_move_insn (reg, new_rtx);
12564 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
12565 /* We can't use @GOTOFF for text labels on VxWorks;
12566 see gotoff_operand. */
12567 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
12569 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12571 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
12572 return legitimize_dllimport_symbol (addr, true);
12573 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
12574 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
12575 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
12577 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
12578 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
/* For x64 PE-COFF there is no GOT table.  So we use the address directly.  */
12584 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12586 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
12587 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12590 reg = gen_reg_rtx (Pmode);
12591 emit_move_insn (reg, new_rtx);
12594 else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
12596 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
12597 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12598 new_rtx = gen_const_mem (Pmode, new_rtx);
12599 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12602 reg = gen_reg_rtx (Pmode);
/* Use gen_movsi directly, otherwise the address is loaded
   into a register for CSE.  We don't want to CSE these addresses;
   instead we CSE addresses from the GOT table, so skip this.  */
12606 emit_insn (gen_movsi (reg, new_rtx));
12611 /* This symbol must be referenced via a load from the
12612 Global Offset Table (@GOT). */
12614 if (reload_in_progress)
12615 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12616 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
12617 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12619 new_rtx = force_reg (Pmode, new_rtx);
12620 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12621 new_rtx = gen_const_mem (Pmode, new_rtx);
12622 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12625 reg = gen_reg_rtx (Pmode);
12626 emit_move_insn (reg, new_rtx);
12632 if (CONST_INT_P (addr)
12633 && !x86_64_immediate_operand (addr, VOIDmode))
12637 emit_move_insn (reg, addr);
12641 new_rtx = force_reg (Pmode, addr);
12643 else if (GET_CODE (addr) == CONST)
12645 addr = XEXP (addr, 0);
/* We must match stuff we generated before.  Assume the only
   unspecs that can get here are ours.  Not that we could do
   anything with them anyway....  */
12650 if (GET_CODE (addr) == UNSPEC
12651 || (GET_CODE (addr) == PLUS
12652 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
12654 gcc_assert (GET_CODE (addr) == PLUS);
12656 if (GET_CODE (addr) == PLUS)
12658 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
12660 /* Check first to see if this is a constant offset from a @GOTOFF
12661 symbol reference. */
12662 if (gotoff_operand (op0, Pmode)
12663 && CONST_INT_P (op1))
12667 if (reload_in_progress)
12668 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12669 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
12671 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
12672 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12673 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12677 emit_move_insn (reg, new_rtx);
12683 if (INTVAL (op1) < -16*1024*1024
12684 || INTVAL (op1) >= 16*1024*1024)
12686 if (!x86_64_immediate_operand (op1, Pmode))
12687 op1 = force_reg (Pmode, op1);
12688 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
12694 base = legitimize_pic_address (XEXP (addr, 0), reg);
12695 new_rtx = legitimize_pic_address (XEXP (addr, 1),
12696 base == reg ? NULL_RTX : reg);
12698 if (CONST_INT_P (new_rtx))
12699 new_rtx = plus_constant (base, INTVAL (new_rtx));
12702 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
12704 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
12705 new_rtx = XEXP (new_rtx, 1);
12707 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
12715 /* Load the thread pointer. If TO_REG is true, force it into a register. */
12718 get_thread_pointer (int to_reg)
12722 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
12726 reg = gen_reg_rtx (Pmode);
12727 insn = gen_rtx_SET (VOIDmode, reg, tp);
12728 insn = emit_insn (insn);
12733 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
12734 false if we expect this to be used for a memory address and true if
12735 we expect to load the address into a register. */
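/* For illustration (standard x86-64 ELF TLS sequences, given here as
   a sketch rather than the exact output; "x" is a hypothetical TLS
   symbol), the models expanded below roughly correspond to:

     global-dynamic:  leaq x@tlsgd(%rip), %rdi
                      call __tls_get_addr@PLT
     initial-exec:    movq x@gottpoff(%rip), %rax
                      movq %fs:(%rax), %rdx
     local-exec:      movq %fs:x@tpoff, %rax  */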
12738 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
12740 rtx dest, base, off, pic, tp;
12745 case TLS_MODEL_GLOBAL_DYNAMIC:
12746 dest = gen_reg_rtx (Pmode);
12747 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12749 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12751 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
12754 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
12755 insns = get_insns ();
12758 RTL_CONST_CALL_P (insns) = 1;
12759 emit_libcall_block (insns, dest, rax, x);
12761 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12762 emit_insn (gen_tls_global_dynamic_64 (dest, x));
12764 emit_insn (gen_tls_global_dynamic_32 (dest, x));
12766 if (TARGET_GNU2_TLS)
12768 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
12770 set_unique_reg_note (get_last_insn (), REG_EQUAL, x);
12774 case TLS_MODEL_LOCAL_DYNAMIC:
12775 base = gen_reg_rtx (Pmode);
12776 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12778 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12780 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
12783 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
12784 insns = get_insns ();
12787 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
12788 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
12789 RTL_CONST_CALL_P (insns) = 1;
12790 emit_libcall_block (insns, base, rax, note);
12792 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12793 emit_insn (gen_tls_local_dynamic_base_64 (base));
12795 emit_insn (gen_tls_local_dynamic_base_32 (base));
12797 if (TARGET_GNU2_TLS)
12799 rtx x = ix86_tls_module_base ();
12801 set_unique_reg_note (get_last_insn (), REG_EQUAL,
12802 gen_rtx_MINUS (Pmode, x, tp));
12805 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
12806 off = gen_rtx_CONST (Pmode, off);
12808 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
12810 if (TARGET_GNU2_TLS)
12812 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
12814 set_unique_reg_note (get_last_insn (), REG_EQUAL, x);
12819 case TLS_MODEL_INITIAL_EXEC:
12822 if (TARGET_SUN_TLS)
/* The Sun linker took the AMD64 TLS spec literally
   and can only handle %rax as the destination of the
   initial-exec code sequence.  */
12828 dest = gen_reg_rtx (Pmode);
12829 emit_insn (gen_tls_initial_exec_64_sun (dest, x));
12834 type = UNSPEC_GOTNTPOFF;
12838 if (reload_in_progress)
12839 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12840 pic = pic_offset_table_rtx;
12841 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
12843 else if (!TARGET_ANY_GNU_TLS)
12845 pic = gen_reg_rtx (Pmode);
12846 emit_insn (gen_set_got (pic));
12847 type = UNSPEC_GOTTPOFF;
12852 type = UNSPEC_INDNTPOFF;
12855 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
12856 off = gen_rtx_CONST (Pmode, off);
12858 off = gen_rtx_PLUS (Pmode, pic, off);
12859 off = gen_const_mem (Pmode, off);
12860 set_mem_alias_set (off, ix86_GOT_alias_set ());
12862 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12864 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12865 off = force_reg (Pmode, off);
12866 return gen_rtx_PLUS (Pmode, base, off);
12870 base = get_thread_pointer (true);
12871 dest = gen_reg_rtx (Pmode);
12872 emit_insn (gen_subsi3 (dest, base, off));
12876 case TLS_MODEL_LOCAL_EXEC:
12877 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
12878 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12879 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
12880 off = gen_rtx_CONST (Pmode, off);
12882 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12884 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12885 return gen_rtx_PLUS (Pmode, base, off);
12889 base = get_thread_pointer (true);
12890 dest = gen_reg_rtx (Pmode);
12891 emit_insn (gen_subsi3 (dest, base, off));
12896 gcc_unreachable ();
/* Create or return the unique __imp_DECL dllimport symbol corresponding
   to symbol DECL.  */
12905 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
12906 htab_t dllimport_map;
12909 get_dllimport_decl (tree decl)
12911 struct tree_map *h, in;
12914 const char *prefix;
12915 size_t namelen, prefixlen;
12920 if (!dllimport_map)
12921 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
12923 in.hash = htab_hash_pointer (decl);
12924 in.base.from = decl;
12925 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
12926 h = (struct tree_map *) *loc;
12930 *loc = h = ggc_alloc_tree_map ();
12932 h->base.from = decl;
12933 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
12934 VAR_DECL, NULL, ptr_type_node);
12935 DECL_ARTIFICIAL (to) = 1;
12936 DECL_IGNORED_P (to) = 1;
12937 DECL_EXTERNAL (to) = 1;
12938 TREE_READONLY (to) = 1;
12940 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
12941 name = targetm.strip_name_encoding (name);
12942 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
12943 ? "*__imp_" : "*__imp__";
12944 namelen = strlen (name);
12945 prefixlen = strlen (prefix);
12946 imp_name = (char *) alloca (namelen + prefixlen + 1);
12947 memcpy (imp_name, prefix, prefixlen);
12948 memcpy (imp_name + prefixlen, name, namelen + 1);
12950 name = ggc_alloc_string (imp_name, namelen + prefixlen);
12951 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
12952 SET_SYMBOL_REF_DECL (rtl, to);
12953 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
12955 rtl = gen_const_mem (Pmode, rtl);
12956 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
12958 SET_DECL_RTL (to, rtl);
12959 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
/* Expand SYMBOL into its corresponding dllimport symbol.  WANT_REG is
   true if we require the result to be a register.  */
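/* For illustration (hypothetical, not from the original sources): on
   x64 PE-COFF a call to a dllimport'ed function "foo" is emitted as an
   indirect call through the loader-filled import slot, e.g.

     call *__imp_foo(%rip)

   rather than a direct "call foo".  */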
12968 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
12973 gcc_assert (SYMBOL_REF_DECL (symbol));
12974 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
12976 x = DECL_RTL (imp_decl);
12978 x = force_reg (Pmode, x);
12982 /* Try machine-dependent ways of modifying an illegitimate address
12983 to be legitimate. If we find one, return the new, valid address.
12984 This macro is used in only one place: `memory_address' in explow.c.
12986 OLDX is the address as it was before break_out_memory_refs was called.
12987 In some cases it is useful to look at this to decide what needs to be done.
12989 It is always safe for this macro to do nothing. It exists to recognize
12990 opportunities to optimize the output.
12992 For the 80386, we handle X+REG by loading X into a register R and
12993 using R+REG. R will go in a general reg and indexing will be used.
12994 However, if REG is a broken-out memory address or multiplication,
12995 nothing needs to be done because REG can certainly go in a general reg.
12997 When -fpic is used, special handling is needed for symbolic references.
12998 See comments by legitimize_pic_address in i386.c for details. */
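/* For illustration (a hypothetical -fpic example; "sym" is a made-up
   local symbol and %ebx holds the PIC base): the address sym+%ecx is
   not legitimate as-is and may be rewritten roughly as

     leal sym@GOTOFF(%ebx), %eax
     movl (%eax,%ecx), %edx  */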
13001 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
13002 enum machine_mode mode)
13007 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
13009 return legitimize_tls_address (x, (enum tls_model) log, false);
13010 if (GET_CODE (x) == CONST
13011 && GET_CODE (XEXP (x, 0)) == PLUS
13012 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
13013 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
13015 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
13016 (enum tls_model) log, false);
13017 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
13020 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
13022 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
13023 return legitimize_dllimport_symbol (x, true);
13024 if (GET_CODE (x) == CONST
13025 && GET_CODE (XEXP (x, 0)) == PLUS
13026 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
13027 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
13029 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
13030 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
13034 if (flag_pic && SYMBOLIC_CONST (x))
13035 return legitimize_pic_address (x, 0);
13038 if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
13039 return machopic_indirect_data_reference (x, 0);
/* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */
13043 if (GET_CODE (x) == ASHIFT
13044 && CONST_INT_P (XEXP (x, 1))
13045 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
13048 log = INTVAL (XEXP (x, 1));
13049 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
13050 GEN_INT (1 << log));
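/* Example (illustrative): (ashift (reg) (const_int 2)) becomes
   (mult (reg) (const_int 4)), which ix86_decompose_address accepts
   as a scaled index, as in the address (%eax,%ecx,4).  */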
13053 if (GET_CODE (x) == PLUS)
13055 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
13057 if (GET_CODE (XEXP (x, 0)) == ASHIFT
13058 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
13059 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
13062 log = INTVAL (XEXP (XEXP (x, 0), 1));
13063 XEXP (x, 0) = gen_rtx_MULT (Pmode,
13064 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
13065 GEN_INT (1 << log));
13068 if (GET_CODE (XEXP (x, 1)) == ASHIFT
13069 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
13070 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
13073 log = INTVAL (XEXP (XEXP (x, 1), 1));
13074 XEXP (x, 1) = gen_rtx_MULT (Pmode,
13075 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
13076 GEN_INT (1 << log));
13079 /* Put multiply first if it isn't already. */
13080 if (GET_CODE (XEXP (x, 1)) == MULT)
13082 rtx tmp = XEXP (x, 0);
13083 XEXP (x, 0) = XEXP (x, 1);
13088 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
13089 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
13090 created by virtual register instantiation, register elimination, and
13091 similar optimizations. */
13092 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
13095 x = gen_rtx_PLUS (Pmode,
13096 gen_rtx_PLUS (Pmode, XEXP (x, 0),
13097 XEXP (XEXP (x, 1), 0)),
13098 XEXP (XEXP (x, 1), 1));
13102 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
13103 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
13104 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
13105 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
13106 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
13107 && CONSTANT_P (XEXP (x, 1)))
13110 rtx other = NULL_RTX;
13112 if (CONST_INT_P (XEXP (x, 1)))
13114 constant = XEXP (x, 1);
13115 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
13117 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
13119 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
13120 other = XEXP (x, 1);
13128 x = gen_rtx_PLUS (Pmode,
13129 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
13130 XEXP (XEXP (XEXP (x, 0), 1), 0)),
13131 plus_constant (other, INTVAL (constant)));
13135 if (changed && ix86_legitimate_address_p (mode, x, false))
13138 if (GET_CODE (XEXP (x, 0)) == MULT)
13141 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
13144 if (GET_CODE (XEXP (x, 1)) == MULT)
13147 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
13151 && REG_P (XEXP (x, 1))
13152 && REG_P (XEXP (x, 0)))
13155 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
13158 x = legitimize_pic_address (x, 0);
13161 if (changed && ix86_legitimate_address_p (mode, x, false))
13164 if (REG_P (XEXP (x, 0)))
13166 rtx temp = gen_reg_rtx (Pmode);
13167 rtx val = force_operand (XEXP (x, 1), temp);
13169 emit_move_insn (temp, val);
13171 XEXP (x, 1) = temp;
13175 else if (REG_P (XEXP (x, 1)))
13177 rtx temp = gen_reg_rtx (Pmode);
13178 rtx val = force_operand (XEXP (x, 0), temp);
13180 emit_move_insn (temp, val);
13182 XEXP (x, 0) = temp;
13190 /* Print an integer constant expression in assembler syntax. Addition
13191 and subtraction are the only arithmetic that may appear in these
13192 expressions. FILE is the stdio stream to write to, X is the rtx, and
13193 CODE is the operand print code from the output string. */
13196 output_pic_addr_const (FILE *file, rtx x, int code)
13200 switch (GET_CODE (x))
13203 gcc_assert (flag_pic);
13208 if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
13209 output_addr_const (file, x);
13212 const char *name = XSTR (x, 0);
13214 /* Mark the decl as referenced so that cgraph will
13215 output the function. */
13216 if (SYMBOL_REF_DECL (x))
13217 mark_decl_referenced (SYMBOL_REF_DECL (x));
13220 if (MACHOPIC_INDIRECT
13221 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
13222 name = machopic_indirection_name (x, /*stub_p=*/true);
13224 assemble_name (file, name);
13226 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
13227 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
13228 fputs ("@PLT", file);
13235 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
13236 assemble_name (asm_out_file, buf);
13240 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
13244 /* This used to output parentheses around the expression,
13245 but that does not work on the 386 (either ATT or BSD assembler). */
13246 output_pic_addr_const (file, XEXP (x, 0), code);
13250 if (GET_MODE (x) == VOIDmode)
13252 /* We can use %d if the number is <32 bits and positive. */
13253 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
13254 fprintf (file, "0x%lx%08lx",
13255 (unsigned long) CONST_DOUBLE_HIGH (x),
13256 (unsigned long) CONST_DOUBLE_LOW (x));
13258 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
13261 /* We can't handle floating point constants;
13262 TARGET_PRINT_OPERAND must handle them. */
13263 output_operand_lossage ("floating constant misused");
13267 /* Some assemblers need integer constants to appear first. */
13268 if (CONST_INT_P (XEXP (x, 0)))
13270 output_pic_addr_const (file, XEXP (x, 0), code);
13272 output_pic_addr_const (file, XEXP (x, 1), code);
13276 gcc_assert (CONST_INT_P (XEXP (x, 1)));
13277 output_pic_addr_const (file, XEXP (x, 1), code);
13279 output_pic_addr_const (file, XEXP (x, 0), code);
13285 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
13286 output_pic_addr_const (file, XEXP (x, 0), code);
13288 output_pic_addr_const (file, XEXP (x, 1), code);
13290 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
13294 if (XINT (x, 1) == UNSPEC_STACK_CHECK)
13296 bool f = i386_asm_output_addr_const_extra (file, x);
13301 gcc_assert (XVECLEN (x, 0) == 1);
13302 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
13303 switch (XINT (x, 1))
13306 fputs ("@GOT", file);
13308 case UNSPEC_GOTOFF:
13309 fputs ("@GOTOFF", file);
13311 case UNSPEC_PLTOFF:
13312 fputs ("@PLTOFF", file);
13315 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13316 "(%rip)" : "[rip]", file);
13318 case UNSPEC_GOTPCREL:
13319 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13320 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
13322 case UNSPEC_GOTTPOFF:
13323 /* FIXME: This might be @TPOFF in Sun ld too. */
13324 fputs ("@gottpoff", file);
13327 fputs ("@tpoff", file);
13329 case UNSPEC_NTPOFF:
13331 fputs ("@tpoff", file);
13333 fputs ("@ntpoff", file);
13335 case UNSPEC_DTPOFF:
13336 fputs ("@dtpoff", file);
13338 case UNSPEC_GOTNTPOFF:
13340 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13341 "@gottpoff(%rip)": "@gottpoff[rip]", file);
13343 fputs ("@gotntpoff", file);
13345 case UNSPEC_INDNTPOFF:
13346 fputs ("@indntpoff", file);
13349 case UNSPEC_MACHOPIC_OFFSET:
13351 machopic_output_function_base_name (file);
13355 output_operand_lossage ("invalid UNSPEC as operand");
13361 output_operand_lossage ("invalid expression as operand");
13365 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
13366 We need to emit DTP-relative relocations. */
13368 static void ATTRIBUTE_UNUSED
13369 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
13371 fputs (ASM_LONG, file);
13372 output_addr_const (file, x);
13373 fputs ("@dtpoff", file);
13379 fputs (", 0", file);
13382 gcc_unreachable ();
13386 /* Return true if X is a representation of the PIC register. This copes
13387 with calls from ix86_find_base_term, where the register might have
13388 been replaced by a cselib value. */
13391 ix86_pic_register_p (rtx x)
13393 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
13394 return (pic_offset_table_rtx
13395 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
13397 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
13400 /* Helper function for ix86_delegitimize_address.
13401 Attempt to delegitimize TLS local-exec accesses. */
13404 ix86_delegitimize_tls_address (rtx orig_x)
13406 rtx x = orig_x, unspec;
13407 struct ix86_address addr;
13409 if (!TARGET_TLS_DIRECT_SEG_REFS)
13413 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
13415 if (ix86_decompose_address (x, &addr) == 0
13416 || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
13417 || addr.disp == NULL_RTX
13418 || GET_CODE (addr.disp) != CONST)
13420 unspec = XEXP (addr.disp, 0);
13421 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
13422 unspec = XEXP (unspec, 0);
13423 if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
13425 x = XVECEXP (unspec, 0, 0);
13426 gcc_assert (GET_CODE (x) == SYMBOL_REF);
13427 if (unspec != XEXP (addr.disp, 0))
13428 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
13431 rtx idx = addr.index;
13432 if (addr.scale != 1)
13433 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
13434 x = gen_rtx_PLUS (Pmode, idx, x);
13437 x = gen_rtx_PLUS (Pmode, addr.base, x);
13438 if (MEM_P (orig_x))
13439 x = replace_equiv_address_nv (orig_x, x);
13443 /* In the name of slightly smaller debug output, and to cater to
13444 general assembler lossage, recognize PIC+GOTOFF and turn it back
13445 into a direct symbol reference.
13447 On Darwin, this is necessary to avoid a crash, because Darwin
13448 has a different PIC label for each routine but the DWARF debugging
13449 information is not associated with any particular routine, so it's
13450 necessary to remove references to the PIC label from RTL stored by
13451 the DWARF output code. */
13454 ix86_delegitimize_address (rtx x)
13456 rtx orig_x = delegitimize_mem_from_attrs (x);
13457 /* addend is NULL or some rtx if x is something+GOTOFF where
13458 something doesn't include the PIC register. */
13459 rtx addend = NULL_RTX;
13460 /* reg_addend is NULL or a multiple of some register. */
13461 rtx reg_addend = NULL_RTX;
13462 /* const_addend is NULL or a const_int. */
13463 rtx const_addend = NULL_RTX;
13464 /* This is the result, or NULL. */
13465 rtx result = NULL_RTX;
13474 if (GET_CODE (x) != CONST
13475 || GET_CODE (XEXP (x, 0)) != UNSPEC
13476 || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
13477 && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
13478 || !MEM_P (orig_x))
13479 return ix86_delegitimize_tls_address (orig_x);
13480 x = XVECEXP (XEXP (x, 0), 0, 0);
13481 if (GET_MODE (orig_x) != Pmode)
13483 x = simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
13490 if (GET_CODE (x) != PLUS
13491 || GET_CODE (XEXP (x, 1)) != CONST)
13492 return ix86_delegitimize_tls_address (orig_x);
13494 if (ix86_pic_register_p (XEXP (x, 0)))
13495 /* %ebx + GOT/GOTOFF */
13497 else if (GET_CODE (XEXP (x, 0)) == PLUS)
13499 /* %ebx + %reg * scale + GOT/GOTOFF */
13500 reg_addend = XEXP (x, 0);
13501 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
13502 reg_addend = XEXP (reg_addend, 1);
13503 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
13504 reg_addend = XEXP (reg_addend, 0);
13507 reg_addend = NULL_RTX;
13508 addend = XEXP (x, 0);
13512 addend = XEXP (x, 0);
13514 x = XEXP (XEXP (x, 1), 0);
13515 if (GET_CODE (x) == PLUS
13516 && CONST_INT_P (XEXP (x, 1)))
13518 const_addend = XEXP (x, 1);
13522 if (GET_CODE (x) == UNSPEC
13523 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
13524 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
13525 result = XVECEXP (x, 0, 0);
13527 if (TARGET_MACHO && darwin_local_data_pic (x)
13528 && !MEM_P (orig_x))
13529 result = XVECEXP (x, 0, 0);
13532 return ix86_delegitimize_tls_address (orig_x);
13535 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
13537 result = gen_rtx_PLUS (Pmode, reg_addend, result);
/* If the rest of the original X doesn't involve the PIC register, add
   the addend and subtract pic_offset_table_rtx.  This can happen e.g.
13543 leal (%ebx, %ecx, 4), %ecx
13545 movl foo@GOTOFF(%ecx), %edx
13546 in which case we return (%ecx - %ebx) + foo. */
13547 if (pic_offset_table_rtx)
13548 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
13549 pic_offset_table_rtx),
13554 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
13556 result = simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
13557 if (result == NULL_RTX)
13563 /* If X is a machine specific address (i.e. a symbol or label being
13564 referenced as a displacement from the GOT implemented using an
13565 UNSPEC), then return the base term. Otherwise return X. */
13568 ix86_find_base_term (rtx x)
13574 if (GET_CODE (x) != CONST)
13576 term = XEXP (x, 0);
13577 if (GET_CODE (term) == PLUS
13578 && (CONST_INT_P (XEXP (term, 1))
13579 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
13580 term = XEXP (term, 0);
13581 if (GET_CODE (term) != UNSPEC
13582 || (XINT (term, 1) != UNSPEC_GOTPCREL
13583 && XINT (term, 1) != UNSPEC_PCREL))
13586 return XVECEXP (term, 0, 0);
13589 return ix86_delegitimize_address (x);
13593 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
13594 int fp, FILE *file)
13596 const char *suffix;
13598 if (mode == CCFPmode || mode == CCFPUmode)
13600 code = ix86_fp_compare_code_to_integer (code);
13604 code = reverse_condition (code);
13655 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
13659 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
13660 Those same assemblers have the same but opposite lossage on cmov. */
13661 if (mode == CCmode)
13662 suffix = fp ? "nbe" : "a";
13663 else if (mode == CCCmode)
13666 gcc_unreachable ();
13682 gcc_unreachable ();
13686 gcc_assert (mode == CCmode || mode == CCCmode);
13703 gcc_unreachable ();
13707 /* ??? As above. */
13708 gcc_assert (mode == CCmode || mode == CCCmode);
13709 suffix = fp ? "nb" : "ae";
13712 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
13716 /* ??? As above. */
13717 if (mode == CCmode)
13719 else if (mode == CCCmode)
13720 suffix = fp ? "nb" : "ae";
13722 gcc_unreachable ();
13725 suffix = fp ? "u" : "p";
13728 suffix = fp ? "nu" : "np";
13731 gcc_unreachable ();
13733 fputs (suffix, file);
13736 /* Print the name of register X to FILE based on its machine mode and number.
13737 If CODE is 'w', pretend the mode is HImode.
13738 If CODE is 'b', pretend the mode is QImode.
13739 If CODE is 'k', pretend the mode is SImode.
13740 If CODE is 'q', pretend the mode is DImode.
13741 If CODE is 'x', pretend the mode is V4SFmode.
13742 If CODE is 't', pretend the mode is V8SFmode.
13743 If CODE is 'h', pretend the reg is the 'high' byte register.
   If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
13745 If CODE is 'd', duplicate the operand for AVX instruction.
13749 print_reg (rtx x, int code, FILE *file)
13752 bool duplicated = code == 'd' && TARGET_AVX;
13754 gcc_assert (x == pc_rtx
13755 || (REGNO (x) != ARG_POINTER_REGNUM
13756 && REGNO (x) != FRAME_POINTER_REGNUM
13757 && REGNO (x) != FLAGS_REG
13758 && REGNO (x) != FPSR_REG
13759 && REGNO (x) != FPCR_REG));
13761 if (ASSEMBLER_DIALECT == ASM_ATT)
13766 gcc_assert (TARGET_64BIT);
13767 fputs ("rip", file);
13771 if (code == 'w' || MMX_REG_P (x))
13773 else if (code == 'b')
13775 else if (code == 'k')
13777 else if (code == 'q')
13779 else if (code == 'y')
13781 else if (code == 'h')
13783 else if (code == 'x')
13785 else if (code == 't')
13788 code = GET_MODE_SIZE (GET_MODE (x));
/* Irritatingly, AMD extended registers use a different naming
   convention from the normal registers.  */
13792 if (REX_INT_REG_P (x))
13794 gcc_assert (TARGET_64BIT);
13798 error ("extended registers have no high halves");
13801 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
13804 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
13807 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
13810 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
13813 error ("unsupported operand size for extended register");
13823 if (STACK_TOP_P (x))
13832 if (! ANY_FP_REG_P (x))
13833 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
13838 reg = hi_reg_name[REGNO (x)];
13841 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
13843 reg = qi_reg_name[REGNO (x)];
13846 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
13848 reg = qi_high_reg_name[REGNO (x)];
13853 gcc_assert (!duplicated);
13855 fputs (hi_reg_name[REGNO (x)] + 1, file);
13860 gcc_unreachable ();
13866 if (ASSEMBLER_DIALECT == ASM_ATT)
13867 fprintf (file, ", %%%s", reg);
13869 fprintf (file, ", %s", reg);
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_local_dynamic_base
   pattern.  */
13878 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
13882 if (GET_CODE (x) == SYMBOL_REF
13883 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
13885 cfun->machine->some_ld_name = XSTR (x, 0);
13892 static const char *
13893 get_some_local_dynamic_name (void)
13897 if (cfun->machine->some_ld_name)
13898 return cfun->machine->some_ld_name;
13900 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
13901 if (NONDEBUG_INSN_P (insn)
13902 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
13903 return cfun->machine->some_ld_name;
13908 /* Meaning of CODE:
13909 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
13910 C -- print opcode suffix for set/cmov insn.
13911 c -- like C, but print reversed condition
13912 F,f -- likewise, but for floating-point.
13913 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
13915 R -- print the prefix for register names.
13916 z -- print the opcode suffix for the size of the current operand.
13917 Z -- likewise, with special suffixes for x87 instructions.
13918 * -- print a star (in certain assembler syntax)
13919 A -- print an absolute memory reference.
13920 w -- print the operand as if it's a "word" (HImode) even if it isn't.
   s -- print a shift double count, followed by the assembler's argument
13923 b -- print the QImode name of the register for the indicated operand.
13924 %b0 would print %al if operands[0] is reg 0.
13925 w -- likewise, print the HImode name of the register.
13926 k -- likewise, print the SImode name of the register.
13927 q -- likewise, print the DImode name of the register.
13928 x -- likewise, print the V4SFmode name of the register.
13929 t -- likewise, print the V8SFmode name of the register.
13930 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
13931 y -- print "st(0)" instead of "st" as a register.
13932 d -- print duplicated register operand for AVX instruction.
13933 D -- print condition for SSE cmp instruction.
13934 P -- if PIC, print an @PLT suffix.
13935 X -- don't print any sort of PIC '@' suffix for a symbol.
13936 & -- print some in-use local-dynamic symbol name.
13937 H -- print a memory address offset by 8; used for sse high-parts
13938 Y -- print condition for XOP pcom* instruction.
13939 + -- print a branch hint as 'cs' or 'ds' prefix
   ; -- print a semicolon (after prefixes due to a bug in older gas).
13941 @ -- print a segment register of thread base pointer load
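/* Example (illustrative, not from the original comment): if
   operands[0] is reg 0 in SImode, then in an asm template "%k0"
   prints "%eax", "%w0" prints "%ax", "%b0" prints "%al" and "%z0"
   prints the "l" size suffix.  */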
13945 ix86_print_operand (FILE *file, rtx x, int code)
13952 if (ASSEMBLER_DIALECT == ASM_ATT)
13958 const char *name = get_some_local_dynamic_name ();
13960 output_operand_lossage ("'%%&' used without any "
13961 "local dynamic TLS references");
13963 assemble_name (file, name);
13968 switch (ASSEMBLER_DIALECT)
13975 /* Intel syntax. For absolute addresses, registers should not
13976 be surrounded by braces. */
13980 ix86_print_operand (file, x, 0);
13987 gcc_unreachable ();
13990 ix86_print_operand (file, x, 0);
13995 if (ASSEMBLER_DIALECT == ASM_ATT)
14000 if (ASSEMBLER_DIALECT == ASM_ATT)
14005 if (ASSEMBLER_DIALECT == ASM_ATT)
14010 if (ASSEMBLER_DIALECT == ASM_ATT)
14015 if (ASSEMBLER_DIALECT == ASM_ATT)
14020 if (ASSEMBLER_DIALECT == ASM_ATT)
14025 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
14027 /* Opcodes don't get size suffixes if using Intel opcodes. */
14028 if (ASSEMBLER_DIALECT == ASM_INTEL)
14031 switch (GET_MODE_SIZE (GET_MODE (x)))
14050 output_operand_lossage
14051 ("invalid operand size for operand code '%c'", code);
14056 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14058 (0, "non-integer operand used with operand code '%c'", code);
14062 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
14063 if (ASSEMBLER_DIALECT == ASM_INTEL)
14066 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
14068 switch (GET_MODE_SIZE (GET_MODE (x)))
14071 #ifdef HAVE_AS_IX86_FILDS
14081 #ifdef HAVE_AS_IX86_FILDQ
14084 fputs ("ll", file);
14092 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14094 /* 387 opcodes don't get size suffixes
14095 if the operands are registers. */
14096 if (STACK_REG_P (x))
14099 switch (GET_MODE_SIZE (GET_MODE (x)))
14120 output_operand_lossage
14121 ("invalid operand type used with operand code '%c'", code);
14125 output_operand_lossage
14126 ("invalid operand size for operand code '%c'", code);
14143 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
14145 ix86_print_operand (file, x, 0);
14146 fputs (", ", file);
/* A little bit of braindamage here.  The SSE compare instructions
   use completely different names for the comparisons than the
   fp conditional moves do.  */
14156 switch (GET_CODE (x))
14159 fputs ("eq", file);
14162 fputs ("eq_us", file);
14165 fputs ("lt", file);
14168 fputs ("nge", file);
14171 fputs ("le", file);
14174 fputs ("ngt", file);
14177 fputs ("unord", file);
14180 fputs ("neq", file);
14183 fputs ("neq_oq", file);
14186 fputs ("ge", file);
14189 fputs ("nlt", file);
14192 fputs ("gt", file);
14195 fputs ("nle", file);
14198 fputs ("ord", file);
14201 output_operand_lossage ("operand is not a condition code, "
14202 "invalid operand code 'D'");
14208 switch (GET_CODE (x))
14212 fputs ("eq", file);
14216 fputs ("lt", file);
14220 fputs ("le", file);
14223 fputs ("unord", file);
14227 fputs ("neq", file);
14231 fputs ("nlt", file);
14235 fputs ("nle", file);
14238 fputs ("ord", file);
14241 output_operand_lossage ("operand is not a condition code, "
14242 "invalid operand code 'D'");
14248 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14249 if (ASSEMBLER_DIALECT == ASM_ATT)
14251 switch (GET_MODE (x))
14253 case HImode: putc ('w', file); break;
14255 case SFmode: putc ('l', file); break;
14257 case DFmode: putc ('q', file); break;
14258 default: gcc_unreachable ();
14265 if (!COMPARISON_P (x))
14267 output_operand_lossage ("operand is neither a constant nor a "
14268 "condition code, invalid operand code "
14272 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
14275 if (!COMPARISON_P (x))
14277 output_operand_lossage ("operand is neither a constant nor a "
14278 "condition code, invalid operand code "
14282 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14283 if (ASSEMBLER_DIALECT == ASM_ATT)
14286 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
14289 /* Like above, but reverse condition */
14291 /* Check to see if argument to %c is really a constant
14292 and not a condition code which needs to be reversed. */
14293 if (!COMPARISON_P (x))
14295 output_operand_lossage ("operand is neither a constant nor a "
14296 "condition code, invalid operand "
14300 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
14303 if (!COMPARISON_P (x))
14305 output_operand_lossage ("operand is neither a constant nor a "
14306 "condition code, invalid operand "
14310 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14311 if (ASSEMBLER_DIALECT == ASM_ATT)
14314 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
14318 if (!offsettable_memref_p (x))
14320 output_operand_lossage ("operand is not an offsettable memory "
14321 "reference, invalid operand "
14325 /* It doesn't actually matter what mode we use here, as we're
14326 only going to use this for printing. */
14327 x = adjust_address_nv (x, DImode, 8);
14335 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
14338 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
14341 int pred_val = INTVAL (XEXP (x, 0));
14343 if (pred_val < REG_BR_PROB_BASE * 45 / 100
14344 || pred_val > REG_BR_PROB_BASE * 55 / 100)
14346 int taken = pred_val > REG_BR_PROB_BASE / 2;
14347 int cputaken = final_forward_branch_p (current_output_insn) == 0;
/* Emit hints only where the default branch prediction
   heuristics would fail.  */
14351 if (taken != cputaken)
14353 /* We use 3e (DS) prefix for taken branches and
14354 2e (CS) prefix for not taken branches. */
14356 fputs ("ds ; ", file);
14358 fputs ("cs ; ", file);
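/* Illustrative output (hypothetical label): the insn is then printed
   e.g. as "ds ; jne .L5" to hint the branch taken, or as
   "cs ; jne .L5" to hint it not taken.  */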
14366 switch (GET_CODE (x))
14369 fputs ("neq", file);
14372 fputs ("eq", file);
14376 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
14380 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
14384 fputs ("le", file);
14388 fputs ("lt", file);
14391 fputs ("unord", file);
14394 fputs ("ord", file);
14397 fputs ("ueq", file);
14400 fputs ("nlt", file);
14403 fputs ("nle", file);
14406 fputs ("ule", file);
14409 fputs ("ult", file);
14412 fputs ("une", file);
14415 output_operand_lossage ("operand is not a condition code, "
14416 "invalid operand code 'Y'");
14422 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
14428 if (ASSEMBLER_DIALECT == ASM_ATT)
/* The kernel uses a different segment register for performance
   reasons; this way a system call does not have to trash the
   userspace segment register, which would be expensive.  */
14434 if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
14435 fputs ("fs", file);
14437 fputs ("gs", file);
14441 output_operand_lossage ("invalid operand code '%c'", code);
14446 print_reg (x, code, file);
14448 else if (MEM_P (x))
14450 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
14451 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
14452 && GET_MODE (x) != BLKmode)
14455 switch (GET_MODE_SIZE (GET_MODE (x)))
14457 case 1: size = "BYTE"; break;
14458 case 2: size = "WORD"; break;
14459 case 4: size = "DWORD"; break;
14460 case 8: size = "QWORD"; break;
14461 case 12: size = "TBYTE"; break;
14463 if (GET_MODE (x) == XFmode)
14468 case 32: size = "YMMWORD"; break;
14470 gcc_unreachable ();
14473 /* Check for explicit size override (codes 'b', 'w' and 'k') */
14476 else if (code == 'w')
14478 else if (code == 'k')
14481 fputs (size, file);
14482 fputs (" PTR ", file);
14486 /* Avoid (%rip) for call operands. */
14487 if (CONSTANT_ADDRESS_P (x) && code == 'P'
14488 && !CONST_INT_P (x))
14489 output_addr_const (file, x);
14490 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
14491 output_operand_lossage ("invalid constraints for operand");
14493 output_address (x);
14496 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
14501 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14502 REAL_VALUE_TO_TARGET_SINGLE (r, l);
14504 if (ASSEMBLER_DIALECT == ASM_ATT)
14506 /* Sign extend 32bit SFmode immediate to 8 bytes. */
14508 fprintf (file, "0x%08llx", (unsigned long long) (int) l);
14510 fprintf (file, "0x%08x", (unsigned int) l);
14513 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
14518 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14519 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
14521 if (ASSEMBLER_DIALECT == ASM_ATT)
14523 fprintf (file, "0x%lx%08lx", l[1] & 0xffffffff, l[0] & 0xffffffff);
14526 /* These float cases don't actually occur as immediate operands. */
14527 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
14531 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14532 fputs (dstr, file);
14537 /* We have patterns that allow zero sets of memory, for instance.
14538 In 64-bit mode, we should probably support all 8-byte vectors,
14539 since we can in fact encode that into an immediate. */
14540 if (GET_CODE (x) == CONST_VECTOR)
14542 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
14548 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
14550 if (ASSEMBLER_DIALECT == ASM_ATT)
14553 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
14554 || GET_CODE (x) == LABEL_REF)
14556 if (ASSEMBLER_DIALECT == ASM_ATT)
14559 fputs ("OFFSET FLAT:", file);
14562 if (CONST_INT_P (x))
14563 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
14564 else if (flag_pic || MACHOPIC_INDIRECT)
14565 output_pic_addr_const (file, x, code);
14567 output_addr_const (file, x);
14572 ix86_print_operand_punct_valid_p (unsigned char code)
14574 return (code == '@' || code == '*' || code == '+'
14575 || code == '&' || code == ';');
14578 /* Print a memory operand whose address is ADDR. */
14581 ix86_print_operand_address (FILE *file, rtx addr)
14583 struct ix86_address parts;
14584 rtx base, index, disp;
14586 int ok = ix86_decompose_address (addr, &parts);
14591 index = parts.index;
14593 scale = parts.scale;
14601 if (ASSEMBLER_DIALECT == ASM_ATT)
14603 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
14606 gcc_unreachable ();
14609 /* Use one byte shorter RIP relative addressing for 64bit mode. */
14610 if (TARGET_64BIT && !base && !index)
14614 if (GET_CODE (disp) == CONST
14615 && GET_CODE (XEXP (disp, 0)) == PLUS
14616 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14617 symbol = XEXP (XEXP (disp, 0), 0);
14619 if (GET_CODE (symbol) == LABEL_REF
14620 || (GET_CODE (symbol) == SYMBOL_REF
14621 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
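/* For illustration ("foo" is a hypothetical symbol): a bare symbolic
   displacement in 64bit mode is printed RIP-relative, e.g.
   "movl foo(%rip), %eax", which saves the SIB byte that the absolute
   form "movl foo, %eax" would need.  */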
14624 if (!base && !index)
/* A displacement-only address requires special attention.  */
14628 if (CONST_INT_P (disp))
14630 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
14631 fputs ("ds:", file);
14632 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
14635 output_pic_addr_const (file, disp, 0);
14637 output_addr_const (file, disp);
14641 if (ASSEMBLER_DIALECT == ASM_ATT)
14646 output_pic_addr_const (file, disp, 0);
14647 else if (GET_CODE (disp) == LABEL_REF)
14648 output_asm_label (disp);
14650 output_addr_const (file, disp);
14655 print_reg (base, 0, file);
14659 print_reg (index, 0, file);
14661 fprintf (file, ",%d", scale);
14667 rtx offset = NULL_RTX;
14671 /* Pull out the offset of a symbol; print any symbol itself. */
14672 if (GET_CODE (disp) == CONST
14673 && GET_CODE (XEXP (disp, 0)) == PLUS
14674 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14676 offset = XEXP (XEXP (disp, 0), 1);
14677 disp = gen_rtx_CONST (VOIDmode,
14678 XEXP (XEXP (disp, 0), 0));
14682 output_pic_addr_const (file, disp, 0);
14683 else if (GET_CODE (disp) == LABEL_REF)
14684 output_asm_label (disp);
14685 else if (CONST_INT_P (disp))
14688 output_addr_const (file, disp);
14694 print_reg (base, 0, file);
14697 if (INTVAL (offset) >= 0)
14699 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14703 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14710 print_reg (index, 0, file);
14712 fprintf (file, "*%d", scale);
14719 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
14722 i386_asm_output_addr_const_extra (FILE *file, rtx x)
14726 if (GET_CODE (x) != UNSPEC)
14729 op = XVECEXP (x, 0, 0);
14730 switch (XINT (x, 1))
14732 case UNSPEC_GOTTPOFF:
14733 output_addr_const (file, op);
14734 /* FIXME: This might be @TPOFF in Sun ld. */
14735 fputs ("@gottpoff", file);
14738 output_addr_const (file, op);
14739 fputs ("@tpoff", file);
14741 case UNSPEC_NTPOFF:
14742 output_addr_const (file, op);
14744 fputs ("@tpoff", file);
14746 fputs ("@ntpoff", file);
14748 case UNSPEC_DTPOFF:
14749 output_addr_const (file, op);
14750 fputs ("@dtpoff", file);
14752 case UNSPEC_GOTNTPOFF:
14753 output_addr_const (file, op);
14755 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
14756 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
14758 fputs ("@gotntpoff", file);
14760 case UNSPEC_INDNTPOFF:
14761 output_addr_const (file, op);
14762 fputs ("@indntpoff", file);
14765 case UNSPEC_MACHOPIC_OFFSET:
14766 output_addr_const (file, op);
14768 machopic_output_function_base_name (file);
14772 case UNSPEC_STACK_CHECK:
14776 gcc_assert (flag_split_stack);
14778 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
14779 offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
14781 gcc_unreachable ();
14784 fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
14795 /* Split one or more double-mode RTL references into pairs of half-mode
14796 references. The RTL can be REG, offsettable MEM, integer constant, or
14797 CONST_DOUBLE. "operands" is a pointer to an array of double-mode RTLs to
14798 split and "num" is its length. lo_half and hi_half are output arrays
14799 that parallel "operands". */
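/* For illustration (a sketch, not part of the original comment):
   splitting a DImode register on a 32bit target yields
   lo_half = (subreg:SI (reg:DI n) 0) and
   hi_half = (subreg:SI (reg:DI n) 4), while an offsettable MEM is
   split into adjusted MEMs at byte offsets 0 and 4.  */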
14802 split_double_mode (enum machine_mode mode, rtx operands[],
14803 int num, rtx lo_half[], rtx hi_half[])
14805 enum machine_mode half_mode;
14811 half_mode = DImode;
14814 half_mode = SImode;
14817 gcc_unreachable ();
14820 byte = GET_MODE_SIZE (half_mode);
14824 rtx op = operands[num];
/* simplify_subreg refuses to split volatile memory addresses,
   but we still have to handle them.  */
14830 lo_half[num] = adjust_address (op, half_mode, 0);
14831 hi_half[num] = adjust_address (op, half_mode, byte);
14835 lo_half[num] = simplify_gen_subreg (half_mode, op,
14836 GET_MODE (op) == VOIDmode
14837 ? mode : GET_MODE (op), 0);
14838 hi_half[num] = simplify_gen_subreg (half_mode, op,
14839 GET_MODE (op) == VOIDmode
14840 ? mode : GET_MODE (op), byte);
14845 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
14846 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
14847 is the expression of the binary operation. The output may either be
14848 emitted here, or returned to the caller, like all output_* functions.
   There is no guarantee that the operands have the same mode, as they
   might be within FLOAT or FLOAT_EXTEND expressions.  */
14853 #ifndef SYSV386_COMPAT
14854 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
14855 wants to fix the assemblers because that causes incompatibility
14856 with gcc. No-one wants to fix gcc because that causes
14857 incompatibility with assemblers... You can use the option of
14858 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
14859 #define SYSV386_COMPAT 1
14863 output_387_binary_op (rtx insn, rtx *operands)
14865 static char buf[40];
14868 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
14870 #ifdef ENABLE_CHECKING
/* Even if we do not want to check the inputs, this documents the
   input constraints, which helps in understanding the following
   code.  */
14873 if (STACK_REG_P (operands[0])
14874 && ((REG_P (operands[1])
14875 && REGNO (operands[0]) == REGNO (operands[1])
14876 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
14877 || (REG_P (operands[2])
14878 && REGNO (operands[0]) == REGNO (operands[2])
14879 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
14880 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
14883 gcc_assert (is_sse);
14886 switch (GET_CODE (operands[3]))
14889 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14890 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14898 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14899 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14907 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14908 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14916 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14917 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14925 gcc_unreachable ();
14932 strcpy (buf, ssep);
14933 if (GET_MODE (operands[0]) == SFmode)
14934 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
14936 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
14940 strcpy (buf, ssep + 1);
14941 if (GET_MODE (operands[0]) == SFmode)
14942 strcat (buf, "ss\t{%2, %0|%0, %2}");
14944 strcat (buf, "sd\t{%2, %0|%0, %2}");
14950 switch (GET_CODE (operands[3]))
14954 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
14956 rtx temp = operands[2];
14957 operands[2] = operands[1];
14958 operands[1] = temp;
/* We know operands[0] == operands[1].  */
14963 if (MEM_P (operands[2]))
14969 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14971 if (STACK_TOP_P (operands[0]))
14972 /* How is it that we are storing to a dead operand[2]?
14973 Well, presumably operands[1] is dead too. We can't
14974 store the result to st(0) as st(0) gets popped on this
14975 instruction. Instead store to operands[2] (which I
14976 think has to be st(1)). st(1) will be popped later.
14977 gcc <= 2.8.1 didn't have this check and generated
14978 assembly code that the Unixware assembler rejected. */
14979 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14981 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14985 if (STACK_TOP_P (operands[0]))
14986 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14988 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14993 if (MEM_P (operands[1]))
14999 if (MEM_P (operands[2]))
15005 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
15008 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
15009 derived assemblers, confusingly reverse the direction of
15010 the operation for fsub{r} and fdiv{r} when the
15011 destination register is not st(0). The Intel assembler
15012 doesn't have this brain damage. Read !SYSV386_COMPAT to
15013 figure out what the hardware really does. */
15014 if (STACK_TOP_P (operands[0]))
15015 p = "{p\t%0, %2|rp\t%2, %0}";
15017 p = "{rp\t%2, %0|p\t%0, %2}";
15019 if (STACK_TOP_P (operands[0]))
15020 /* As above for fmul/fadd, we can't store to st(0). */
15021 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
15023 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
15028 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
15031 if (STACK_TOP_P (operands[0]))
15032 p = "{rp\t%0, %1|p\t%1, %0}";
15034 p = "{p\t%1, %0|rp\t%0, %1}";
15036 if (STACK_TOP_P (operands[0]))
15037 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
15039 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
15044 if (STACK_TOP_P (operands[0]))
15046 if (STACK_TOP_P (operands[1]))
15047 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
15049 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
15052 else if (STACK_TOP_P (operands[1]))
15055 p = "{\t%1, %0|r\t%0, %1}";
15057 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
15063 p = "{r\t%2, %0|\t%0, %2}";
15065 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
15071 gcc_unreachable ();
15078 /* Return needed mode for entity in optimize_mode_switching pass. */
15081 ix86_mode_needed (int entity, rtx insn)
15083 enum attr_i387_cw mode;
/* The mode UNINITIALIZED is used to store the control word after a
   function call or ASM pattern.  The mode ANY specifies that the
   function has no requirements on the control word and makes no
   changes in the bits we are interested in.  */
15091 || (NONJUMP_INSN_P (insn)
15092 && (asm_noperands (PATTERN (insn)) >= 0
15093 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
15094 return I387_CW_UNINITIALIZED;
15096 if (recog_memoized (insn) < 0)
15097 return I387_CW_ANY;
15099 mode = get_attr_i387_cw (insn);
15104 if (mode == I387_CW_TRUNC)
15109 if (mode == I387_CW_FLOOR)
15114 if (mode == I387_CW_CEIL)
15119 if (mode == I387_CW_MASK_PM)
15124 gcc_unreachable ();
15127 return I387_CW_ANY;
/* Output code to initialize control word copies used by trunc?f?i and
   rounding patterns.  CURRENT_MODE is set to the current control word,
   while NEW_MODE is set to the new control word.  */
15135 emit_i387_cw_initialization (int mode)
15137 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
15140 enum ix86_stack_slot slot;
15142 rtx reg = gen_reg_rtx (HImode);
15144 emit_insn (gen_x86_fnstcw_1 (stored_mode));
15145 emit_move_insn (reg, copy_rtx (stored_mode));
15147 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
15148 || optimize_function_for_size_p (cfun))
15152 case I387_CW_TRUNC:
15153 /* round toward zero (truncate) */
15154 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
15155 slot = SLOT_CW_TRUNC;
15158 case I387_CW_FLOOR:
15159 /* round down toward -oo */
15160 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15161 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
15162 slot = SLOT_CW_FLOOR;
15166 /* round up toward +oo */
15167 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15168 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
15169 slot = SLOT_CW_CEIL;
15172 case I387_CW_MASK_PM:
15173 /* mask precision exception for nearbyint() */
15174 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15175 slot = SLOT_CW_MASK_PM;
15179 gcc_unreachable ();
15186 case I387_CW_TRUNC:
15187 /* round toward zero (truncate) */
15188 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
15189 slot = SLOT_CW_TRUNC;
15192 case I387_CW_FLOOR:
15193 /* round down toward -oo */
15194 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
15195 slot = SLOT_CW_FLOOR;
15199 /* round up toward +oo */
15200 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
15201 slot = SLOT_CW_CEIL;
15204 case I387_CW_MASK_PM:
15205 /* mask precision exception for nearbyint() */
15206 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15207 slot = SLOT_CW_MASK_PM;
15211 gcc_unreachable ();
15215 gcc_assert (slot < MAX_386_STACK_LOCALS);
15217 new_mode = assign_386_stack_local (HImode, slot);
15218 emit_move_insn (new_mode, reg);
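/* Sketch of the control word bits manipulated above (standard x87
   layout): bits 10-11 are the rounding control -- 00 nearest,
   01 down (0x0400), 10 up (0x0800), 11 truncate (0x0c00) -- and
   bit 5 (0x0020) is the precision exception mask used for
   nearbyint.  */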
15221 /* Output code for INSN to convert a float to a signed int. OPERANDS
15222 are the insn operands. The output may be [HSD]Imode and the input
15223 operand may be [SDX]Fmode. */
15226 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
15228 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15229 int dimode_p = GET_MODE (operands[0]) == DImode;
15230 int round_mode = get_attr_i387_cw (insn);
15232 /* Jump through a hoop or two for DImode, since the hardware has no
15233 non-popping instruction. We used to do this a different way, but
15234 that was somewhat fragile and broke with post-reload splitters. */
15235 if ((dimode_p || fisttp) && !stack_top_dies)
15236 output_asm_insn ("fld\t%y1", operands);
15238 gcc_assert (STACK_TOP_P (operands[1]));
15239 gcc_assert (MEM_P (operands[0]));
15240 gcc_assert (GET_MODE (operands[1]) != TFmode);
15243 output_asm_insn ("fisttp%Z0\t%0", operands);
15246 if (round_mode != I387_CW_ANY)
15247 output_asm_insn ("fldcw\t%3", operands);
15248 if (stack_top_dies || dimode_p)
15249 output_asm_insn ("fistp%Z0\t%0", operands);
15251 output_asm_insn ("fist%Z0\t%0", operands);
15252 if (round_mode != I387_CW_ANY)
15253 output_asm_insn ("fldcw\t%2", operands);
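/* A typical emitted sequence for the non-fisttp DImode case with an
   explicit rounding mode is (illustrative):
     fldcw    %3       # switch to the truncating control word
     fistp%Z0 %0       # convert and pop
     fldcw    %2       # restore the previous control word  */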
15259 /* Output code for x87 ffreep insn. The OPNO argument, which may only
15260 have the values zero or one, indicates the ffreep insn's operand
15261 from the OPERANDS array. */
15263 static const char *
15264 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
15266 if (TARGET_USE_FFREEP)
15267 #ifdef HAVE_AS_IX86_FFREEP
15268 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
15271 static char retval[32];
15272 int regno = REGNO (operands[opno]);
15274 gcc_assert (FP_REGNO_P (regno));
15276 regno -= FIRST_STACK_REG;
15278 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
15283 return opno ? "fstp\t%y1" : "fstp\t%y0";
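/* E.g. for st(2) the raw-encoding path above emits ASM_SHORT "0xc2df";
   its little-endian bytes df c2 are exactly the ffreep %st(2)
   instruction (opcode DF C0+i), for assemblers lacking
   HAVE_AS_IX86_FFREEP.  */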
15287 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
15288 should be used. UNORDERED_P is true when fucom should be used. */
15291 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
15293 int stack_top_dies;
15294 rtx cmp_op0, cmp_op1;
15295 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
15299 cmp_op0 = operands[0];
15300 cmp_op1 = operands[1];
15304 cmp_op0 = operands[1];
15305 cmp_op1 = operands[2];
15310 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
15311 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
15312 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
15313 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
15315 if (GET_MODE (operands[0]) == SFmode)
15317 return &ucomiss[TARGET_AVX ? 0 : 1];
15319 return &comiss[TARGET_AVX ? 0 : 1];
15322 return &ucomisd[TARGET_AVX ? 0 : 1];
15324 return &comisd[TARGET_AVX ? 0 : 1];
15327 gcc_assert (STACK_TOP_P (cmp_op0));
15329 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15331 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
15333 if (stack_top_dies)
15335 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
15336 return output_387_ffreep (operands, 1);
15339 return "ftst\n\tfnstsw\t%0";
15342 if (STACK_REG_P (cmp_op1)
15344 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
15345 && REGNO (cmp_op1) != FIRST_STACK_REG)
/* If both the top of the 387 stack and the other operand (also a
   stack register) die, then this must be a `fcompp' float
   compare.  */
15353 /* There is no double popping fcomi variant. Fortunately,
15354 eflags is immune from the fstp's cc clobbering. */
15356 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
15358 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
15359 return output_387_ffreep (operands, 0);
15364 return "fucompp\n\tfnstsw\t%0";
15366 return "fcompp\n\tfnstsw\t%0";
15371 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
15373 static const char * const alt[16] =
15375 "fcom%Z2\t%y2\n\tfnstsw\t%0",
15376 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
15377 "fucom%Z2\t%y2\n\tfnstsw\t%0",
15378 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
15380 "ficom%Z2\t%y2\n\tfnstsw\t%0",
15381 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
15385 "fcomi\t{%y1, %0|%0, %y1}",
15386 "fcomip\t{%y1, %0|%0, %y1}",
15387 "fucomi\t{%y1, %0|%0, %y1}",
15388 "fucomip\t{%y1, %0|%0, %y1}",
15399 mask = eflags_p << 3;
15400 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
15401 mask |= unordered_p << 1;
15402 mask |= stack_top_dies;
15404 gcc_assert (mask < 16);
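/* Worked example: an fcomi-style compare (eflags_p = 1) of two FP
   stack registers (not MODE_INT) with unordered_p = 1 and a dying
   stack top gives mask = (1 << 3) | (1 << 1) | 1 = 11, selecting
   "fucomip" from the table above.  */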
15413 ix86_output_addr_vec_elt (FILE *file, int value)
15415 const char *directive = ASM_LONG;
15419 directive = ASM_QUAD;
15421 gcc_assert (!TARGET_64BIT);
15424 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
15428 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
15430 const char *directive = ASM_LONG;
15433 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
15434 directive = ASM_QUAD;
15436 gcc_assert (!TARGET_64BIT);
15438 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
15439 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
15440 fprintf (file, "%s%s%d-%s%d\n",
15441 directive, LPREFIX, value, LPREFIX, rel);
15442 else if (HAVE_AS_GOTOFF_IN_DATA)
15443 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
15445 else if (TARGET_MACHO)
15447 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
15448 machopic_output_function_base_name (file);
15453 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
15454 GOT_SYMBOL_NAME, LPREFIX, value);
/* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
   for the current target and optimization settings.  */
15461 ix86_expand_clear (rtx dest)
15465 /* We play register width games, which are only valid after reload. */
15466 gcc_assert (reload_completed);
15468 /* Avoid HImode and its attendant prefix byte. */
15469 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
15470 dest = gen_rtx_REG (SImode, REGNO (dest));
15471 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
15473 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
15474 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
15476 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15477 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
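/* E.g. clearing %eax is emitted as "xorl %eax, %eax" (with the flags
   clobber recorded in the PARALLEL above) rather than
   "movl $0, %eax", except when the tuning asks for mov0 and we are
   not optimizing this insn for speed.  */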
15483 /* X is an unchanging MEM. If it is a constant pool reference, return
15484 the constant pool rtx, else NULL. */
15487 maybe_get_pool_constant (rtx x)
15489 x = ix86_delegitimize_address (XEXP (x, 0));
15491 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
15492 return get_pool_constant (x);
15498 ix86_expand_move (enum machine_mode mode, rtx operands[])
15501 enum tls_model model;
15506 if (GET_CODE (op1) == SYMBOL_REF)
15508 model = SYMBOL_REF_TLS_MODEL (op1);
15511 op1 = legitimize_tls_address (op1, model, true);
15512 op1 = force_operand (op1, op0);
15516 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15517 && SYMBOL_REF_DLLIMPORT_P (op1))
15518 op1 = legitimize_dllimport_symbol (op1, false);
15520 else if (GET_CODE (op1) == CONST
15521 && GET_CODE (XEXP (op1, 0)) == PLUS
15522 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
15524 rtx addend = XEXP (XEXP (op1, 0), 1);
15525 rtx symbol = XEXP (XEXP (op1, 0), 0);
15528 model = SYMBOL_REF_TLS_MODEL (symbol);
15530 tmp = legitimize_tls_address (symbol, model, true);
15531 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15532 && SYMBOL_REF_DLLIMPORT_P (symbol))
15533 tmp = legitimize_dllimport_symbol (symbol, true);
15537 tmp = force_operand (tmp, NULL);
15538 op1 = expand_simple_binop (Pmode, PLUS, tmp, addend,
15539 op0, 1, OPTAB_DIRECT);
15545 if ((flag_pic || MACHOPIC_INDIRECT)
15546 && mode == Pmode && symbolic_operand (op1, Pmode))
15548 if (TARGET_MACHO && !TARGET_64BIT)
15551 /* dynamic-no-pic */
15552 if (MACHOPIC_INDIRECT)
15554 rtx temp = ((reload_in_progress
15555 || ((op0 && REG_P (op0))
15557 ? op0 : gen_reg_rtx (Pmode));
15558 op1 = machopic_indirect_data_reference (op1, temp);
15560 op1 = machopic_legitimize_pic_address (op1, mode,
15561 temp == op1 ? 0 : temp);
15563 if (op0 != op1 && GET_CODE (op0) != MEM)
15565 rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
15569 if (GET_CODE (op0) == MEM)
15570 op1 = force_reg (Pmode, op1);
15574 if (GET_CODE (temp) != REG)
15575 temp = gen_reg_rtx (Pmode);
15576 temp = legitimize_pic_address (op1, temp);
15581 /* dynamic-no-pic */
15587 op1 = force_reg (Pmode, op1);
15588 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
15590 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
15591 op1 = legitimize_pic_address (op1, reg);
15600 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
15601 || !push_operand (op0, mode))
15603 op1 = force_reg (mode, op1);
15605 if (push_operand (op0, mode)
15606 && ! general_no_elim_operand (op1, mode))
15607 op1 = copy_to_mode_reg (mode, op1);
/* Force large constants in 64bit compilation into a register
   to get them CSEed.  */
15611 if (can_create_pseudo_p ()
15612 && (mode == DImode) && TARGET_64BIT
15613 && immediate_operand (op1, mode)
15614 && !x86_64_zext_immediate_operand (op1, VOIDmode)
15615 && !register_operand (op0, mode)
15617 op1 = copy_to_mode_reg (mode, op1);
15619 if (can_create_pseudo_p ()
15620 && FLOAT_MODE_P (mode)
15621 && GET_CODE (op1) == CONST_DOUBLE)
/* If we are loading a floating point constant to a register,
   force the value to memory now, since we'll get better code
   out of the back end.  */
15627 op1 = validize_mem (force_const_mem (mode, op1));
15628 if (!register_operand (op0, mode))
15630 rtx temp = gen_reg_rtx (mode);
15631 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
15632 emit_move_insn (op0, temp);
15638 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15642 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
15644 rtx op0 = operands[0], op1 = operands[1];
15645 unsigned int align = GET_MODE_ALIGNMENT (mode);
/* Force constants other than zero into memory.  We do not know how
   the instructions used to build constants modify the upper 64 bits
   of the register; once we have that information we may be able
   to handle some of them more efficiently.  */
15651 if (can_create_pseudo_p ()
15652 && register_operand (op0, mode)
15653 && (CONSTANT_P (op1)
15654 || (GET_CODE (op1) == SUBREG
15655 && CONSTANT_P (SUBREG_REG (op1))))
15656 && !standard_sse_constant_p (op1))
15657 op1 = validize_mem (force_const_mem (mode, op1));
/* We need to check memory alignment for SSE mode since the attribute
   can make operands unaligned.  */
15661 if (can_create_pseudo_p ()
15662 && SSE_REG_MODE_P (mode)
15663 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
15664 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
15668 /* ix86_expand_vector_move_misalign() does not like constants ... */
15669 if (CONSTANT_P (op1)
15670 || (GET_CODE (op1) == SUBREG
15671 && CONSTANT_P (SUBREG_REG (op1))))
15672 op1 = validize_mem (force_const_mem (mode, op1));
15674 /* ... nor both arguments in memory. */
15675 if (!register_operand (op0, mode)
15676 && !register_operand (op1, mode))
15677 op1 = force_reg (mode, op1);
15679 tmp[0] = op0; tmp[1] = op1;
15680 ix86_expand_vector_move_misalign (mode, tmp);
15684 /* Make operand1 a register if it isn't already. */
15685 if (can_create_pseudo_p ()
15686 && !register_operand (op0, mode)
15687 && !register_operand (op1, mode))
15689 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
15693 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15696 /* Split 32-byte AVX unaligned load and store if needed. */
15699 ix86_avx256_split_vector_move_misalign (rtx op0, rtx op1)
15702 rtx (*extract) (rtx, rtx, rtx);
15703 rtx (*move_unaligned) (rtx, rtx);
15704 enum machine_mode mode;
15706 switch (GET_MODE (op0))
15709 gcc_unreachable ();
15711 extract = gen_avx_vextractf128v32qi;
15712 move_unaligned = gen_avx_movdqu256;
15716 extract = gen_avx_vextractf128v8sf;
15717 move_unaligned = gen_avx_movups256;
15721 extract = gen_avx_vextractf128v4df;
15722 move_unaligned = gen_avx_movupd256;
15727 if (MEM_P (op1) && TARGET_AVX256_SPLIT_UNALIGNED_LOAD)
15729 rtx r = gen_reg_rtx (mode);
15730 m = adjust_address (op1, mode, 0);
15731 emit_move_insn (r, m);
15732 m = adjust_address (op1, mode, 16);
15733 r = gen_rtx_VEC_CONCAT (GET_MODE (op0), r, m);
15734 emit_move_insn (op0, r);
15736 else if (MEM_P (op0) && TARGET_AVX256_SPLIT_UNALIGNED_STORE)
15738 m = adjust_address (op0, mode, 0);
15739 emit_insn (extract (m, op1, const0_rtx));
15740 m = adjust_address (op0, mode, 16);
15741 emit_insn (extract (m, op1, const1_rtx));
15744 emit_insn (move_unaligned (op0, op1));
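/* Rough shape of the split load path above (AT&T sketch, registers
   hypothetical):
     vmovups     mem,    %xmm0
     vinsertf128 $1, mem+16, %ymm0, %ymm0   # from the VEC_CONCAT
   and of the split store path: two vextractf128 halves to mem and
   mem+16.  */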
15747 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
15748 straight to ix86_expand_vector_move. */
/* Code generation for scalar reg-reg moves of single and double precision data:
     if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
       movaps reg, reg
     else
       movss reg, reg
     if (x86_sse_partial_reg_dependency == true)
       movapd reg, reg
     else
       movsd reg, reg

   Code generation for scalar loads of double precision data:
     if (x86_sse_split_regs == true)
       movlpd mem, reg (gas syntax)
     else
       movsd mem, reg

   Code generation for unaligned packed loads of single precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
     if (x86_sse_unaligned_move_optimal)
       movups mem, reg
     else if (x86_sse_partial_reg_dependency == true)
       xorps reg, reg; movlps mem, reg; movhps mem+8, reg
     else
       movlps mem, reg; movhps mem+8, reg

   Code generation for unaligned packed loads of double precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
     if (x86_sse_unaligned_move_optimal)
       movupd mem, reg
     else if (x86_sse_split_regs == true)
       movlpd mem, reg; movhpd mem+8, reg
     else
       movsd mem, reg; movhpd mem+8, reg  */
15800 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
15809 switch (GET_MODE_CLASS (mode))
15811 case MODE_VECTOR_INT:
15813 switch (GET_MODE_SIZE (mode))
15816 /* If we're optimizing for size, movups is the smallest. */
15817 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15819 op0 = gen_lowpart (V4SFmode, op0);
15820 op1 = gen_lowpart (V4SFmode, op1);
15821 emit_insn (gen_avx_movups (op0, op1));
15824 op0 = gen_lowpart (V16QImode, op0);
15825 op1 = gen_lowpart (V16QImode, op1);
15826 emit_insn (gen_avx_movdqu (op0, op1));
15829 op0 = gen_lowpart (V32QImode, op0);
15830 op1 = gen_lowpart (V32QImode, op1);
15831 ix86_avx256_split_vector_move_misalign (op0, op1);
15834 gcc_unreachable ();
15837 case MODE_VECTOR_FLOAT:
15838 op0 = gen_lowpart (mode, op0);
15839 op1 = gen_lowpart (mode, op1);
15844 emit_insn (gen_avx_movups (op0, op1));
15847 ix86_avx256_split_vector_move_misalign (op0, op1);
15850 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15852 op0 = gen_lowpart (V4SFmode, op0);
15853 op1 = gen_lowpart (V4SFmode, op1);
15854 emit_insn (gen_avx_movups (op0, op1));
15857 emit_insn (gen_avx_movupd (op0, op1));
15860 ix86_avx256_split_vector_move_misalign (op0, op1);
15863 gcc_unreachable ();
15868 gcc_unreachable ();
15876 /* If we're optimizing for size, movups is the smallest. */
15877 if (optimize_insn_for_size_p ()
15878 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15880 op0 = gen_lowpart (V4SFmode, op0);
15881 op1 = gen_lowpart (V4SFmode, op1);
15882 emit_insn (gen_sse_movups (op0, op1));
/* ??? If we have typed data, then it would appear that using
   movdqu is the only way to get unaligned data loaded with
   integrity intact.  */
15889 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15891 op0 = gen_lowpart (V16QImode, op0);
15892 op1 = gen_lowpart (V16QImode, op1);
15893 emit_insn (gen_sse2_movdqu (op0, op1));
15897 if (TARGET_SSE2 && mode == V2DFmode)
15901 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15903 op0 = gen_lowpart (V2DFmode, op0);
15904 op1 = gen_lowpart (V2DFmode, op1);
15905 emit_insn (gen_sse2_movupd (op0, op1));
15909 /* When SSE registers are split into halves, we can avoid
15910 writing to the top half twice. */
15911 if (TARGET_SSE_SPLIT_REGS)
15913 emit_clobber (op0);
15918 /* ??? Not sure about the best option for the Intel chips.
15919 The following would seem to satisfy; the register is
15920 entirely cleared, breaking the dependency chain. We
15921 then store to the upper half, with a dependency depth
15922 of one. A rumor has it that Intel recommends two movsd
15923 followed by an unpacklpd, but this is unconfirmed. And
15924 given that the dependency depth of the unpacklpd would
15925 still be one, I'm not sure why this would be better. */
15926 zero = CONST0_RTX (V2DFmode);
15929 m = adjust_address (op1, DFmode, 0);
15930 emit_insn (gen_sse2_loadlpd (op0, zero, m));
15931 m = adjust_address (op1, DFmode, 8);
15932 emit_insn (gen_sse2_loadhpd (op0, op0, m));
15936 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15938 op0 = gen_lowpart (V4SFmode, op0);
15939 op1 = gen_lowpart (V4SFmode, op1);
15940 emit_insn (gen_sse_movups (op0, op1));
15944 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
15945 emit_move_insn (op0, CONST0_RTX (mode));
15947 emit_clobber (op0);
15949 if (mode != V4SFmode)
15950 op0 = gen_lowpart (V4SFmode, op0);
15951 m = adjust_address (op1, V2SFmode, 0);
15952 emit_insn (gen_sse_loadlps (op0, op0, m));
15953 m = adjust_address (op1, V2SFmode, 8);
15954 emit_insn (gen_sse_loadhps (op0, op0, m));
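/* The V4SF load path just above corresponds roughly to:
     xorps  %xmm0, %xmm0    # or just a clobber, per tuning
     movlps mem,   %xmm0
     movhps mem+8, %xmm0  */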
15957 else if (MEM_P (op0))
15959 /* If we're optimizing for size, movups is the smallest. */
15960 if (optimize_insn_for_size_p ()
15961 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15963 op0 = gen_lowpart (V4SFmode, op0);
15964 op1 = gen_lowpart (V4SFmode, op1);
15965 emit_insn (gen_sse_movups (op0, op1));
/* ??? Similar to above, only less clear because of "typeless
   stores".  */
15971 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
15972 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15974 op0 = gen_lowpart (V16QImode, op0);
15975 op1 = gen_lowpart (V16QImode, op1);
15976 emit_insn (gen_sse2_movdqu (op0, op1));
15980 if (TARGET_SSE2 && mode == V2DFmode)
15982 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15984 op0 = gen_lowpart (V2DFmode, op0);
15985 op1 = gen_lowpart (V2DFmode, op1);
15986 emit_insn (gen_sse2_movupd (op0, op1));
15990 m = adjust_address (op0, DFmode, 0);
15991 emit_insn (gen_sse2_storelpd (m, op1));
15992 m = adjust_address (op0, DFmode, 8);
15993 emit_insn (gen_sse2_storehpd (m, op1));
15998 if (mode != V4SFmode)
15999 op1 = gen_lowpart (V4SFmode, op1);
16001 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
16003 op0 = gen_lowpart (V4SFmode, op0);
16004 emit_insn (gen_sse_movups (op0, op1));
16008 m = adjust_address (op0, V2SFmode, 0);
16009 emit_insn (gen_sse_storelps (m, op1));
16010 m = adjust_address (op0, V2SFmode, 8);
16011 emit_insn (gen_sse_storehps (m, op1));
16016 gcc_unreachable ();
16019 /* Expand a push in MODE. This is some mode for which we do not support
16020 proper push instructions, at least from the registers that we expect
16021 the value to live in. */
16024 ix86_expand_push (enum machine_mode mode, rtx x)
16028 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
16029 GEN_INT (-GET_MODE_SIZE (mode)),
16030 stack_pointer_rtx, 1, OPTAB_DIRECT);
16031 if (tmp != stack_pointer_rtx)
16032 emit_move_insn (stack_pointer_rtx, tmp);
16034 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
/* When we push an operand onto the stack, it has to be aligned at least
   at the function argument boundary.  However since we don't have
   the argument type, we can't determine the actual argument
   boundary.  */
16040 emit_move_insn (tmp, x);
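/* A sketch of the expansion for a 16-byte mode on 32-bit targets
   (operands hypothetical):
     subl $16, %esp
     <move of X to (%esp)>  */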
16043 /* Helper function of ix86_fixup_binary_operands to canonicalize
16044 operand order. Returns true if the operands should be swapped. */
16047 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
16050 rtx dst = operands[0];
16051 rtx src1 = operands[1];
16052 rtx src2 = operands[2];
16054 /* If the operation is not commutative, we can't do anything. */
16055 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
16058 /* Highest priority is that src1 should match dst. */
16059 if (rtx_equal_p (dst, src1))
16061 if (rtx_equal_p (dst, src2))
16064 /* Next highest priority is that immediate constants come second. */
16065 if (immediate_operand (src2, mode))
16067 if (immediate_operand (src1, mode))
16070 /* Lowest priority is that memory references should come second. */
16080 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
16081 destination to use for the operation. If different from the true
16082 destination in operands[0], a copy operation will be required. */
16085 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
16088 rtx dst = operands[0];
16089 rtx src1 = operands[1];
16090 rtx src2 = operands[2];
16092 /* Canonicalize operand order. */
16093 if (ix86_swap_binary_operands_p (code, mode, operands))
16097 /* It is invalid to swap operands of different modes. */
16098 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
16105 /* Both source operands cannot be in memory. */
16106 if (MEM_P (src1) && MEM_P (src2))
16108 /* Optimization: Only read from memory once. */
16109 if (rtx_equal_p (src1, src2))
16111 src2 = force_reg (mode, src2);
16115 src2 = force_reg (mode, src2);
16118 /* If the destination is memory, and we do not have matching source
16119 operands, do things in registers. */
16120 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16121 dst = gen_reg_rtx (mode);
16123 /* Source 1 cannot be a constant. */
16124 if (CONSTANT_P (src1))
16125 src1 = force_reg (mode, src1);
16127 /* Source 1 cannot be a non-matching memory. */
16128 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16129 src1 = force_reg (mode, src1);
16131 operands[1] = src1;
16132 operands[2] = src2;
16136 /* Similarly, but assume that the destination has already been
16137 set up properly. */
16140 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
16141 enum machine_mode mode, rtx operands[])
16143 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
16144 gcc_assert (dst == operands[0]);
/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine, rather than just general_operand, which will allow 3
   separate memory references (one output, two input) in a single insn.  */
16152 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
16155 rtx src1, src2, dst, op, clob;
16157 dst = ix86_fixup_binary_operands (code, mode, operands);
16158 src1 = operands[1];
16159 src2 = operands[2];
16161 /* Emit the instruction. */
16163 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
16164 if (reload_in_progress)
16166 /* Reload doesn't know about the flags register, and doesn't know that
16167 it doesn't want to clobber it. We can only do this with PLUS. */
16168 gcc_assert (code == PLUS);
16171 else if (reload_completed
16173 && !rtx_equal_p (dst, src1))
16175 /* This is going to be an LEA; avoid splitting it later. */
16180 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16181 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16184 /* Fix up the destination if needed. */
16185 if (dst != operands[0])
16186 emit_move_insn (operands[0], dst);
16189 /* Return TRUE or FALSE depending on whether the binary operator meets the
16190 appropriate constraints. */
16193 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
16196 rtx dst = operands[0];
16197 rtx src1 = operands[1];
16198 rtx src2 = operands[2];
16200 /* Both source operands cannot be in memory. */
16201 if (MEM_P (src1) && MEM_P (src2))
16204 /* Canonicalize operand order for commutative operators. */
16205 if (ix86_swap_binary_operands_p (code, mode, operands))
16212 /* If the destination is memory, we must have a matching source operand. */
16213 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16216 /* Source 1 cannot be a constant. */
16217 if (CONSTANT_P (src1))
16220 /* Source 1 cannot be a non-matching memory. */
16221 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16223 /* Support "andhi/andsi/anddi" as a zero-extending move. */
16224 return (code == AND
16227 || (TARGET_64BIT && mode == DImode))
16228 && CONST_INT_P (src2)
16229 && (INTVAL (src2) == 0xff
16230 || INTVAL (src2) == 0xffff));
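/* E.g. "reg = mem & 0xff" is accepted despite the non-matching memory
   source because it can be emitted as a zero-extending load
   (movzbl mem, reg) instead of a two-operand AND.  */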
/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine, rather than just general_operand, which will allow 2
   separate memory references (one output, one input) in a single insn.  */
16241 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
16244 int matching_memory;
16245 rtx src, dst, op, clob;
16250 /* If the destination is memory, and we do not have matching source
16251 operands, do things in registers. */
16252 matching_memory = 0;
16255 if (rtx_equal_p (dst, src))
16256 matching_memory = 1;
16258 dst = gen_reg_rtx (mode);
16261 /* When source operand is memory, destination must match. */
16262 if (MEM_P (src) && !matching_memory)
16263 src = force_reg (mode, src);
16265 /* Emit the instruction. */
16267 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
16268 if (reload_in_progress || code == NOT)
16270 /* Reload doesn't know about the flags register, and doesn't know that
16271 it doesn't want to clobber it. */
16272 gcc_assert (code == NOT);
16277 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16278 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16281 /* Fix up the destination if needed. */
16282 if (dst != operands[0])
16283 emit_move_insn (operands[0], dst);
/* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
   divisor are within the range [0-255].  */
16290 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
16293 rtx end_label, qimode_label;
16294 rtx insn, div, mod;
16295 rtx scratch, tmp0, tmp1, tmp2;
16296 rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
16297 rtx (*gen_zero_extend) (rtx, rtx);
16298 rtx (*gen_test_ccno_1) (rtx, rtx);
16303 gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
16304 gen_test_ccno_1 = gen_testsi_ccno_1;
16305 gen_zero_extend = gen_zero_extendqisi2;
16308 gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
16309 gen_test_ccno_1 = gen_testdi_ccno_1;
16310 gen_zero_extend = gen_zero_extendqidi2;
16313 gcc_unreachable ();
16316 end_label = gen_label_rtx ();
16317 qimode_label = gen_label_rtx ();
16319 scratch = gen_reg_rtx (mode);
/* Use 8bit unsigned divmod if dividend and divisor are within
   the range [0-255].  */
16323 emit_move_insn (scratch, operands[2]);
16324 scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
16325 scratch, 1, OPTAB_DIRECT);
16326 emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
16327 tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
16328 tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
16329 tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
16330 gen_rtx_LABEL_REF (VOIDmode, qimode_label),
16332 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
16333 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16334 JUMP_LABEL (insn) = qimode_label;
/* Generate the original signed/unsigned divmod.  */
16337 div = gen_divmod4_1 (operands[0], operands[1],
16338 operands[2], operands[3]);
16341 /* Branch to the end. */
16342 emit_jump_insn (gen_jump (end_label));
16345 /* Generate 8bit unsigned divide. */
16346 emit_label (qimode_label);
16347 /* Don't use operands[0] for result of 8bit divide since not all
16348 registers support QImode ZERO_EXTRACT. */
16349 tmp0 = simplify_gen_subreg (HImode, scratch, mode, 0);
16350 tmp1 = simplify_gen_subreg (HImode, operands[2], mode, 0);
16351 tmp2 = simplify_gen_subreg (QImode, operands[3], mode, 0);
16352 emit_insn (gen_udivmodhiqi3 (tmp0, tmp1, tmp2));
div = gen_rtx_DIV (mode, operands[2], operands[3]);
mod = gen_rtx_MOD (mode, operands[2], operands[3]);
div = gen_rtx_UDIV (mode, operands[2], operands[3]);
mod = gen_rtx_UMOD (mode, operands[2], operands[3]);
16365 /* Extract remainder from AH. */
16366 tmp1 = gen_rtx_ZERO_EXTRACT (mode, tmp0, GEN_INT (8), GEN_INT (8));
16367 if (REG_P (operands[1]))
16368 insn = emit_move_insn (operands[1], tmp1);
/* Need a new scratch register since the old one holds the
   result of the 8bit divide.  */
16373 scratch = gen_reg_rtx (mode);
16374 emit_move_insn (scratch, tmp1);
16375 insn = emit_move_insn (operands[1], scratch);
16377 set_unique_reg_note (insn, REG_EQUAL, mod);
16379 /* Zero extend quotient from AL. */
16380 tmp1 = gen_lowpart (QImode, tmp0);
16381 insn = emit_insn (gen_zero_extend (operands[0], tmp1));
16382 set_unique_reg_note (insn, REG_EQUAL, div);
16384 emit_label (end_label);
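/* Sketch of the emitted control flow (labels hypothetical):
       or      divisor, scratch   # scratch = dividend | divisor
       test    $-0x100, scratch
       je      .Lqimode           # both operands fit in 8 bits
       <full-width divmod>
       jmp     .Lend
   .Lqimode:
       <8bit unsigned divide: AL = quotient, AH = remainder>
   .Lend:  */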
16387 #define LEA_SEARCH_THRESHOLD 12
/* Search backward for a non-agu definition of register number REGNO1
   or register number REGNO2 in INSN's basic block until
   1. Pass LEA_SEARCH_THRESHOLD instructions, or
   2. Reach BB boundary, or
   3. Reach agu definition.
   Returns the distance between the non-agu definition point and INSN.
   If no definition point, returns -1.  */
16398 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
16401 basic_block bb = BLOCK_FOR_INSN (insn);
16405 if (insn != BB_HEAD (bb))
16407 rtx prev = PREV_INSN (insn);
16408 while (prev && distance < LEA_SEARCH_THRESHOLD)
16410 if (NONDEBUG_INSN_P (prev))
16413 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16414 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16415 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16416 && (regno1 == DF_REF_REGNO (*def_rec)
16417 || regno2 == DF_REF_REGNO (*def_rec)))
16419 if (recog_memoized (prev) < 0
16420 || get_attr_type (prev) != TYPE_LEA)
16424 if (prev == BB_HEAD (bb))
16426 prev = PREV_INSN (prev);
16430 if (distance < LEA_SEARCH_THRESHOLD)
16434 bool simple_loop = false;
16436 FOR_EACH_EDGE (e, ei, bb->preds)
16439 simple_loop = true;
16445 rtx prev = BB_END (bb);
16448 && distance < LEA_SEARCH_THRESHOLD)
16450 if (NONDEBUG_INSN_P (prev))
16453 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16454 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16455 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16456 && (regno1 == DF_REF_REGNO (*def_rec)
16457 || regno2 == DF_REF_REGNO (*def_rec)))
16459 if (recog_memoized (prev) < 0
16460 || get_attr_type (prev) != TYPE_LEA)
16464 prev = PREV_INSN (prev);
16472 /* get_attr_type may modify recog data. We want to make sure
16473 that recog data is valid for instruction INSN, on which
16474 distance_non_agu_define is called. INSN is unchanged here. */
16475 extract_insn_cached (insn);
/* Return the distance between INSN and the next insn that uses
   register number REGNO0 in memory address.  Return -1 if no such
   use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set.  */
16484 distance_agu_use (unsigned int regno0, rtx insn)
16486 basic_block bb = BLOCK_FOR_INSN (insn);
16491 if (insn != BB_END (bb))
16493 rtx next = NEXT_INSN (insn);
16494 while (next && distance < LEA_SEARCH_THRESHOLD)
16496 if (NONDEBUG_INSN_P (next))
16500 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16501 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16502 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16503 && regno0 == DF_REF_REGNO (*use_rec))
16505 /* Return DISTANCE if OP0 is used in memory
16506 address in NEXT. */
16510 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16511 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16512 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16513 && regno0 == DF_REF_REGNO (*def_rec))
16515 /* Return -1 if OP0 is set in NEXT. */
16519 if (next == BB_END (bb))
16521 next = NEXT_INSN (next);
16525 if (distance < LEA_SEARCH_THRESHOLD)
16529 bool simple_loop = false;
16531 FOR_EACH_EDGE (e, ei, bb->succs)
16534 simple_loop = true;
16540 rtx next = BB_HEAD (bb);
16543 && distance < LEA_SEARCH_THRESHOLD)
16545 if (NONDEBUG_INSN_P (next))
16549 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16550 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16551 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16552 && regno0 == DF_REF_REGNO (*use_rec))
16554 /* Return DISTANCE if OP0 is used in memory
16555 address in NEXT. */
16559 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16560 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16561 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16562 && regno0 == DF_REF_REGNO (*def_rec))
16564 /* Return -1 if OP0 is set in NEXT. */
16569 next = NEXT_INSN (next);
/* Define this macro to tune LEA priority vs ADD; it takes effect when
   there is a choice between LEA and ADD.
   Negative value: ADD is preferred over LEA
   Zero: neutral
   Positive value: LEA is preferred over ADD  */
16582 #define IX86_LEA_PRIORITY 2
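/* E.g. with a non-agu definition 2 insns back (dist_define = 2) and
   an address use 4 insns ahead (dist_use = 4), the check below keeps
   the LEA because 2 + IX86_LEA_PRIORITY is not less than 4; were the
   use 5 insns away, ADD would be chosen instead.  */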
/* Return true if it is ok to optimize an ADD operation to LEA
   operation to avoid flag register consumption.  For most processors,
   ADD is faster than LEA.  For processors like ATOM, if the
   destination register of LEA holds an actual address which will be
   used soon, LEA is better and otherwise ADD is better.  */
16591 ix86_lea_for_add_ok (rtx insn, rtx operands[])
16593 unsigned int regno0 = true_regnum (operands[0]);
16594 unsigned int regno1 = true_regnum (operands[1]);
16595 unsigned int regno2 = true_regnum (operands[2]);
/* If a = b + c with a != b and a != c, we must use the lea form.  */
16598 if (regno0 != regno1 && regno0 != regno2)
16601 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16605 int dist_define, dist_use;
16607 /* Return false if REGNO0 isn't used in memory address. */
16608 dist_use = distance_agu_use (regno0, insn);
16612 dist_define = distance_non_agu_define (regno1, regno2, insn);
16613 if (dist_define <= 0)
/* If this insn has both backward non-agu dependence and forward
   agu dependence, the one with the shorter distance takes effect.  */
16618 if ((dist_define + IX86_LEA_PRIORITY) < dist_use)
/* Return true if destination reg of SET_BODY is shift count of
   USE_BODY.  */
16629 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
16635 /* Retrieve destination of SET_BODY. */
16636 switch (GET_CODE (set_body))
16639 set_dest = SET_DEST (set_body);
16640 if (!set_dest || !REG_P (set_dest))
16644 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
16645 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
16653 /* Retrieve shift count of USE_BODY. */
16654 switch (GET_CODE (use_body))
16657 shift_rtx = XEXP (use_body, 1);
16660 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
16661 if (ix86_dep_by_shift_count_body (set_body,
16662 XVECEXP (use_body, 0, i)))
16670 && (GET_CODE (shift_rtx) == ASHIFT
16671 || GET_CODE (shift_rtx) == LSHIFTRT
16672 || GET_CODE (shift_rtx) == ASHIFTRT
16673 || GET_CODE (shift_rtx) == ROTATE
16674 || GET_CODE (shift_rtx) == ROTATERT))
16676 rtx shift_count = XEXP (shift_rtx, 1);
16678 /* Return true if shift count is dest of SET_BODY. */
16679 if (REG_P (shift_count)
16680 && true_regnum (set_dest) == true_regnum (shift_count))
/* Return true if destination reg of SET_INSN is shift count of
   USE_INSN.  */
16691 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
16693 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
16694 PATTERN (use_insn));
16697 /* Return TRUE or FALSE depending on whether the unary operator meets the
16698 appropriate constraints. */
16701 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
16702 enum machine_mode mode ATTRIBUTE_UNUSED,
16703 rtx operands[2] ATTRIBUTE_UNUSED)
16705 /* If one of operands is memory, source and destination must match. */
16706 if ((MEM_P (operands[0])
16707 || MEM_P (operands[1]))
16708 && ! rtx_equal_p (operands[0], operands[1]))
16713 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
16714 are ok, keeping in mind the possible movddup alternative. */
16717 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
16719 if (MEM_P (operands[0]))
16720 return rtx_equal_p (operands[0], operands[1 + high]);
16721 if (MEM_P (operands[1]) && MEM_P (operands[2]))
16722 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
16726 /* Post-reload splitter for converting an SF or DFmode value in an
16727 SSE register into an unsigned SImode. */
16730 ix86_split_convert_uns_si_sse (rtx operands[])
16732 enum machine_mode vecmode;
16733 rtx value, large, zero_or_two31, input, two31, x;
16735 large = operands[1];
16736 zero_or_two31 = operands[2];
16737 input = operands[3];
16738 two31 = operands[4];
16739 vecmode = GET_MODE (large);
16740 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
16742 /* Load up the value into the low element. We must ensure that the other
16743 elements are valid floats -- zero is the easiest such value. */
16746 if (vecmode == V4SFmode)
16747 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
16749 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
16753 input = gen_rtx_REG (vecmode, REGNO (input));
16754 emit_move_insn (value, CONST0_RTX (vecmode));
16755 if (vecmode == V4SFmode)
16756 emit_insn (gen_sse_movss (value, value, input));
16758 emit_insn (gen_sse2_movsd (value, value, input));
16761 emit_move_insn (large, two31);
16762 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
16764 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
16765 emit_insn (gen_rtx_SET (VOIDmode, large, x));
16767 x = gen_rtx_AND (vecmode, zero_or_two31, large);
16768 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
16770 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
16771 emit_insn (gen_rtx_SET (VOIDmode, value, x));
16773 large = gen_rtx_REG (V4SImode, REGNO (large));
16774 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
16776 x = gen_rtx_REG (V4SImode, REGNO (value));
16777 if (vecmode == V4SFmode)
16778 emit_insn (gen_sse2_cvttps2dq (x, value));
16780 emit_insn (gen_sse2_cvttpd2dq (x, value));
16783 emit_insn (gen_xorv4si3 (value, value, large));
16786 /* Convert an unsigned DImode value into a DFmode, using only SSE.
16787 Expects the 64-bit DImode to be supplied in a pair of integral
16788 registers. Requires SSE2; will use SSE3 if available. For x86_32,
16789 -mfpmath=sse, !optimize_size only. */
16792 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
16794 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
16795 rtx int_xmm, fp_xmm;
16796 rtx biases, exponents;
16799 int_xmm = gen_reg_rtx (V4SImode);
16800 if (TARGET_INTER_UNIT_MOVES)
16801 emit_insn (gen_movdi_to_sse (int_xmm, input));
16802 else if (TARGET_SSE_SPLIT_REGS)
16804 emit_clobber (int_xmm);
16805 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
16809 x = gen_reg_rtx (V2DImode);
16810 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
16811 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
16814 x = gen_rtx_CONST_VECTOR (V4SImode,
16815 gen_rtvec (4, GEN_INT (0x43300000UL),
16816 GEN_INT (0x45300000UL),
16817 const0_rtx, const0_rtx));
16818 exponents = validize_mem (force_const_mem (V4SImode, x));
16820 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
16821 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
16823 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
16824 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
16825 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
16826 (0x1.0p84 + double(fp_value_hi_xmm)).
16827 Note these exponents differ by 32. */
16829 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
16831 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
16832 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
16833 real_ldexp (&bias_lo_rvt, &dconst1, 52);
16834 real_ldexp (&bias_hi_rvt, &dconst1, 84);
16835 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
16836 x = const_double_from_real_value (bias_hi_rvt, DFmode);
16837 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
16838 biases = validize_mem (force_const_mem (V2DFmode, biases));
16839 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
16841 /* Add the upper and lower DFmode values together. */
16843 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
16846 x = copy_to_mode_reg (V2DFmode, fp_xmm);
16847 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
16848 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
16851 ix86_expand_vector_extract (false, target, fp_xmm, 0);
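/* Worked example: for input 2^32 + 7 (hi = 1, lo = 7) the two biased
   doubles are 0x1.0p52 + 7 and 2^84 + 2^32; after the bias
   subtraction they hold 7.0 and 4294967296.0, and the final add
   yields 4294967303.0 exactly.  */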
16854 /* Not used, but eases macroization of patterns. */
16856 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
16857 rtx input ATTRIBUTE_UNUSED)
16859 gcc_unreachable ();
16862 /* Convert an unsigned SImode value into a DFmode. Only currently used
16863 for SSE, but applicable anywhere. */
16866 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
16868 REAL_VALUE_TYPE TWO31r;
16871 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
16872 NULL, 1, OPTAB_DIRECT);
16874 fp = gen_reg_rtx (DFmode);
16875 emit_insn (gen_floatsidf2 (fp, x));
16877 real_ldexp (&TWO31r, &dconst1, 31);
16878 x = const_double_from_real_value (TWO31r, DFmode);
16880 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
16882 emit_move_insn (target, x);
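/* E.g. input 0xffffffff: the PLUS wraps to 0x7fffffff, floatsidf
   gives 2147483647.0, and adding 0x1.0p31 yields 4294967295.0;
   input 0x80000000 wraps to 0, giving 2147483648.0.  */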
16885 /* Convert a signed DImode value into a DFmode. Only used for SSE in
16886 32-bit mode; otherwise we have a direct convert instruction. */
16889 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
16891 REAL_VALUE_TYPE TWO32r;
16892 rtx fp_lo, fp_hi, x;
16894 fp_lo = gen_reg_rtx (DFmode);
16895 fp_hi = gen_reg_rtx (DFmode);
16897 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
16899 real_ldexp (&TWO32r, &dconst1, 32);
16900 x = const_double_from_real_value (TWO32r, DFmode);
16901 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
16903 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
16905 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
16908 emit_move_insn (target, x);
16911 /* Convert an unsigned SImode value into a SFmode, using only SSE.
16912 For x86_32, -mfpmath=sse, !optimize_size only. */
16914 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
16916 REAL_VALUE_TYPE ONE16r;
16917 rtx fp_hi, fp_lo, int_hi, int_lo, x;
16919 real_ldexp (&ONE16r, &dconst1, 16);
16920 x = const_double_from_real_value (ONE16r, SFmode);
16921 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
16922 NULL, 0, OPTAB_DIRECT);
16923 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
16924 NULL, 0, OPTAB_DIRECT);
16925 fp_hi = gen_reg_rtx (SFmode);
16926 fp_lo = gen_reg_rtx (SFmode);
16927 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
16928 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
16929 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
16931 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
16933 if (!rtx_equal_p (target, fp_hi))
16934 emit_move_insn (target, fp_hi);
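/* E.g. input 0x12345678: int_lo = 0x5678 and int_hi = 0x1234, so the
   result is 0x1234 * 65536.0 + 0x5678 = 305419896.0, computed exactly
   without an unsigned-to-float instruction.  */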
/* A subroutine of ix86_build_signbit_mask.  If VECT is true,
   then replicate the value for all elements of the vector
   register.  */
16942 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
16949 v = gen_rtvec (4, value, value, value, value);
16950 return gen_rtx_CONST_VECTOR (V4SImode, v);
16954 v = gen_rtvec (2, value, value);
16955 return gen_rtx_CONST_VECTOR (V2DImode, v);
16959 v = gen_rtvec (8, value, value, value, value,
16960 value, value, value, value);
16962 v = gen_rtvec (8, value, CONST0_RTX (SFmode),
16963 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16964 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16965 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16966 return gen_rtx_CONST_VECTOR (V8SFmode, v);
16970 v = gen_rtvec (4, value, value, value, value);
16972 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
16973 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16974 return gen_rtx_CONST_VECTOR (V4SFmode, v);
16978 v = gen_rtvec (4, value, value, value, value);
16980 v = gen_rtvec (4, value, CONST0_RTX (DFmode),
16981 CONST0_RTX (DFmode), CONST0_RTX (DFmode));
16982 return gen_rtx_CONST_VECTOR (V4DFmode, v);
16986 v = gen_rtvec (2, value, value);
16988 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
16989 return gen_rtx_CONST_VECTOR (V2DFmode, v);
16992 gcc_unreachable ();
16996 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
16997 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
16998 for an SSE register. If VECT is true, then replicate the mask for
16999 all elements of the vector register. If INVERT is true, then create
17000 a mask excluding the sign bit. */
17003 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
17005 enum machine_mode vec_mode, imode;
17006 HOST_WIDE_INT hi, lo;
17011 /* Find the sign bit, sign extended to 2*HWI. */
17018 mode = GET_MODE_INNER (mode);
17020 lo = 0x80000000, hi = lo < 0;
17027 mode = GET_MODE_INNER (mode);
17029 if (HOST_BITS_PER_WIDE_INT >= 64)
17030 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
17032 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
17037 vec_mode = VOIDmode;
17038 if (HOST_BITS_PER_WIDE_INT >= 64)
17041 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
17048 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
17052 lo = ~lo, hi = ~hi;
17058 mask = immed_double_const (lo, hi, imode);
17060 vec = gen_rtvec (2, v, mask);
17061 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
17062 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
17069 gcc_unreachable ();
17073 lo = ~lo, hi = ~hi;
17075 /* Force this value into the low part of a fp vector constant. */
17076 mask = immed_double_const (lo, hi, imode);
17077 mask = gen_lowpart (mode, mask);
17079 if (vec_mode == VOIDmode)
17080 return force_reg (mode, mask);
17082 v = ix86_build_const_vector (vec_mode, vect, mask);
17083 return force_reg (vec_mode, v);
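/* E.g. for DFmode the non-inverted mask has only bit 63 set
   (0x8000000000000000), suitable for negating via XORPD, while the
   inverted mask (0x7fffffffffffffff) implements fabs via ANDPD.  */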
17086 /* Generate code for floating point ABS or NEG. */
17089 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
17092 rtx mask, set, dst, src;
17093 bool use_sse = false;
17094 bool vector_mode = VECTOR_MODE_P (mode);
17095 enum machine_mode vmode = mode;
17099 else if (mode == TFmode)
17101 else if (TARGET_SSE_MATH)
17103 use_sse = SSE_FLOAT_MODE_P (mode);
17104 if (mode == SFmode)
17106 else if (mode == DFmode)
17110 /* NEG and ABS performed with SSE use bitwise mask operations.
17111 Create the appropriate mask now. */
17113 mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
17120 set = gen_rtx_fmt_e (code, mode, src);
17121 set = gen_rtx_SET (VOIDmode, dst, set);
17128 use = gen_rtx_USE (VOIDmode, mask);
17130 par = gen_rtvec (2, set, use);
17133 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
17134 par = gen_rtvec (3, set, use, clob);
17136 emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
17142 /* Expand a copysign operation. Special case operand 0 being a constant. */
17145 ix86_expand_copysign (rtx operands[])
17147 enum machine_mode mode, vmode;
17148 rtx dest, op0, op1, mask, nmask;
17150 dest = operands[0];
17154 mode = GET_MODE (dest);
17156 if (mode == SFmode)
17158 else if (mode == DFmode)
17163 if (GET_CODE (op0) == CONST_DOUBLE)
17165 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
17167 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
17168 op0 = simplify_unary_operation (ABS, mode, op0, mode);
17170 if (mode == SFmode || mode == DFmode)
17172 if (op0 == CONST0_RTX (mode))
17173 op0 = CONST0_RTX (vmode);
17176 rtx v = ix86_build_const_vector (vmode, false, op0);
17178 op0 = force_reg (vmode, v);
17181 else if (op0 != CONST0_RTX (mode))
17182 op0 = force_reg (mode, op0);
17184 mask = ix86_build_signbit_mask (vmode, 0, 0);
17186 if (mode == SFmode)
17187 copysign_insn = gen_copysignsf3_const;
17188 else if (mode == DFmode)
17189 copysign_insn = gen_copysigndf3_const;
17191 copysign_insn = gen_copysigntf3_const;
17193 emit_insn (copysign_insn (dest, op0, op1, mask));
17197 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
17199 nmask = ix86_build_signbit_mask (vmode, 0, 1);
17200 mask = ix86_build_signbit_mask (vmode, 0, 0);
17202 if (mode == SFmode)
17203 copysign_insn = gen_copysignsf3_var;
17204 else if (mode == DFmode)
17205 copysign_insn = gen_copysigndf3_var;
17207 copysign_insn = gen_copysigntf3_var;
17209 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
17213 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
17214 be a constant, and so has already been expanded into a vector constant. */
17217 ix86_split_copysign_const (rtx operands[])
17219 enum machine_mode mode, vmode;
17220 rtx dest, op0, mask, x;
17222 dest = operands[0];
17224 mask = operands[3];
17226 mode = GET_MODE (dest);
17227 vmode = GET_MODE (mask);
17229 dest = simplify_gen_subreg (vmode, dest, mode, 0);
17230 x = gen_rtx_AND (vmode, dest, mask);
17231 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17233 if (op0 != CONST0_RTX (vmode))
17235 x = gen_rtx_IOR (vmode, dest, op0);
17236 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
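/* E.g. for copysign (1.25, y): DEST initially holds y, the AND with
   the sign-bit mask keeps only y's sign, and the IOR merges in the
   constant |1.25| prepared by ix86_expand_copysign.  */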
17240 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
17241 so we have to do two masks. */
17244 ix86_split_copysign_var (rtx operands[])
17246 enum machine_mode mode, vmode;
17247 rtx dest, scratch, op0, op1, mask, nmask, x;
17249 dest = operands[0];
17250 scratch = operands[1];
17253 nmask = operands[4];
17254 mask = operands[5];
17256 mode = GET_MODE (dest);
17257 vmode = GET_MODE (mask);
17259 if (rtx_equal_p (op0, op1))
17261 /* Shouldn't happen often (it's useless, obviously), but when it does
17262 we'd generate incorrect code if we continue below. */
17263 emit_move_insn (dest, op0);
17267 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
17269 gcc_assert (REGNO (op1) == REGNO (scratch));
17271 x = gen_rtx_AND (vmode, scratch, mask);
17272 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17275 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17276 x = gen_rtx_NOT (vmode, dest);
17277 x = gen_rtx_AND (vmode, x, op0);
17278 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17282 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
17284 x = gen_rtx_AND (vmode, scratch, mask);
17286 else /* alternative 2,4 */
17288 gcc_assert (REGNO (mask) == REGNO (scratch));
17289 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
17290 x = gen_rtx_AND (vmode, scratch, op1);
17292 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17294 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
17296 dest = simplify_gen_subreg (vmode, op0, mode, 0);
17297 x = gen_rtx_AND (vmode, dest, nmask);
17299 else /* alternative 3,4 */
17301 gcc_assert (REGNO (nmask) == REGNO (dest));
17303 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17304 x = gen_rtx_AND (vmode, dest, op0);
17306 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17309 x = gen_rtx_IOR (vmode, dest, scratch);
17310 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17313 /* Return TRUE or FALSE depending on whether the first SET in INSN
17314 has source and destination with matching CC modes, and that the
17315 CC mode is at least as constrained as REQ_MODE. */
17318 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
17321 enum machine_mode set_mode;
17323 set = PATTERN (insn);
17324 if (GET_CODE (set) == PARALLEL)
17325 set = XVECEXP (set, 0, 0);
17326 gcc_assert (GET_CODE (set) == SET);
17327 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
17329 set_mode = GET_MODE (SET_DEST (set));
17333 if (req_mode != CCNOmode
17334 && (req_mode != CCmode
17335 || XEXP (SET_SRC (set), 1) != const0_rtx))
17339 if (req_mode == CCGCmode)
17343 if (req_mode == CCGOCmode || req_mode == CCNOmode)
17347 if (req_mode == CCZmode)
17357 if (set_mode != req_mode)
17362 gcc_unreachable ();
17365 return GET_MODE (SET_SRC (set)) == set_mode;
17368 /* Generate insn patterns to do an integer compare of OPERANDS. */
17371 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
17373 enum machine_mode cmpmode;
17376 cmpmode = SELECT_CC_MODE (code, op0, op1);
17377 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
17379 /* This is very simple, but making the interface the same as in the
17380 FP case makes the rest of the code easier. */
17381 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
17382 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
17384 /* Return the test that should be put into the flags user, i.e.
17385 the bcc, scc, or cmov instruction. */
17386 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
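/* E.g. for (eq x y) this emits flags = COMPARE (x, y) in CCZmode
   (per ix86_cc_mode below) and hands back (eq flags 0) for the
   caller's bcc/scc/cmov pattern.  */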
17389 /* Figure out whether to use ordered or unordered fp comparisons.
17390 Return the appropriate mode to use. */
17393 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
/* ??? In order to make all comparisons reversible, we do all comparisons
   non-trapping when compiling for IEEE.  Once gcc is able to distinguish
   all forms of trapping and nontrapping comparisons, we can make inequality
   comparisons trapping again, since it results in better code when using
   FCOM based compares.  */
17400 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
17404 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
17406 enum machine_mode mode = GET_MODE (op0);
17408 if (SCALAR_FLOAT_MODE_P (mode))
17410 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
17411 return ix86_fp_compare_mode (code);
17416 /* Only zero flag is needed. */
17417 case EQ: /* ZF=0 */
17418 case NE: /* ZF!=0 */
17420 /* Codes needing carry flag. */
17421 case GEU: /* CF=0 */
17422 case LTU: /* CF=1 */
17423 /* Detect overflow checks. They need just the carry flag. */
17424 if (GET_CODE (op0) == PLUS
17425 && rtx_equal_p (op1, XEXP (op0, 0)))
17429 case GTU: /* CF=0 & ZF=0 */
17430 case LEU: /* CF=1 | ZF=1 */
17431 /* Detect overflow checks. They need just the carry flag. */
17432 if (GET_CODE (op0) == MINUS
17433 && rtx_equal_p (op1, XEXP (op0, 0)))
17437 /* Codes possibly doable only with the sign flag when
17438 comparing against zero.  */
17439 case GE: /* SF=OF or SF=0 */
17440 case LT: /* SF<>OF or SF=1 */
17441 if (op1 == const0_rtx)
17444 /* For other cases the carry flag is not required.  */
17446 /* Codes doable only with the sign flag when comparing
17447 against zero, but for which we miss the jump instruction,
17448 so we need to use relational tests against overflow,
17449 which thus needs to be zero.  */
17450 case GT: /* ZF=0 & SF=OF */
17451 case LE: /* ZF=1 | SF<>OF */
17452 if (op1 == const0_rtx)
17456 /* strcmp patterns do (use flags), and combine may ask us for the proper mode.  */
17461 gcc_unreachable ();
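/* Standalone illustrations (not GCC code, hypothetical helpers) of the
   overflow checks recognized above: both map directly onto the carry
   flag, which is why only CF needs to be modeled for them.  */
static int
add_carries_sketch (unsigned int a, unsigned int b)
{
  return a + b < a;		/* LTU on (PLUS a b) vs. a: CF set by add */
}

static int
sub_borrows_sketch (unsigned int a, unsigned int b)
{
  return a - b > a;		/* GTU on (MINUS a b) vs. a: CF set by sub */
}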
17465 /* Return the fixed registers used for condition codes. */
17468 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
17475 /* If two condition code modes are compatible, return a condition code
17476 mode which is compatible with both.  Otherwise, return VOIDmode.  */
17479 static enum machine_mode
17480 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
17485 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
17488 if ((m1 == CCGCmode && m2 == CCGOCmode)
17489 || (m1 == CCGOCmode && m2 == CCGCmode))
17495 gcc_unreachable ();
17525 /* These are only compatible with themselves, which we already checked.  */
17532 /* Return a comparison we can do that is equivalent to
17533 swap_condition (code), apart possibly from orderedness.
17534 But never change orderedness if TARGET_IEEE_FP, returning
17535 UNKNOWN in that case if necessary.  */
17537 static enum rtx_code
17538 ix86_fp_swap_condition (enum rtx_code code)
17542 case GT: /* GTU - CF=0 & ZF=0 */
17543 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
17544 case GE: /* GEU - CF=0 */
17545 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
17546 case UNLT: /* LTU - CF=1 */
17547 return TARGET_IEEE_FP ? UNKNOWN : GT;
17548 case UNLE: /* LEU - CF=1 | ZF=1 */
17549 return TARGET_IEEE_FP ? UNKNOWN : GE;
17551 return swap_condition (code);
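/* A standalone illustration (not GCC code, a hypothetical helper): a
   swapped comparison such as UNLT (b, a) agrees with GT (a, b) on
   ordered operands but differs exactly when a NaN is involved, which
   is why the function above returns UNKNOWN for TARGET_IEEE_FP.  */
static int
unlt_swapped_sketch (double a, double b)
{
  /* Equals a > b whenever a and b are ordered.  */
  return __builtin_isunordered (b, a) || b < a;
}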
17555 /* Return the cost of comparison CODE using the best strategy for performance.
17556 All the following functions use the number of instructions as a cost metric.
17557 In the future this should be tweaked to compute bytes for optimize_size and
17558 take into account the performance of various instructions on various CPUs.  */
17561 ix86_fp_comparison_cost (enum rtx_code code)
17565 /* The cost of code using bit-twiddling on %ah. */
17582 arith_cost = TARGET_IEEE_FP ? 5 : 4;
17586 arith_cost = TARGET_IEEE_FP ? 6 : 4;
17589 gcc_unreachable ();
17592 switch (ix86_fp_comparison_strategy (code))
17594 case IX86_FPCMP_COMI:
17595 return arith_cost > 4 ? 3 : 2;
17596 case IX86_FPCMP_SAHF:
17597 return arith_cost > 4 ? 4 : 3;
17603 /* Return the strategy to use for a floating-point comparison.  We assume
17604 that fcomi is always preferable where available, since that is also true
17605 when looking at size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test).  */
17607 enum ix86_fpcmp_strategy
17608 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
17610 /* Do fcomi/sahf based test when profitable. */
17613 return IX86_FPCMP_COMI;
17615 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
17616 return IX86_FPCMP_SAHF;
17618 return IX86_FPCMP_ARITH;
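/* For reference (illustration only), the three strategies correspond
   roughly to the sequences:

     IX86_FPCMP_COMI:   fcomip %st(1)                  ; sets ZF/PF/CF directly
     IX86_FPCMP_SAHF:   fnstsw %ax ; sahf              ; copy C-bits into EFLAGS
     IX86_FPCMP_ARITH:  fnstsw %ax ; test $0x45, %ah   ; bit-twiddle in %ah

   matching the 2/3/5 byte counts quoted above.  */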
17621 /* Swap, force into registers, or otherwise massage the two operands
17622 to a fp comparison. The operands are updated in place; the new
17623 comparison code is returned. */
17625 static enum rtx_code
17626 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
17628 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
17629 rtx op0 = *pop0, op1 = *pop1;
17630 enum machine_mode op_mode = GET_MODE (op0);
17631 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
17633 /* All of the unordered compare instructions only work on registers.
17634 The same is true of the fcomi compare instructions.  The XFmode
17635 compare instructions require registers except when comparing
17636 against zero or when converting operand 1 from fixed point to floating point.  */
17640 && (fpcmp_mode == CCFPUmode
17641 || (op_mode == XFmode
17642 && ! (standard_80387_constant_p (op0) == 1
17643 || standard_80387_constant_p (op1) == 1)
17644 && GET_CODE (op1) != FLOAT)
17645 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
17647 op0 = force_reg (op_mode, op0);
17648 op1 = force_reg (op_mode, op1);
17652 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
17653 things around if they appear profitable, otherwise force op0
17654 into a register. */
17656 if (standard_80387_constant_p (op0) == 0
17658 && ! (standard_80387_constant_p (op1) == 0
17661 enum rtx_code new_code = ix86_fp_swap_condition (code);
17662 if (new_code != UNKNOWN)
17665 tmp = op0, op0 = op1, op1 = tmp;
17671 op0 = force_reg (op_mode, op0);
17673 if (CONSTANT_P (op1))
17675 int tmp = standard_80387_constant_p (op1);
17677 op1 = validize_mem (force_const_mem (op_mode, op1));
17681 op1 = force_reg (op_mode, op1);
17684 op1 = force_reg (op_mode, op1);
17688 /* Try to rearrange the comparison to make it cheaper. */
17689 if (ix86_fp_comparison_cost (code)
17690 > ix86_fp_comparison_cost (swap_condition (code))
17691 && (REG_P (op1) || can_create_pseudo_p ()))
17694 tmp = op0, op0 = op1, op1 = tmp;
17695 code = swap_condition (code);
17697 op0 = force_reg (op_mode, op0);
17705 /* Convert the comparison codes we use to represent FP comparisons to the
17706 integer code that will result in a proper branch.  Return UNKNOWN if no such code exists.  */
17710 ix86_fp_compare_code_to_integer (enum rtx_code code)
17739 /* Generate insn patterns to do a floating point compare of OPERANDS. */
17742 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
17744 enum machine_mode fpcmp_mode, intcmp_mode;
17747 fpcmp_mode = ix86_fp_compare_mode (code);
17748 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
17750 /* Do fcomi/sahf based test when profitable. */
17751 switch (ix86_fp_comparison_strategy (code))
17753 case IX86_FPCMP_COMI:
17754 intcmp_mode = fpcmp_mode;
17755 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17756 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17761 case IX86_FPCMP_SAHF:
17762 intcmp_mode = fpcmp_mode;
17763 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17764 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17768 scratch = gen_reg_rtx (HImode);
17769 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
17770 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
17773 case IX86_FPCMP_ARITH:
17774 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
17775 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17776 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
17778 scratch = gen_reg_rtx (HImode);
17779 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
17781 /* In the unordered case, we have to check C2 for NaNs, which
17782 doesn't happen to work out to anything nice combination-wise.
17783 So do some bit twiddling on the value we've got in AH to come
17784 up with an appropriate set of condition codes.  */
17786 intcmp_mode = CCNOmode;
17791 if (code == GT || !TARGET_IEEE_FP)
17793 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17798 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17799 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17800 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
17801 intcmp_mode = CCmode;
17807 if (code == LT && TARGET_IEEE_FP)
17809 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17810 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
17811 intcmp_mode = CCmode;
17816 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
17822 if (code == GE || !TARGET_IEEE_FP)
17824 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
17829 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17830 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
17836 if (code == LE && TARGET_IEEE_FP)
17838 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17839 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17840 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17841 intcmp_mode = CCmode;
17846 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17852 if (code == EQ && TARGET_IEEE_FP)
17854 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17855 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17856 intcmp_mode = CCmode;
17861 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17867 if (code == NE && TARGET_IEEE_FP)
17869 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17870 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
17876 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17882 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17886 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17891 gcc_unreachable ();
17899 /* Return the test that should be put into the flags user, i.e.
17900 the bcc, scc, or cmov instruction. */
17901 return gen_rtx_fmt_ee (code, VOIDmode,
17902 gen_rtx_REG (intcmp_mode, FLAGS_REG),
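/* Standalone illustrations (not GCC code, hypothetical helpers) of the
   %ah bit layout used by the IX86_FPCMP_ARITH sequences above: after
   fnstsw, C0 appears in %ah as 0x01, C2 as 0x04 and C3 as 0x40, so a
   mask of 0x45 tests all three condition bits at once.  */
static int
x87_gt_sketch (unsigned char ah)
{
  return (ah & 0x45) == 0;	/* ordered and op0 > op1: C0=C2=C3=0 */
}

static int
x87_unordered_sketch (unsigned char ah)
{
  return (ah & 0x45) == 0x45;	/* NaN operand: C0, C2 and C3 all set */
}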
17907 ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
17911 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
17912 ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
17914 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
17916 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
17917 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
17920 ret = ix86_expand_int_compare (code, op0, op1);
17926 ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
17928 enum machine_mode mode = GET_MODE (op0);
17940 tmp = ix86_expand_compare (code, op0, op1);
17941 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
17942 gen_rtx_LABEL_REF (VOIDmode, label),
17944 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
17951 /* Expand DImode branch into multiple compare+branch. */
17953 rtx lo[2], hi[2], label2;
17954 enum rtx_code code1, code2, code3;
17955 enum machine_mode submode;
17957 if (CONSTANT_P (op0) && !CONSTANT_P (op1))
17959 tmp = op0, op0 = op1, op1 = tmp;
17960 code = swap_condition (code);
17963 split_double_mode (mode, &op0, 1, lo+0, hi+0);
17964 split_double_mode (mode, &op1, 1, lo+1, hi+1);
17966 submode = mode == DImode ? SImode : DImode;
17968 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
17969 avoid two branches. This costs one extra insn, so disable when
17970 optimizing for size. */
17972 if ((code == EQ || code == NE)
17973 && (!optimize_insn_for_size_p ()
17974 || hi[1] == const0_rtx || lo[1] == const0_rtx))
17979 if (hi[1] != const0_rtx)
17980 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
17981 NULL_RTX, 0, OPTAB_WIDEN);
17984 if (lo[1] != const0_rtx)
17985 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
17986 NULL_RTX, 0, OPTAB_WIDEN);
17988 tmp = expand_binop (submode, ior_optab, xor1, xor0,
17989 NULL_RTX, 0, OPTAB_WIDEN);
17991 ix86_expand_branch (code, tmp, const0_rtx, label);
17995 /* Otherwise, if we are doing less-than or greater-or-equal-than,
17996 op1 is a constant and the low word is zero, then we can just
17997 examine the high word. Similarly for low word -1 and
17998 less-or-equal-than or greater-than. */
18000 if (CONST_INT_P (hi[1]))
18003 case LT: case LTU: case GE: case GEU:
18004 if (lo[1] == const0_rtx)
18006 ix86_expand_branch (code, hi[0], hi[1], label);
18010 case LE: case LEU: case GT: case GTU:
18011 if (lo[1] == constm1_rtx)
18013 ix86_expand_branch (code, hi[0], hi[1], label);
18021 /* Otherwise, we need two or three jumps. */
18023 label2 = gen_label_rtx ();
18026 code2 = swap_condition (code);
18027 code3 = unsigned_condition (code);
18031 case LT: case GT: case LTU: case GTU:
18034 case LE: code1 = LT; code2 = GT; break;
18035 case GE: code1 = GT; code2 = LT; break;
18036 case LEU: code1 = LTU; code2 = GTU; break;
18037 case GEU: code1 = GTU; code2 = LTU; break;
18039 case EQ: code1 = UNKNOWN; code2 = NE; break;
18040 case NE: code2 = UNKNOWN; break;
18043 gcc_unreachable ();
18048 * if (hi(a) < hi(b)) goto true;
18049 * if (hi(a) > hi(b)) goto false;
18050 * if (lo(a) < lo(b)) goto true;
18054 if (code1 != UNKNOWN)
18055 ix86_expand_branch (code1, hi[0], hi[1], label);
18056 if (code2 != UNKNOWN)
18057 ix86_expand_branch (code2, hi[0], hi[1], label2);
18059 ix86_expand_branch (code3, lo[0], lo[1], label);
18061 if (code2 != UNKNOWN)
18062 emit_label (label2);
18067 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
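/* Standalone illustrations (not GCC code, hypothetical helpers) of the
   double-word branch lowering above: equality folds into a single
   test, while ordered compares test the high words first and fall
   back to an unsigned compare of the low words.  */
static int
di_eq_sketch (unsigned int lo0, unsigned int hi0,
              unsigned int lo1, unsigned int hi1)
{
  return ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0;	/* one branch, not two */
}

static int
di_ltu_sketch (unsigned int lo0, unsigned int hi0,
               unsigned int lo1, unsigned int hi1)
{
  if (hi0 < hi1)
    return 1;			/* code1: goto true */
  if (hi0 > hi1)
    return 0;			/* code2: goto false (label2) */
  return lo0 < lo1;		/* code3: unsigned test on low words */
}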
18072 /* Split branch based on floating point condition. */
18074 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
18075 rtx target1, rtx target2, rtx tmp, rtx pushed)
18080 if (target2 != pc_rtx)
18083 code = reverse_condition_maybe_unordered (code);
18088 condition = ix86_expand_fp_compare (code, op1, op2,
18091 /* Remove pushed operand from stack. */
18093 ix86_free_from_memory (GET_MODE (pushed));
18095 i = emit_jump_insn (gen_rtx_SET
18097 gen_rtx_IF_THEN_ELSE (VOIDmode,
18098 condition, target1, target2)));
18099 if (split_branch_probability >= 0)
18100 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
18104 ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
18108 gcc_assert (GET_MODE (dest) == QImode);
18110 ret = ix86_expand_compare (code, op0, op1);
18111 PUT_MODE (ret, QImode);
18112 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
18115 /* Expand a comparison setting or clearing the carry flag.  Return true
18116 when successful and set POP to the comparison for the operation.  */
18118 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
18120 enum machine_mode mode =
18121 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
18123 /* Do not handle double-mode compares that go through the special path.  */
18124 if (mode == (TARGET_64BIT ? TImode : DImode))
18127 if (SCALAR_FLOAT_MODE_P (mode))
18129 rtx compare_op, compare_seq;
18131 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
18133 /* Shortcut: the following common codes never translate
18134 into carry flag compares.  */
18135 if (code == EQ || code == NE || code == UNEQ || code == LTGT
18136 || code == ORDERED || code == UNORDERED)
18139 /* These comparisons require the zero flag; swap operands so they won't.  */
18140 if ((code == GT || code == UNLE || code == LE || code == UNGT)
18141 && !TARGET_IEEE_FP)
18146 code = swap_condition (code);
18149 /* Try to expand the comparison and verify that we end up with a
18150 carry flag based comparison.  This fails to be true only when
18151 we decide to expand the comparison using arithmetic, which is
18152 not a very common scenario.  */
18154 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
18155 compare_seq = get_insns ();
18158 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
18159 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
18160 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
18162 code = GET_CODE (compare_op);
18164 if (code != LTU && code != GEU)
18167 emit_insn (compare_seq);
18172 if (!INTEGRAL_MODE_P (mode))
18181 /* Convert a==0 into (unsigned)a<1. */
18184 if (op1 != const0_rtx)
18187 code = (code == EQ ? LTU : GEU);
18190 /* Convert a>b into b<a or a>=b+1.  */
18193 if (CONST_INT_P (op1))
18195 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
18196 /* Bail out on overflow.  We still can swap operands but that
18197 would force loading of the constant into a register.  */
18198 if (op1 == const0_rtx
18199 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
18201 code = (code == GTU ? GEU : LTU);
18208 code = (code == GTU ? LTU : GEU);
18212 /* Convert a>=0 into (unsigned)a<0x80000000. */
18215 if (mode == DImode || op1 != const0_rtx)
18217 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18218 code = (code == LT ? GEU : LTU);
18222 if (mode == DImode || op1 != constm1_rtx)
18224 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18225 code = (code == LE ? GEU : LTU);
18231 /* Swapping operands may cause a constant to appear as the first operand.  */
18232 if (!nonimmediate_operand (op0, VOIDmode))
18234 if (!can_create_pseudo_p ())
18236 op0 = force_reg (mode, op0);
18238 *pop = ix86_expand_compare (code, op0, op1);
18239 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
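/* A standalone illustration (not GCC code, a hypothetical helper) of
   the rewrites above, which turn common integer comparisons into ones
   decided purely by the carry flag:
     a == 0   becomes  (unsigned) a < 1
     a >  b   becomes  (unsigned) a >= b + 1   (constant B, no wraparound)
     a >= 0   becomes  (unsigned) a < 0x80000000.  */
static int
ge_zero_sketch (int a)
{
  return (unsigned int) a < 0x80000000u;   /* same truth value as a >= 0 */
}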
18244 ix86_expand_int_movcc (rtx operands[])
18246 enum rtx_code code = GET_CODE (operands[1]), compare_code;
18247 rtx compare_seq, compare_op;
18248 enum machine_mode mode = GET_MODE (operands[0]);
18249 bool sign_bit_compare_p = false;
18250 rtx op0 = XEXP (operands[1], 0);
18251 rtx op1 = XEXP (operands[1], 1);
18254 compare_op = ix86_expand_compare (code, op0, op1);
18255 compare_seq = get_insns ();
18258 compare_code = GET_CODE (compare_op);
18260 if ((op1 == const0_rtx && (code == GE || code == LT))
18261 || (op1 == constm1_rtx && (code == GT || code == LE)))
18262 sign_bit_compare_p = true;
18264 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
18265 HImode insns, we'd be swallowed in word prefix ops. */
18267 if ((mode != HImode || TARGET_FAST_PREFIX)
18268 && (mode != (TARGET_64BIT ? TImode : DImode))
18269 && CONST_INT_P (operands[2])
18270 && CONST_INT_P (operands[3]))
18272 rtx out = operands[0];
18273 HOST_WIDE_INT ct = INTVAL (operands[2]);
18274 HOST_WIDE_INT cf = INTVAL (operands[3]);
18275 HOST_WIDE_INT diff;
18278 /* Sign bit compares are better done using shifts than by using the sbb insn.  */
18280 if (sign_bit_compare_p
18281 || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
18283 /* Detect overlap between destination and compare sources. */
18286 if (!sign_bit_compare_p)
18289 bool fpcmp = false;
18291 compare_code = GET_CODE (compare_op);
18293 flags = XEXP (compare_op, 0);
18295 if (GET_MODE (flags) == CCFPmode
18296 || GET_MODE (flags) == CCFPUmode)
18300 = ix86_fp_compare_code_to_integer (compare_code);
18303 /* To simplify the rest of the code, restrict to the GEU case.  */
18304 if (compare_code == LTU)
18306 HOST_WIDE_INT tmp = ct;
18309 compare_code = reverse_condition (compare_code);
18310 code = reverse_condition (code);
18315 PUT_CODE (compare_op,
18316 reverse_condition_maybe_unordered
18317 (GET_CODE (compare_op)));
18319 PUT_CODE (compare_op,
18320 reverse_condition (GET_CODE (compare_op)));
18324 if (reg_overlap_mentioned_p (out, op0)
18325 || reg_overlap_mentioned_p (out, op1))
18326 tmp = gen_reg_rtx (mode);
18328 if (mode == DImode)
18329 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
18331 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
18332 flags, compare_op));
18336 if (code == GT || code == GE)
18337 code = reverse_condition (code);
18340 HOST_WIDE_INT tmp = ct;
18345 tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
18358 tmp = expand_simple_binop (mode, PLUS,
18360 copy_rtx (tmp), 1, OPTAB_DIRECT);
18371 tmp = expand_simple_binop (mode, IOR,
18373 copy_rtx (tmp), 1, OPTAB_DIRECT);
18375 else if (diff == -1 && ct)
18385 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18387 tmp = expand_simple_binop (mode, PLUS,
18388 copy_rtx (tmp), GEN_INT (cf),
18389 copy_rtx (tmp), 1, OPTAB_DIRECT);
18397 * andl cf - ct, dest
18407 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18410 tmp = expand_simple_binop (mode, AND,
18412 gen_int_mode (cf - ct, mode),
18413 copy_rtx (tmp), 1, OPTAB_DIRECT);
18415 tmp = expand_simple_binop (mode, PLUS,
18416 copy_rtx (tmp), GEN_INT (ct),
18417 copy_rtx (tmp), 1, OPTAB_DIRECT);
18420 if (!rtx_equal_p (tmp, out))
18421 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
18428 enum machine_mode cmp_mode = GET_MODE (op0);
18431 tmp = ct, ct = cf, cf = tmp;
18434 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18436 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18438 /* We may be reversing an unordered compare to a normal compare, which
18439 is not valid in general (we may convert a non-trapping condition
18440 to a trapping one); however, on i386 we currently emit all
18441 comparisons unordered.  */
18442 compare_code = reverse_condition_maybe_unordered (compare_code);
18443 code = reverse_condition_maybe_unordered (code);
18447 compare_code = reverse_condition (compare_code);
18448 code = reverse_condition (code);
18452 compare_code = UNKNOWN;
18453 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
18454 && CONST_INT_P (op1))
18456 if (op1 == const0_rtx
18457 && (code == LT || code == GE))
18458 compare_code = code;
18459 else if (op1 == constm1_rtx)
18463 else if (code == GT)
18468 /* Optimize dest = (op0 < 0) ? -1 : cf. */
18469 if (compare_code != UNKNOWN
18470 && GET_MODE (op0) == GET_MODE (out)
18471 && (cf == -1 || ct == -1))
18473 /* If lea code below could be used, only optimize
18474 if it results in a 2 insn sequence. */
18476 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
18477 || diff == 3 || diff == 5 || diff == 9)
18478 || (compare_code == LT && ct == -1)
18479 || (compare_code == GE && cf == -1))
18482 * notl op1 (if necessary)
18490 code = reverse_condition (code);
18493 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18495 out = expand_simple_binop (mode, IOR,
18497 out, 1, OPTAB_DIRECT);
18498 if (out != operands[0])
18499 emit_move_insn (operands[0], out);
18506 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
18507 || diff == 3 || diff == 5 || diff == 9)
18508 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
18510 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
18516 * lea cf(dest*(ct-cf)),dest
18520 * This also catches the degenerate setcc-only case.
18526 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18529 /* On x86_64 the lea instruction operates on Pmode, so we need
18530 to get arithmetics done in proper mode to match. */
18532 tmp = copy_rtx (out);
18536 out1 = copy_rtx (out);
18537 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
18541 tmp = gen_rtx_PLUS (mode, tmp, out1);
18547 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
18550 if (!rtx_equal_p (tmp, out))
18553 out = force_operand (tmp, copy_rtx (out));
18555 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
18557 if (!rtx_equal_p (out, operands[0]))
18558 emit_move_insn (operands[0], copy_rtx (out));
18564 * General case:                 Jumpful:
18565 *   xorl dest,dest               cmpl op1, op2
18566 *   cmpl op1, op2                movl ct, dest
18567 *   setcc dest                   jcc 1f
18568 *   decl dest                    movl cf, dest
18569 *   andl (cf-ct),dest            1:
18572 * Size 20.                      Size 14.
18574 * This is reasonably steep, but branch mispredict costs are
18575 * high on modern cpus, so consider failing only if optimizing for size.  */
18579 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18580 && BRANCH_COST (optimize_insn_for_speed_p (),
18585 enum machine_mode cmp_mode = GET_MODE (op0);
18590 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18592 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18594 /* We may be reversing an unordered compare to a normal compare,
18595 which is not valid in general (we may convert a non-trapping
18596 condition to a trapping one); however, on i386 we currently
18597 emit all comparisons unordered.  */
18598 code = reverse_condition_maybe_unordered (code);
18602 code = reverse_condition (code);
18603 if (compare_code != UNKNOWN)
18604 compare_code = reverse_condition (compare_code);
18608 if (compare_code != UNKNOWN)
18610 /* notl op1 (if needed)
18615 For x < 0 (resp. x <= -1) there will be no notl,
18616 so if possible swap the constants to get rid of the complement.
18618 True/false will be -1/0 while the code below (store flag
18619 followed by decrement) is 0/-1, so the constants need
18620 to be exchanged once more.  */
18622 if (compare_code == GE || !cf)
18624 code = reverse_condition (code);
18629 HOST_WIDE_INT tmp = cf;
18634 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18638 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18640 out = expand_simple_binop (mode, PLUS, copy_rtx (out),
18642 copy_rtx (out), 1, OPTAB_DIRECT);
18645 out = expand_simple_binop (mode, AND, copy_rtx (out),
18646 gen_int_mode (cf - ct, mode),
18647 copy_rtx (out), 1, OPTAB_DIRECT);
18649 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
18650 copy_rtx (out), 1, OPTAB_DIRECT);
18651 if (!rtx_equal_p (out, operands[0]))
18652 emit_move_insn (operands[0], copy_rtx (out));
18658 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18660 /* Try a few more things with specific constants and a variable.  */
18663 rtx var, orig_out, out, tmp;
18665 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
18668 /* If one of the two operands is an interesting constant, load a
18669 constant with the above and mask it in with a logical operation. */
18671 if (CONST_INT_P (operands[2]))
18674 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
18675 operands[3] = constm1_rtx, op = and_optab;
18676 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
18677 operands[3] = const0_rtx, op = ior_optab;
18681 else if (CONST_INT_P (operands[3]))
18684 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
18685 operands[2] = constm1_rtx, op = and_optab;
18686 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
18687 operands[2] = const0_rtx, op = ior_optab;
18694 orig_out = operands[0];
18695 tmp = gen_reg_rtx (mode);
18698 /* Recurse to get the constant loaded. */
18699 if (ix86_expand_int_movcc (operands) == 0)
18702 /* Mask in the interesting variable. */
18703 out = expand_binop (mode, op, var, tmp, orig_out, 0,
18705 if (!rtx_equal_p (out, orig_out))
18706 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
18712 * For comparison with above,
18722 if (! nonimmediate_operand (operands[2], mode))
18723 operands[2] = force_reg (mode, operands[2]);
18724 if (! nonimmediate_operand (operands[3], mode))
18725 operands[3] = force_reg (mode, operands[3]);
18727 if (! register_operand (operands[2], VOIDmode)
18729 || ! register_operand (operands[3], VOIDmode)))
18730 operands[2] = force_reg (mode, operands[2]);
18733 && ! register_operand (operands[3], VOIDmode))
18734 operands[3] = force_reg (mode, operands[3]);
18736 emit_insn (compare_seq);
18737 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18738 gen_rtx_IF_THEN_ELSE (mode,
18739 compare_op, operands[2],
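/* A standalone illustration (not GCC code, a hypothetical helper) of
   the branch-free selection built above from setcc/decl/andl (or sbb):
   a 0/-1 mask chooses between the two constants without a jump.  */
static int
select_const_sketch (int cond, int ct, int cf)
{
  int mask = -(cond != 0);		/* 0 when false, -1 when true */
  return cf + (mask & (ct - cf));	/* cf when false, ct when true */
}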
18744 /* Swap, force into registers, or otherwise massage the two operands
18745 to an sse comparison with a mask result. Thus we differ a bit from
18746 ix86_prepare_fp_compare_args which expects to produce a flags result.
18748 The DEST operand exists to help determine whether to commute commutative
18749 operators. The POP0/POP1 operands are updated in place. The new
18750 comparison code is returned, or UNKNOWN if not implementable. */
18752 static enum rtx_code
18753 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
18754 rtx *pop0, rtx *pop1)
18758 /* AVX supports all the needed comparisons; there is no need to swap
18759 arguments nor help reload.  */
18767 /* We have no LTGT as an operator. We could implement it with
18768 NE & ORDERED, but this requires an extra temporary. It's
18769 not clear that it's worth it. */
18776 /* These are supported directly. */
18783 /* For commutative operators, try to canonicalize the destination
18784 operand to be first in the comparison - this helps reload to
18785 avoid extra moves. */
18786 if (!dest || !rtx_equal_p (dest, *pop1))
18794 /* These are not supported directly. Swap the comparison operands
18795 to transform into something that is supported. */
18799 code = swap_condition (code);
18803 gcc_unreachable ();
18809 /* Detect conditional moves that exactly match min/max operational
18810 semantics. Note that this is IEEE safe, as long as we don't
18811 interchange the operands.
18813 Returns FALSE if this conditional move doesn't match a MIN/MAX,
18814 and TRUE if the operation is successful and instructions are emitted. */
18817 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
18818 rtx cmp_op1, rtx if_true, rtx if_false)
18820 enum machine_mode mode;
18826 else if (code == UNGE)
18829 if_true = if_false;
18835 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
18837 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
18842 mode = GET_MODE (dest);
18844 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
18845 but MODE may be a vector mode and thus not appropriate. */
18846 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
18848 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
18851 if_true = force_reg (mode, if_true);
18852 v = gen_rtvec (2, if_true, if_false);
18853 tmp = gen_rtx_UNSPEC (mode, v, u);
18857 code = is_min ? SMIN : SMAX;
18858 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
18861 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
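/* A standalone illustration (not GCC code, a hypothetical helper) of
   why operand order matters above: SSE min/max return the second
   operand when the compare is false, so with NaNs or signed zeros
   min (a, b) and min (b, a) can differ, and if_true/if_false must not
   be interchanged.  */
static double
sse_min_sketch (double a, double b)
{
  return a < b ? a : b;		/* minsd: yields B on unordered or equal */
}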
18865 /* Expand an sse vector comparison. Return the register with the result. */
18868 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
18869 rtx op_true, rtx op_false)
18871 enum machine_mode mode = GET_MODE (dest);
18874 cmp_op0 = force_reg (mode, cmp_op0);
18875 if (!nonimmediate_operand (cmp_op1, mode))
18876 cmp_op1 = force_reg (mode, cmp_op1);
18879 || reg_overlap_mentioned_p (dest, op_true)
18880 || reg_overlap_mentioned_p (dest, op_false))
18881 dest = gen_reg_rtx (mode);
18883 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
18884 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18889 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
18890 operations. This is used for both scalar and vector conditional moves. */
18893 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
18895 enum machine_mode mode = GET_MODE (dest);
18898 if (op_false == CONST0_RTX (mode))
18900 op_true = force_reg (mode, op_true);
18901 x = gen_rtx_AND (mode, cmp, op_true);
18902 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18904 else if (op_true == CONST0_RTX (mode))
18906 op_false = force_reg (mode, op_false);
18907 x = gen_rtx_NOT (mode, cmp);
18908 x = gen_rtx_AND (mode, x, op_false);
18909 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18911 else if (TARGET_XOP)
18913 op_true = force_reg (mode, op_true);
18915 if (!nonimmediate_operand (op_false, mode))
18916 op_false = force_reg (mode, op_false);
18918 emit_insn (gen_rtx_SET (mode, dest,
18919 gen_rtx_IF_THEN_ELSE (mode, cmp,
18925 op_true = force_reg (mode, op_true);
18926 op_false = force_reg (mode, op_false);
18928 t2 = gen_reg_rtx (mode);
18930 t3 = gen_reg_rtx (mode);
18934 x = gen_rtx_AND (mode, op_true, cmp);
18935 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
18937 x = gen_rtx_NOT (mode, cmp);
18938 x = gen_rtx_AND (mode, x, op_false);
18939 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
18941 x = gen_rtx_IOR (mode, t3, t2);
18942 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
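/* A standalone illustration (not GCC code, a hypothetical helper) of
   the logical selection emitted above: CMP is an all-ones/all-zeros
   mask produced by the vector compare, and the result is assembled
   with AND/ANDN/IOR.  */
static unsigned int
mask_select_sketch (unsigned int cmp, unsigned int t, unsigned int f)
{
  return (cmp & t) | (~cmp & f);	/* t where mask set, f elsewhere */
}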
18946 /* Expand a floating-point conditional move. Return true if successful. */
18949 ix86_expand_fp_movcc (rtx operands[])
18951 enum machine_mode mode = GET_MODE (operands[0]);
18952 enum rtx_code code = GET_CODE (operands[1]);
18953 rtx tmp, compare_op;
18954 rtx op0 = XEXP (operands[1], 0);
18955 rtx op1 = XEXP (operands[1], 1);
18957 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
18959 enum machine_mode cmode;
18961 /* Since we've no cmove for sse registers, don't force bad register
18962 allocation just to gain access to it. Deny movcc when the
18963 comparison mode doesn't match the move mode. */
18964 cmode = GET_MODE (op0);
18965 if (cmode == VOIDmode)
18966 cmode = GET_MODE (op1);
18970 code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
18971 if (code == UNKNOWN)
18974 if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
18975 operands[2], operands[3]))
18978 tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
18979 operands[2], operands[3]);
18980 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
18984 /* The floating point conditional move instructions don't directly
18985 support conditions resulting from a signed integer comparison. */
18987 compare_op = ix86_expand_compare (code, op0, op1);
18988 if (!fcmov_comparison_operator (compare_op, VOIDmode))
18990 tmp = gen_reg_rtx (QImode);
18991 ix86_expand_setcc (tmp, code, op0, op1);
18993 compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
18996 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18997 gen_rtx_IF_THEN_ELSE (mode, compare_op,
18998 operands[2], operands[3])));
19003 /* Expand a floating-point vector conditional move; a vcond operation
19004 rather than a movcc operation. */
19007 ix86_expand_fp_vcond (rtx operands[])
19009 enum rtx_code code = GET_CODE (operands[3]);
19012 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
19013 &operands[4], &operands[5]);
19014 if (code == UNKNOWN)
19017 switch (GET_CODE (operands[3]))
19020 temp = ix86_expand_sse_cmp (operands[0], ORDERED, operands[4],
19021 operands[5], operands[0], operands[0]);
19022 cmp = ix86_expand_sse_cmp (operands[0], NE, operands[4],
19023 operands[5], operands[1], operands[2]);
19027 temp = ix86_expand_sse_cmp (operands[0], UNORDERED, operands[4],
19028 operands[5], operands[0], operands[0]);
19029 cmp = ix86_expand_sse_cmp (operands[0], EQ, operands[4],
19030 operands[5], operands[1], operands[2]);
19034 gcc_unreachable ();
19036 cmp = expand_simple_binop (GET_MODE (cmp), code, temp, cmp, cmp, 1,
19038 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
19042 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
19043 operands[5], operands[1], operands[2]))
19046 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
19047 operands[1], operands[2]);
19048 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
19052 /* Expand a signed/unsigned integral vector conditional move. */
19055 ix86_expand_int_vcond (rtx operands[])
19057 enum machine_mode mode = GET_MODE (operands[0]);
19058 enum rtx_code code = GET_CODE (operands[3]);
19059 bool negate = false;
19062 cop0 = operands[4];
19063 cop1 = operands[5];
19065 /* XOP supports all of the comparisons on all vector int types. */
19068 /* Canonicalize the comparison to EQ, GT, GTU. */
19079 code = reverse_condition (code);
19085 code = reverse_condition (code);
19091 code = swap_condition (code);
19092 x = cop0, cop0 = cop1, cop1 = x;
19096 gcc_unreachable ();
19099 /* Only SSE4.1/SSE4.2 supports V2DImode. */
19100 if (mode == V2DImode)
19105 /* SSE4.1 supports EQ. */
19106 if (!TARGET_SSE4_1)
19112 /* SSE4.2 supports GT/GTU. */
19113 if (!TARGET_SSE4_2)
19118 gcc_unreachable ();
19122 /* Unsigned parallel compare is not supported by the hardware.
19123 Play some tricks to turn this into a signed comparison against zero.  */
19127 cop0 = force_reg (mode, cop0);
19135 rtx (*gen_sub3) (rtx, rtx, rtx);
19137 /* Subtract (-(INT MAX) - 1) from both operands to make them signed.  */
19139 mask = ix86_build_signbit_mask (mode, true, false);
19140 gen_sub3 = (mode == V4SImode
19141 ? gen_subv4si3 : gen_subv2di3);
19142 t1 = gen_reg_rtx (mode);
19143 emit_insn (gen_sub3 (t1, cop0, mask));
19145 t2 = gen_reg_rtx (mode);
19146 emit_insn (gen_sub3 (t2, cop1, mask));
19156 /* Perform a parallel unsigned saturating subtraction. */
19157 x = gen_reg_rtx (mode);
19158 emit_insn (gen_rtx_SET (VOIDmode, x,
19159 gen_rtx_US_MINUS (mode, cop0, cop1)));
19162 cop1 = CONST0_RTX (mode);
19168 gcc_unreachable ();
19173 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
19174 operands[1+negate], operands[2-negate]);
19176 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
19177 operands[2-negate]);
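/* A standalone illustration (not GCC code, a hypothetical helper) of
   the unsigned-compare trick above: flipping the sign bit of both
   operands (equivalently, subtracting INT_MIN) turns an unsigned
   comparison into the signed one the hardware provides.  */
static int
gtu_via_signed_sketch (unsigned int a, unsigned int b)
{
  /* Same truth value as a > b in unsigned arithmetic.  */
  return (int) (a ^ 0x80000000u) > (int) (b ^ 0x80000000u);
}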
19181 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
19182 true if we should do zero extension, else sign extension. HIGH_P is
19183 true if we want the N/2 high elements, else the low elements. */
19186 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
19188 enum machine_mode imode = GET_MODE (operands[1]);
19189 rtx (*unpack)(rtx, rtx, rtx);
19196 unpack = gen_vec_interleave_highv16qi;
19198 unpack = gen_vec_interleave_lowv16qi;
19202 unpack = gen_vec_interleave_highv8hi;
19204 unpack = gen_vec_interleave_lowv8hi;
19208 unpack = gen_vec_interleave_highv4si;
19210 unpack = gen_vec_interleave_lowv4si;
19213 gcc_unreachable ();
19216 dest = gen_lowpart (imode, operands[0]);
19219 se = force_reg (imode, CONST0_RTX (imode));
19221 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
19222 operands[1], pc_rtx, pc_rtx);
19224 emit_insn (unpack (dest, operands[1], se));
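/* A standalone, scalar illustration (not GCC code, a hypothetical
   helper) of the interleave-based widening above: each narrow element
   is paired with either zero (zero extension) or a mask of its own
   sign bits obtained from a 0 > x compare (sign extension).  */
static short
sext8to16_sketch (signed char x)
{
  unsigned char se = (0 > x) ? 0xff : 0x00;	/* the "se" operand */
  return (short) (((unsigned short) se << 8) | (unsigned char) x);
}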
19227 /* This function performs the same task as ix86_expand_sse_unpack,
19228 but with SSE4.1 instructions. */
19231 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
19233 enum machine_mode imode = GET_MODE (operands[1]);
19234 rtx (*unpack)(rtx, rtx);
19241 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
19243 unpack = gen_sse4_1_sign_extendv8qiv8hi2;
19247 unpack = gen_sse4_1_zero_extendv4hiv4si2;
19249 unpack = gen_sse4_1_sign_extendv4hiv4si2;
19253 unpack = gen_sse4_1_zero_extendv2siv2di2;
19255 unpack = gen_sse4_1_sign_extendv2siv2di2;
19258 gcc_unreachable ();
19261 dest = operands[0];
19264 /* Shift higher 8 bytes to lower 8 bytes. */
19265 src = gen_reg_rtx (imode);
19266 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
19267 gen_lowpart (V1TImode, operands[1]),
19273 emit_insn (unpack (dest, src));
19276 /* Expand conditional increment or decrement using adc/sbb instructions.
19277 The default case using setcc followed by the conditional move can be
19278 done by generic code.  */
19280 ix86_expand_int_addcc (rtx operands[])
19282 enum rtx_code code = GET_CODE (operands[1]);
19284 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
19286 rtx val = const0_rtx;
19287 bool fpcmp = false;
19288 enum machine_mode mode;
19289 rtx op0 = XEXP (operands[1], 0);
19290 rtx op1 = XEXP (operands[1], 1);
19292 if (operands[3] != const1_rtx
19293 && operands[3] != constm1_rtx)
19295 if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
19297 code = GET_CODE (compare_op);
19299 flags = XEXP (compare_op, 0);
19301 if (GET_MODE (flags) == CCFPmode
19302 || GET_MODE (flags) == CCFPUmode)
19305 code = ix86_fp_compare_code_to_integer (code);
19312 PUT_CODE (compare_op,
19313 reverse_condition_maybe_unordered
19314 (GET_CODE (compare_op)));
19316 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
19319 mode = GET_MODE (operands[0]);
19321 /* Construct either adc or sbb insn. */
19322 if ((code == LTU) == (operands[3] == constm1_rtx))
19327 insn = gen_subqi3_carry;
19330 insn = gen_subhi3_carry;
19333 insn = gen_subsi3_carry;
19336 insn = gen_subdi3_carry;
19339 gcc_unreachable ();
19347 insn = gen_addqi3_carry;
19350 insn = gen_addhi3_carry;
19353 insn = gen_addsi3_carry;
19356 insn = gen_adddi3_carry;
19359 gcc_unreachable ();
19362 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
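/* A standalone illustration (not GCC code, a hypothetical helper) of
   the conditional increment built above: when the comparison sets the
   carry flag, the whole operation is a compare followed by adc (or
   sbb) with an immediate zero, with no branch or cmove needed.  */
static unsigned int
cond_inc_sketch (unsigned int x, unsigned int a, unsigned int b)
{
  return x + (a < b);		/* cmp a, b ; adc $0, x */
}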
19368 /* Split operands 0 and 1 into half-mode parts.  Similar to split_double_mode,
19369 but works for floating point parameters and non-offsettable memories.
19370 For pushes, it returns just stack offsets; the values will be saved
19371 in the right order.  Maximally four parts are generated.  */
19374 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
19379 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
19381 size = (GET_MODE_SIZE (mode) + 4) / 8;
19383 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
19384 gcc_assert (size >= 2 && size <= 4);
19386 /* Optimize constant pool references to immediates.  This is used by fp
19387 moves that force all constants to memory to allow combining.  */
19388 if (MEM_P (operand) && MEM_READONLY_P (operand))
19390 rtx tmp = maybe_get_pool_constant (operand);
19395 if (MEM_P (operand) && !offsettable_memref_p (operand))
19397 /* The only non-offsettable memories we handle are pushes.  */
19398 int ok = push_operand (operand, VOIDmode);
19402 operand = copy_rtx (operand);
19403 PUT_MODE (operand, Pmode);
19404 parts[0] = parts[1] = parts[2] = parts[3] = operand;
19408 if (GET_CODE (operand) == CONST_VECTOR)
19410 enum machine_mode imode = int_mode_for_mode (mode);
19411 /* Caution: if we looked through a constant pool memory above,
19412 the operand may actually have a different mode now. That's
19413 ok, since we want to pun this all the way back to an integer. */
19414 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
19415 gcc_assert (operand != NULL);
19421 if (mode == DImode)
19422 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
19427 if (REG_P (operand))
19429 gcc_assert (reload_completed);
19430 for (i = 0; i < size; i++)
19431 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
19433 else if (offsettable_memref_p (operand))
19435 operand = adjust_address (operand, SImode, 0);
19436 parts[0] = operand;
19437 for (i = 1; i < size; i++)
19438 parts[i] = adjust_address (operand, SImode, 4 * i);
19440 else if (GET_CODE (operand) == CONST_DOUBLE)
19445 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
19449 real_to_target (l, &r, mode);
19450 parts[3] = gen_int_mode (l[3], SImode);
19451 parts[2] = gen_int_mode (l[2], SImode);
19454 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
19455 parts[2] = gen_int_mode (l[2], SImode);
19458 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
19461 gcc_unreachable ();
19463 parts[1] = gen_int_mode (l[1], SImode);
19464 parts[0] = gen_int_mode (l[0], SImode);
19467 gcc_unreachable ();
19472 if (mode == TImode)
19473 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
19474 if (mode == XFmode || mode == TFmode)
19476 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
19477 if (REG_P (operand))
19479 gcc_assert (reload_completed);
19480 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
19481 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
19483 else if (offsettable_memref_p (operand))
19485 operand = adjust_address (operand, DImode, 0);
19486 parts[0] = operand;
19487 parts[1] = adjust_address (operand, upper_mode, 8);
19489 else if (GET_CODE (operand) == CONST_DOUBLE)
19494 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
19495 real_to_target (l, &r, mode);
19497 /* Do not use shift by 32 to avoid warning on 32bit systems. */
19498 if (HOST_BITS_PER_WIDE_INT >= 64)
19501 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
19502 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
19505 parts[0] = immed_double_const (l[0], l[1], DImode);
19507 if (upper_mode == SImode)
19508 parts[1] = gen_int_mode (l[2], SImode);
19509 else if (HOST_BITS_PER_WIDE_INT >= 64)
19512 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
19513 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
19516 parts[1] = immed_double_const (l[2], l[3], DImode);
19519 gcc_unreachable ();
19526 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
19527 Return false when normal moves are needed; true when all required
19528 insns have been emitted.  Operands 2-5 contain the destination parts
19529 in the correct order; operands 6-9 contain the source parts.  */
19532 ix86_split_long_move (rtx operands[])
19537 int collisions = 0;
19538 enum machine_mode mode = GET_MODE (operands[0]);
19539 bool collisionparts[4];
19541 /* The DFmode expanders may ask us to move a double.
19542 For a 64-bit target this is a single move.  By hiding the fact
19543 here we simplify the i386.md splitters.  */
19544 if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
19546 /* Optimize constant pool references to immediates.  This is used by
19547 fp moves that force all constants to memory to allow combining.  */
19549 if (MEM_P (operands[1])
19550 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
19551 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
19552 operands[1] = get_pool_constant (XEXP (operands[1], 0));
19553 if (push_operand (operands[0], VOIDmode))
19555 operands[0] = copy_rtx (operands[0]);
19556 PUT_MODE (operands[0], Pmode);
19559 operands[0] = gen_lowpart (DImode, operands[0]);
19560 operands[1] = gen_lowpart (DImode, operands[1]);
19561 emit_move_insn (operands[0], operands[1]);
19565 /* The only non-offsettable memory we handle is push. */
19566 if (push_operand (operands[0], VOIDmode))
19569 gcc_assert (!MEM_P (operands[0])
19570 || offsettable_memref_p (operands[0]));
19572 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
19573 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
19575 /* When emitting a push, take care of source operands on the stack.  */
19576 if (push && MEM_P (operands[1])
19577 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
19579 rtx src_base = XEXP (part[1][nparts - 1], 0);
19581 /* Compensate for the stack decrement by 4. */
19582 if (!TARGET_64BIT && nparts == 3
19583 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
19584 src_base = plus_constant (src_base, 4);
19586 /* src_base refers to the stack pointer and is
19587 automatically decreased by the emitted push.  */
19588 for (i = 0; i < nparts; i++)
19589 part[1][i] = change_address (part[1][i],
19590 GET_MODE (part[1][i]), src_base);
19593 /* We need to do the copy in the right order in case an address register
19594 of the source overlaps the destination.  */
19595 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
19599 for (i = 0; i < nparts; i++)
19602 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
19603 if (collisionparts[i])
19607 /* Collision in the middle part can be handled by reordering. */
19608 if (collisions == 1 && nparts == 3 && collisionparts [1])
19610 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19611 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19613 else if (collisions == 1
19615 && (collisionparts [1] || collisionparts [2]))
19617 if (collisionparts [1])
19619 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19620 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19624 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
19625 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
19629 /* If there are more collisions, we can't handle it by reordering.
19630 Do an lea to the last part and use only one colliding move. */
19631 else if (collisions > 1)
19637 base = part[0][nparts - 1];
19639 /* Handle the case when the last part isn't valid for lea.
19640 Happens in 64-bit mode storing the 12-byte XFmode. */
19641 if (GET_MODE (base) != Pmode)
19642 base = gen_rtx_REG (Pmode, REGNO (base));
19644 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
19645 part[1][0] = replace_equiv_address (part[1][0], base);
19646 for (i = 1; i < nparts; i++)
19648 tmp = plus_constant (base, UNITS_PER_WORD * i);
19649 part[1][i] = replace_equiv_address (part[1][i], tmp);
19660 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
19661 emit_insn (gen_addsi3 (stack_pointer_rtx,
19662 stack_pointer_rtx, GEN_INT (-4)));
19663 emit_move_insn (part[0][2], part[1][2]);
19665 else if (nparts == 4)
19667 emit_move_insn (part[0][3], part[1][3]);
19668 emit_move_insn (part[0][2], part[1][2]);
19673 /* In 64-bit mode we don't have a 32-bit push available.  If this is a
19674 register, that is OK; we will just use the larger counterpart.  We also
19675 retype memory; this comes from an attempt to avoid a REX prefix when
19676 moving the second half of a TFmode value.  */
19677 if (GET_MODE (part[1][1]) == SImode)
19679 switch (GET_CODE (part[1][1]))
19682 part[1][1] = adjust_address (part[1][1], DImode, 0);
19686 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
19690 gcc_unreachable ();
19693 if (GET_MODE (part[1][0]) == SImode)
19694 part[1][0] = part[1][1];
19697 emit_move_insn (part[0][1], part[1][1]);
19698 emit_move_insn (part[0][0], part[1][0]);
19702 /* Choose the correct order so as not to overwrite the source before it is copied.  */
19703 if ((REG_P (part[0][0])
19704 && REG_P (part[1][1])
19705 && (REGNO (part[0][0]) == REGNO (part[1][1])
19707 && REGNO (part[0][0]) == REGNO (part[1][2]))
19709 && REGNO (part[0][0]) == REGNO (part[1][3]))))
19711 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
19713 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
19715 operands[2 + i] = part[0][j];
19716 operands[6 + i] = part[1][j];
19721 for (i = 0; i < nparts; i++)
19723 operands[2 + i] = part[0][i];
19724 operands[6 + i] = part[1][i];
19728 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
19729 if (optimize_insn_for_size_p ())
19731 for (j = 0; j < nparts - 1; j++)
19732 if (CONST_INT_P (operands[6 + j])
19733 && operands[6 + j] != const0_rtx
19734 && REG_P (operands[2 + j]))
19735 for (i = j; i < nparts - 1; i++)
19736 if (CONST_INT_P (operands[7 + i])
19737 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
19738 operands[7 + i] = operands[2 + j];
19741 for (i = 0; i < nparts; i++)
19742 emit_move_insn (operands[2 + i], operands[6 + i]);
19747 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
19748 left shift by a constant, either using a single shift or
19749 a sequence of add instructions. */
19752 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
19754 rtx (*insn)(rtx, rtx, rtx);
19757 || (count * ix86_cost->add <= ix86_cost->shift_const
19758 && !optimize_insn_for_size_p ()))
19760 insn = mode == DImode ? gen_addsi3 : gen_adddi3;
19761 while (count-- > 0)
19762 emit_insn (insn (operand, operand, operand));
19766 insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19767 emit_insn (insn (operand, operand, GEN_INT (count)));
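/* A standalone illustration (not GCC code, a hypothetical helper) of
   the add-based shift above: each addition of a value to itself
   shifts it left by one, so a small constant shift can be a short
   sequence of adds, which some CPUs execute faster than a shift by a
   constant.  */
static unsigned int
shl_by_adds_sketch (unsigned int x, int count)
{
  while (count-- > 0)
    x += x;			/* one add per bit of shift */
  return x;
}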
19772 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
19774 rtx (*gen_ashl3)(rtx, rtx, rtx);
19775 rtx (*gen_shld)(rtx, rtx, rtx);
19776 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19778 rtx low[2], high[2];
19781 if (CONST_INT_P (operands[2]))
19783 split_double_mode (mode, operands, 2, low, high);
19784 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19786 if (count >= half_width)
19788 emit_move_insn (high[0], low[1]);
19789 emit_move_insn (low[0], const0_rtx);
19791 if (count > half_width)
19792 ix86_expand_ashl_const (high[0], count - half_width, mode);
19796 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19798 if (!rtx_equal_p (operands[0], operands[1]))
19799 emit_move_insn (operands[0], operands[1]);
19801 emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
19802 ix86_expand_ashl_const (low[0], count, mode);
19807 split_double_mode (mode, operands, 1, low, high);
19809 gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19811 if (operands[1] == const1_rtx)
19813 /* Assuming we've chosen QImode-capable registers, 1 << N
19814 can be done with two 32/64-bit shifts, no branches, no cmoves.  */
19815 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
19817 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
19819 ix86_expand_clear (low[0]);
19820 ix86_expand_clear (high[0]);
19821 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));
19823 d = gen_lowpart (QImode, low[0]);
19824 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19825 s = gen_rtx_EQ (QImode, flags, const0_rtx);
19826 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19828 d = gen_lowpart (QImode, high[0]);
19829 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19830 s = gen_rtx_NE (QImode, flags, const0_rtx);
19831 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19834 /* Otherwise, we can get the same results by manually performing
19835 a bit extract operation on bit 5/6, and then performing the two
19836 shifts. The two methods of getting 0/1 into low/high are exactly
19837 the same size. Avoiding the shift in the bit extract case helps
19838 pentium4 a bit; no one else seems to care much either way. */
19841 enum machine_mode half_mode;
19842 rtx (*gen_lshr3)(rtx, rtx, rtx);
19843 rtx (*gen_and3)(rtx, rtx, rtx);
19844 rtx (*gen_xor3)(rtx, rtx, rtx);
19845 HOST_WIDE_INT bits;
19848 if (mode == DImode)
19850 half_mode = SImode;
19851 gen_lshr3 = gen_lshrsi3;
19852 gen_and3 = gen_andsi3;
19853 gen_xor3 = gen_xorsi3;
19858 half_mode = DImode;
19859 gen_lshr3 = gen_lshrdi3;
19860 gen_and3 = gen_anddi3;
19861 gen_xor3 = gen_xordi3;
19865 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
19866 x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
19868 x = gen_lowpart (half_mode, operands[2]);
19869 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
19871 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
19872 emit_insn (gen_and3 (high[0], high[0], const1_rtx));
19873 emit_move_insn (low[0], high[0]);
19874 emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
19877 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19878 emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
19882 if (operands[1] == constm1_rtx)
19884 /* For -1 << N, we can avoid the shld instruction, because we
19885 know that we're shifting 0...31/63 ones into a -1. */
19886 emit_move_insn (low[0], constm1_rtx);
19887 if (optimize_insn_for_size_p ())
19888 emit_move_insn (high[0], low[0]);
19890 emit_move_insn (high[0], constm1_rtx);
19894 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19896 if (!rtx_equal_p (operands[0], operands[1]))
19897 emit_move_insn (operands[0], operands[1]);
19899 split_double_mode (mode, operands, 1, low, high);
19900 emit_insn (gen_shld (high[0], low[0], operands[2]));
19903 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19905 if (TARGET_CMOVE && scratch)
19907 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19908 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19910 ix86_expand_clear (scratch);
19911 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
19915 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19916 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19918 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
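/* A standalone illustration (not GCC code, a hypothetical helper) of
   the branchless 1 << N above on a 32-bit pair: bit 5 of the count
   decides which word receives the initial 1 (via setcc), and both
   words are then shifted; the hardware masks the shift count to
   0..31, so the word holding 0 shifts harmlessly.  */
static void
one_shl_sketch (unsigned int n, unsigned int *lo, unsigned int *hi)
{
  unsigned int c = n & 31;			/* hardware count masking */
  *lo = (unsigned int) ((n & 32) == 0) << c;
  *hi = (unsigned int) ((n & 32) != 0) << c;
}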
19923 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
19925 rtx (*gen_ashr3)(rtx, rtx, rtx)
19926 = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
19927 rtx (*gen_shrd)(rtx, rtx, rtx);
19928 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19930 rtx low[2], high[2];
19933 if (CONST_INT_P (operands[2]))
19935 split_double_mode (mode, operands, 2, low, high);
19936 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19938 if (count == GET_MODE_BITSIZE (mode) - 1)
19940 emit_move_insn (high[0], high[1]);
19941 emit_insn (gen_ashr3 (high[0], high[0],
19942 GEN_INT (half_width - 1)));
19943 emit_move_insn (low[0], high[0]);
19946 else if (count >= half_width)
19948 emit_move_insn (low[0], high[1]);
19949 emit_move_insn (high[0], low[0]);
19950 emit_insn (gen_ashr3 (high[0], high[0],
19951 GEN_INT (half_width - 1)));
19953 if (count > half_width)
19954 emit_insn (gen_ashr3 (low[0], low[0],
19955 GEN_INT (count - half_width)));
19959 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19961 if (!rtx_equal_p (operands[0], operands[1]))
19962 emit_move_insn (operands[0], operands[1]);
19964 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19965 emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));
19970 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19972 if (!rtx_equal_p (operands[0], operands[1]))
19973 emit_move_insn (operands[0], operands[1]);
19975 split_double_mode (mode, operands, 1, low, high);
19977 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19978 emit_insn (gen_ashr3 (high[0], high[0], operands[2]));
19980 if (TARGET_CMOVE && scratch)
19982 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19983 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19985 emit_move_insn (scratch, high[0]);
19986 emit_insn (gen_ashr3 (scratch, scratch,
19987 GEN_INT (half_width - 1)));
19988 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19993 rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
19994 = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;
19996 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
20002 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
20004 rtx (*gen_lshr3)(rtx, rtx, rtx)
20005 = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
20006 rtx (*gen_shrd)(rtx, rtx, rtx);
20007 int half_width = GET_MODE_BITSIZE (mode) >> 1;
20009 rtx low[2], high[2];
20012 if (CONST_INT_P (operands[2]))
20014 split_double_mode (mode, operands, 2, low, high);
20015 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
20017 if (count >= half_width)
20019 emit_move_insn (low[0], high[1]);
20020 ix86_expand_clear (high[0]);
20022 if (count > half_width)
20023 emit_insn (gen_lshr3 (low[0], low[0],
20024 GEN_INT (count - half_width)));
20028 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
20030 if (!rtx_equal_p (operands[0], operands[1]))
20031 emit_move_insn (operands[0], operands[1]);
20033 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
20034 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
20039 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
20041 if (!rtx_equal_p (operands[0], operands[1]))
20042 emit_move_insn (operands[0], operands[1]);
20044 split_double_mode (mode, operands, 1, low, high);
20046 emit_insn (gen_shrd (low[0], high[0], operands[2]));
20047 emit_insn (gen_lshr3 (high[0], high[0], operands[2]));
20049 if (TARGET_CMOVE && scratch)
20051 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
20052 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
20054 ix86_expand_clear (scratch);
20055 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
20060 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
20061 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
20063 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
20068 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
20070 predict_jump (int prob)
20072 rtx insn = get_last_insn ();
20073 gcc_assert (JUMP_P (insn));
20074 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
20077 /* Helper function for the string operations below. Test whether VARIABLE
20078 has the VALUE bit clear, i.e. is aligned to VALUE bytes; if so, jump to the returned label. */
20080 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
20082 rtx label = gen_label_rtx ();
20083 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
20084 if (GET_MODE (variable) == DImode)
20085 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
20087 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
20088 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
20091 predict_jump (REG_BR_PROB_BASE * 50 / 100);
20093 predict_jump (REG_BR_PROB_BASE * 90 / 100);
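/* Illustrative sketch (not from the original source): a call like
   ix86_expand_aligntest (count, 4, true) emits roughly

       tmp = count & 4;
       if (tmp == 0) goto label;

   i.e. a single bit of VARIABLE is tested, which is how the jump trees
   in the prologues and epilogues below decide whether a chunk of that
   size is still outstanding.  The branch probability depends on
   EPILOGUE.  */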
20097 /* Decrement COUNTREG by VALUE. */
20099 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
20101 rtx (*gen_add)(rtx, rtx, rtx)
20102 = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;
20104 emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
20107 /* Zero-extend the possibly-SImode EXP into a Pmode register. */
20109 ix86_zero_extend_to_Pmode (rtx exp)
20112 if (GET_MODE (exp) == VOIDmode)
20113 return force_reg (Pmode, exp);
20114 if (GET_MODE (exp) == Pmode)
20115 return copy_to_mode_reg (Pmode, exp);
20116 r = gen_reg_rtx (Pmode);
20117 emit_insn (gen_zero_extendsidi2 (r, exp));
20121 /* Divide COUNTREG by SCALE. */
20123 scale_counter (rtx countreg, int scale)
20129 if (CONST_INT_P (countreg))
20130 return GEN_INT (INTVAL (countreg) / scale);
20131 gcc_assert (REG_P (countreg));
20133 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
20134 GEN_INT (exact_log2 (scale)),
20135 NULL, 1, OPTAB_DIRECT);
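/* Worked example (illustrative): for a constant count of 37 and scale 4,
   scale_counter returns GEN_INT (9); for a register count it instead
   emits a logical shift right by exact_log2 (4) == 2, so a subsequent
   "rep" instruction sees the count in 4-byte units.  */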
20139 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
20140 DImode for constant loop counts. */
20142 static enum machine_mode
20143 counter_mode (rtx count_exp)
20145 if (GET_MODE (count_exp) != VOIDmode)
20146 return GET_MODE (count_exp);
20147 if (!CONST_INT_P (count_exp))
20149 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
20154 /* When SRCPTR is non-NULL, output a simple loop to move memory from the
20155 pointer SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
20156 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
20157 the equivalent loop to set memory to VALUE (assumed to be in MODE).
20159 The size is rounded down to a whole number of chunks moved at once.
20160 SRCMEM and DESTMEM provide the MEM rtxes that supply proper aliasing info. */
20164 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
20165 rtx destptr, rtx srcptr, rtx value,
20166 rtx count, enum machine_mode mode, int unroll,
20169 rtx out_label, top_label, iter, tmp;
20170 enum machine_mode iter_mode = counter_mode (count);
20171 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
20172 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
20178 top_label = gen_label_rtx ();
20179 out_label = gen_label_rtx ();
20180 iter = gen_reg_rtx (iter_mode);
20182 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
20183 NULL, 1, OPTAB_DIRECT);
20184 /* Those two should combine. */
20185 if (piece_size == const1_rtx)
20187 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
20189 predict_jump (REG_BR_PROB_BASE * 10 / 100);
20191 emit_move_insn (iter, const0_rtx);
20193 emit_label (top_label);
20195 tmp = convert_modes (Pmode, iter_mode, iter, true);
20196 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
20197 destmem = change_address (destmem, mode, x_addr);
20201 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
20202 srcmem = change_address (srcmem, mode, y_addr);
20204 /* When unrolling for chips that reorder memory reads and writes,
20205 we can save registers by using a single temporary.
20206 Using 4 temporaries is also overkill in 32-bit mode. */
20207 if (!TARGET_64BIT && 0)
20209 for (i = 0; i < unroll; i++)
20214 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20216 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20218 emit_move_insn (destmem, srcmem);
20224 gcc_assert (unroll <= 4);
20225 for (i = 0; i < unroll; i++)
20227 tmpreg[i] = gen_reg_rtx (mode);
20231 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20233 emit_move_insn (tmpreg[i], srcmem);
20235 for (i = 0; i < unroll; i++)
20240 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20242 emit_move_insn (destmem, tmpreg[i]);
20247 for (i = 0; i < unroll; i++)
20251 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20252 emit_move_insn (destmem, value);
20255 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
20256 true, OPTAB_LIB_WIDEN);
20258 emit_move_insn (iter, tmp);
20260 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
20262 if (expected_size != -1)
20264 expected_size /= GET_MODE_SIZE (mode) * unroll;
20265 if (expected_size == 0)
20267 else if (expected_size > REG_BR_PROB_BASE)
20268 predict_jump (REG_BR_PROB_BASE - 1);
20270 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
20273 predict_jump (REG_BR_PROB_BASE * 80 / 100);
20274 iter = ix86_zero_extend_to_Pmode (iter);
20275 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
20276 true, OPTAB_LIB_WIDEN);
20277 if (tmp != destptr)
20278 emit_move_insn (destptr, tmp);
20281 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
20282 true, OPTAB_LIB_WIDEN);
20284 emit_move_insn (srcptr, tmp);
20286 emit_label (out_label);
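/* A hedged sketch of the loop emitted above for a move with
   MODE == SImode and UNROLL == 1 (pseudo code, names illustrative):

       size = count & ~3;
       iter = 0;
   top:
       *(int *) (destptr + iter) = *(int *) (srcptr + iter);
       iter += 4;
       if (iter < size) goto top;
       destptr += iter;
       srcptr += iter;
   out:

   The initial size == 0 bypass is emitted only when the piece moved
   per iteration is a single byte.  */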
20289 /* Output a "rep; mov" instruction.
20290 Arguments have the same meaning as for the previous function. */
20292 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
20293 rtx destptr, rtx srcptr,
20295 enum machine_mode mode)
20301 /* If the size is known, it is shorter to use rep movs. */
20302 if (mode == QImode && CONST_INT_P (count)
20303 && !(INTVAL (count) & 3))
20306 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
20307 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
20308 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
20309 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
20310 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
20311 if (mode != QImode)
20313 destexp = gen_rtx_ASHIFT (Pmode, countreg,
20314 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20315 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
20316 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
20317 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20318 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
20322 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
20323 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
20325 if (CONST_INT_P (count))
20327 count = GEN_INT (INTVAL (count)
20328 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
20329 destmem = shallow_copy_rtx (destmem);
20330 srcmem = shallow_copy_rtx (srcmem);
20331 set_mem_size (destmem, count);
20332 set_mem_size (srcmem, count);
20336 if (MEM_SIZE (destmem))
20337 set_mem_size (destmem, NULL_RTX);
20338 if (MEM_SIZE (srcmem))
20339 set_mem_size (srcmem, NULL_RTX);
20341 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
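/* Illustratively (a sketch, not from the original source): for
   MODE == SImode the sequence above amounts to

       shr $2, %ecx            (from scale_counter)
       rep movsl

   with DESTEXP and SRCEXP describing the final pointer values
   destptr + (countreg << 2) and srcptr + (countreg << 2) for the
   rep_mov pattern.  */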
20345 /* Output a "rep; stos" instruction.
20346 Arguments have the same meaning as for the previous function. */
20348 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
20349 rtx count, enum machine_mode mode,
20355 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
20356 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
20357 value = force_reg (mode, gen_lowpart (mode, value));
20358 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
20359 if (mode != QImode)
20361 destexp = gen_rtx_ASHIFT (Pmode, countreg,
20362 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20363 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
20366 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
20367 if (orig_value == const0_rtx && CONST_INT_P (count))
20369 count = GEN_INT (INTVAL (count)
20370 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
20371 destmem = shallow_copy_rtx (destmem);
20372 set_mem_size (destmem, count);
20374 else if (MEM_SIZE (destmem))
20375 set_mem_size (destmem, NULL_RTX);
20376 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
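/* A similar hedged sketch for the store case: with MODE == SImode this
   is roughly "shr $2, %ecx; rep stosl", where VALUE holds the
   (possibly promoted) fill pattern in a mode-sized register.  */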
20380 emit_strmov (rtx destmem, rtx srcmem,
20381 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
20383 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
20384 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
20385 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20388 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
20390 expand_movmem_epilogue (rtx destmem, rtx srcmem,
20391 rtx destptr, rtx srcptr, rtx count, int max_size)
20394 if (CONST_INT_P (count))
20396 HOST_WIDE_INT countval = INTVAL (count);
20399 if ((countval & 0x10) && max_size > 16)
20403 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
20404 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
20407 gcc_unreachable ();
20410 if ((countval & 0x08) && max_size > 8)
20413 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
20416 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
20417 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
20421 if ((countval & 0x04) && max_size > 4)
20423 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
20426 if ((countval & 0x02) && max_size > 2)
20428 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
20431 if ((countval & 0x01) && max_size > 1)
20433 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
20440 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
20441 count, 1, OPTAB_DIRECT);
20442 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
20443 count, QImode, 1, 4);
20447 /* When there are string ops, we can cheaply advance the dest and src pointers.
20448 Otherwise we save code size by maintaining an offset (zero is readily
20449 available from the preceding rep operation) and using x86 addressing modes. */
20451 if (TARGET_SINGLE_STRINGOP)
20455 rtx label = ix86_expand_aligntest (count, 4, true);
20456 src = change_address (srcmem, SImode, srcptr);
20457 dest = change_address (destmem, SImode, destptr);
20458 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20459 emit_label (label);
20460 LABEL_NUSES (label) = 1;
20464 rtx label = ix86_expand_aligntest (count, 2, true);
20465 src = change_address (srcmem, HImode, srcptr);
20466 dest = change_address (destmem, HImode, destptr);
20467 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20468 emit_label (label);
20469 LABEL_NUSES (label) = 1;
20473 rtx label = ix86_expand_aligntest (count, 1, true);
20474 src = change_address (srcmem, QImode, srcptr);
20475 dest = change_address (destmem, QImode, destptr);
20476 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20477 emit_label (label);
20478 LABEL_NUSES (label) = 1;
20483 rtx offset = force_reg (Pmode, const0_rtx);
20488 rtx label = ix86_expand_aligntest (count, 4, true);
20489 src = change_address (srcmem, SImode, srcptr);
20490 dest = change_address (destmem, SImode, destptr);
20491 emit_move_insn (dest, src);
20492 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
20493 true, OPTAB_LIB_WIDEN);
20495 emit_move_insn (offset, tmp);
20496 emit_label (label);
20497 LABEL_NUSES (label) = 1;
20501 rtx label = ix86_expand_aligntest (count, 2, true);
20502 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20503 src = change_address (srcmem, HImode, tmp);
20504 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20505 dest = change_address (destmem, HImode, tmp);
20506 emit_move_insn (dest, src);
20507 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
20508 true, OPTAB_LIB_WIDEN);
20510 emit_move_insn (offset, tmp);
20511 emit_label (label);
20512 LABEL_NUSES (label) = 1;
20516 rtx label = ix86_expand_aligntest (count, 1, true);
20517 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20518 src = change_address (srcmem, QImode, tmp);
20519 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20520 dest = change_address (destmem, QImode, tmp);
20521 emit_move_insn (dest, src);
20522 emit_label (label);
20523 LABEL_NUSES (label) = 1;
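/* For intuition about the variable-count path above (hedged sketch):
   with max_size == 8 it emits the jump tree

       test $4, count ; jz 1f ; move 4 bytes ; 1:
       test $2, count ; jz 2f ; move 2 bytes ; 2:
       test $1, count ; jz 3f ; move 1 byte  ; 3:

   so at most count & 7 bytes are copied without any loop.  */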
20528 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20530 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
20531 rtx count, int max_size)
20534 expand_simple_binop (counter_mode (count), AND, count,
20535 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
20536 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
20537 gen_lowpart (QImode, value), count, QImode,
20541 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20543 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
20547 if (CONST_INT_P (count))
20549 HOST_WIDE_INT countval = INTVAL (count);
20552 if ((countval & 0x10) && max_size > 16)
20556 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20557 emit_insn (gen_strset (destptr, dest, value));
20558 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
20559 emit_insn (gen_strset (destptr, dest, value));
20562 gcc_unreachable ();
20565 if ((countval & 0x08) && max_size > 8)
20569 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20570 emit_insn (gen_strset (destptr, dest, value));
20574 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20575 emit_insn (gen_strset (destptr, dest, value));
20576 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
20577 emit_insn (gen_strset (destptr, dest, value));
20581 if ((countval & 0x04) && max_size > 4)
20583 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20584 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20587 if ((countval & 0x02) && max_size > 2)
20589 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
20590 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20593 if ((countval & 0x01) && max_size > 1)
20595 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
20596 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20603 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
20608 rtx label = ix86_expand_aligntest (count, 16, true);
20611 dest = change_address (destmem, DImode, destptr);
20612 emit_insn (gen_strset (destptr, dest, value));
20613 emit_insn (gen_strset (destptr, dest, value));
20617 dest = change_address (destmem, SImode, destptr);
20618 emit_insn (gen_strset (destptr, dest, value));
20619 emit_insn (gen_strset (destptr, dest, value));
20620 emit_insn (gen_strset (destptr, dest, value));
20621 emit_insn (gen_strset (destptr, dest, value));
20623 emit_label (label);
20624 LABEL_NUSES (label) = 1;
20628 rtx label = ix86_expand_aligntest (count, 8, true);
20631 dest = change_address (destmem, DImode, destptr);
20632 emit_insn (gen_strset (destptr, dest, value));
20636 dest = change_address (destmem, SImode, destptr);
20637 emit_insn (gen_strset (destptr, dest, value));
20638 emit_insn (gen_strset (destptr, dest, value));
20640 emit_label (label);
20641 LABEL_NUSES (label) = 1;
20645 rtx label = ix86_expand_aligntest (count, 4, true);
20646 dest = change_address (destmem, SImode, destptr);
20647 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20648 emit_label (label);
20649 LABEL_NUSES (label) = 1;
20653 rtx label = ix86_expand_aligntest (count, 2, true);
20654 dest = change_address (destmem, HImode, destptr);
20655 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20656 emit_label (label);
20657 LABEL_NUSES (label) = 1;
20661 rtx label = ix86_expand_aligntest (count, 1, true);
20662 dest = change_address (destmem, QImode, destptr);
20663 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20664 emit_label (label);
20665 LABEL_NUSES (label) = 1;
20669 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
20670 to DESIRED_ALIGNMENT. */
20672 expand_movmem_prologue (rtx destmem, rtx srcmem,
20673 rtx destptr, rtx srcptr, rtx count,
20674 int align, int desired_alignment)
20676 if (align <= 1 && desired_alignment > 1)
20678 rtx label = ix86_expand_aligntest (destptr, 1, false);
20679 srcmem = change_address (srcmem, QImode, srcptr);
20680 destmem = change_address (destmem, QImode, destptr);
20681 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20682 ix86_adjust_counter (count, 1);
20683 emit_label (label);
20684 LABEL_NUSES (label) = 1;
20686 if (align <= 2 && desired_alignment > 2)
20688 rtx label = ix86_expand_aligntest (destptr, 2, false);
20689 srcmem = change_address (srcmem, HImode, srcptr);
20690 destmem = change_address (destmem, HImode, destptr);
20691 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20692 ix86_adjust_counter (count, 2);
20693 emit_label (label);
20694 LABEL_NUSES (label) = 1;
20696 if (align <= 4 && desired_alignment > 4)
20698 rtx label = ix86_expand_aligntest (destptr, 4, false);
20699 srcmem = change_address (srcmem, SImode, srcptr);
20700 destmem = change_address (destmem, SImode, destptr);
20701 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20702 ix86_adjust_counter (count, 4);
20703 emit_label (label);
20704 LABEL_NUSES (label) = 1;
20706 gcc_assert (desired_alignment <= 8);
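/* Worked example (illustrative): with ALIGN == 1 and
   DESIRED_ALIGNMENT == 8, the three tests above copy up to
   1 + 2 + 4 bytes, each guarded by a single-bit test of DESTPTR, so
   DESTPTR leaves the prologue 8-byte aligned and COUNT has been
   decremented by however many bytes were copied.  */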
20709 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
20710 ALIGN_BYTES is how many bytes need to be copied. */
20712 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
20713 int desired_align, int align_bytes)
20716 rtx src_size, dst_size;
20718 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
20719 if (src_align_bytes >= 0)
20720 src_align_bytes = desired_align - src_align_bytes;
20721 src_size = MEM_SIZE (src);
20722 dst_size = MEM_SIZE (dst);
20723 if (align_bytes & 1)
20725 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20726 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
20728 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20730 if (align_bytes & 2)
20732 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20733 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
20734 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20735 set_mem_align (dst, 2 * BITS_PER_UNIT);
20736 if (src_align_bytes >= 0
20737 && (src_align_bytes & 1) == (align_bytes & 1)
20738 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
20739 set_mem_align (src, 2 * BITS_PER_UNIT);
20741 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20743 if (align_bytes & 4)
20745 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20746 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
20747 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20748 set_mem_align (dst, 4 * BITS_PER_UNIT);
20749 if (src_align_bytes >= 0)
20751 unsigned int src_align = 0;
20752 if ((src_align_bytes & 3) == (align_bytes & 3))
20754 else if ((src_align_bytes & 1) == (align_bytes & 1))
20756 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20757 set_mem_align (src, src_align * BITS_PER_UNIT);
20760 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20762 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20763 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
20764 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20765 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20766 if (src_align_bytes >= 0)
20768 unsigned int src_align = 0;
20769 if ((src_align_bytes & 7) == (align_bytes & 7))
20771 else if ((src_align_bytes & 3) == (align_bytes & 3))
20773 else if ((src_align_bytes & 1) == (align_bytes & 1))
20775 if (src_align > (unsigned int) desired_align)
20776 src_align = desired_align;
20777 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20778 set_mem_align (src, src_align * BITS_PER_UNIT);
20781 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20783 set_mem_size (dst, GEN_INT (INTVAL (src_size) - align_bytes));
20788 /* Store enough bytes at DEST to align DEST, known to be aligned by ALIGN,
20789 to DESIRED_ALIGNMENT. */
20791 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
20792 int align, int desired_alignment)
20794 if (align <= 1 && desired_alignment > 1)
20796 rtx label = ix86_expand_aligntest (destptr, 1, false);
20797 destmem = change_address (destmem, QImode, destptr);
20798 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
20799 ix86_adjust_counter (count, 1);
20800 emit_label (label);
20801 LABEL_NUSES (label) = 1;
20803 if (align <= 2 && desired_alignment > 2)
20805 rtx label = ix86_expand_aligntest (destptr, 2, false);
20806 destmem = change_address (destmem, HImode, destptr);
20807 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
20808 ix86_adjust_counter (count, 2);
20809 emit_label (label);
20810 LABEL_NUSES (label) = 1;
20812 if (align <= 4 && desired_alignment > 4)
20814 rtx label = ix86_expand_aligntest (destptr, 4, false);
20815 destmem = change_address (destmem, SImode, destptr);
20816 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
20817 ix86_adjust_counter (count, 4);
20818 emit_label (label);
20819 LABEL_NUSES (label) = 1;
20821 gcc_assert (desired_alignment <= 8);
20824 /* Store enough bytes at DST to align DST, known to be aligned by ALIGN, to
20825 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
20827 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
20828 int desired_align, int align_bytes)
20831 rtx dst_size = MEM_SIZE (dst);
20832 if (align_bytes & 1)
20834 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20836 emit_insn (gen_strset (destreg, dst,
20837 gen_lowpart (QImode, value)));
20839 if (align_bytes & 2)
20841 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20842 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20843 set_mem_align (dst, 2 * BITS_PER_UNIT);
20845 emit_insn (gen_strset (destreg, dst,
20846 gen_lowpart (HImode, value)));
20848 if (align_bytes & 4)
20850 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20851 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20852 set_mem_align (dst, 4 * BITS_PER_UNIT);
20854 emit_insn (gen_strset (destreg, dst,
20855 gen_lowpart (SImode, value)));
20857 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20858 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20859 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20861 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20865 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
20866 static enum stringop_alg
20867 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
20868 int *dynamic_check)
20870 const struct stringop_algs * algs;
20871 bool optimize_for_speed;
20872 /* Algorithms using the rep prefix want at least edi and ecx;
20873 additionally, memset wants eax and memcpy wants esi. Don't
20874 consider such algorithms if the user has appropriated those
20875 registers for their own purposes. */
20876 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
20878 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
20880 #define ALG_USABLE_P(alg) (rep_prefix_usable \
20881 || (alg != rep_prefix_1_byte \
20882 && alg != rep_prefix_4_byte \
20883 && alg != rep_prefix_8_byte))
20884 const struct processor_costs *cost;
20886 /* Even if the string operation call is cold, we still might spend a lot
20887 of time processing large blocks. */
20888 if (optimize_function_for_size_p (cfun)
20889 || (optimize_insn_for_size_p ()
20890 && expected_size != -1 && expected_size < 256))
20891 optimize_for_speed = false;
20893 optimize_for_speed = true;
20895 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
20897 *dynamic_check = -1;
20899 algs = &cost->memset[TARGET_64BIT != 0];
20901 algs = &cost->memcpy[TARGET_64BIT != 0];
20902 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
20903 return stringop_alg;
20904 /* rep; movq or rep; movl is the smallest variant. */
20905 else if (!optimize_for_speed)
20907 if (!count || (count & 3))
20908 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
20910 return rep_prefix_usable ? rep_prefix_4_byte : loop;
20912 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
20914 else if (expected_size != -1 && expected_size < 4)
20915 return loop_1_byte;
20916 else if (expected_size != -1)
20919 enum stringop_alg alg = libcall;
20920 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20922 /* We get here if the algorithms that were not libcall-based
20923 were rep-prefix based and we are unable to use rep prefixes
20924 based on global register usage. Break out of the loop and
20925 use the heuristic below. */
20926 if (algs->size[i].max == 0)
20928 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
20930 enum stringop_alg candidate = algs->size[i].alg;
20932 if (candidate != libcall && ALG_USABLE_P (candidate))
20934 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
20935 the last non-libcall inline algorithm. */
20936 if (TARGET_INLINE_ALL_STRINGOPS)
20938 /* When the current size is best copied by a libcall,
20939 but we are still forced to inline, run the heuristic below
20940 that picks code for medium-sized blocks. */
20941 if (alg != libcall)
20945 else if (ALG_USABLE_P (candidate))
20949 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
20951 /* When asked to inline the call anyway, try to pick a meaningful choice.
20952 We look for the maximal size of a block that is faster to copy by hand,
20953 and accept blocks of at most that size, guessing that the average size
20954 will be roughly half of the maximum.
20956 If this turns out to be bad, we might simply specify the preferred
20957 choice in ix86_costs. */
20958 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20959 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
20962 enum stringop_alg alg;
20964 bool any_alg_usable_p = true;
20966 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20968 enum stringop_alg candidate = algs->size[i].alg;
20969 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
20971 if (candidate != libcall && candidate
20972 && ALG_USABLE_P (candidate))
20973 max = algs->size[i].max;
20975 /* If there aren't any usable algorithms, then recursing on
20976 smaller sizes isn't going to find anything. Just return the
20977 simple byte-at-a-time copy loop. */
20978 if (!any_alg_usable_p)
20980 /* Pick something reasonable. */
20981 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20982 *dynamic_check = 128;
20983 return loop_1_byte;
20987 alg = decide_alg (count, max / 2, memset, dynamic_check);
20988 gcc_assert (*dynamic_check == -1);
20989 gcc_assert (alg != libcall);
20990 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20991 *dynamic_check = max;
20994 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
20995 #undef ALG_USABLE_P
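/* To make the table walk above concrete, a purely hypothetical cost
   table fragment of the kind decide_alg consults might read

       {libcall, {{256, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}}

   With expected_size == 1000 the loop above skips the {256, loop}
   entry and settles on rep_prefix_4_byte; larger known sizes hit the
   {-1, libcall} entry, and unknown sizes use the leading unknown_size
   field (here libcall).  */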
20998 /* Decide on alignment. We know that the operand is already aligned to ALIGN
20999 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
21001 decide_alignment (int align,
21002 enum stringop_alg alg,
21005 int desired_align = 0;
21009 gcc_unreachable ();
21011 case unrolled_loop:
21012 desired_align = GET_MODE_SIZE (Pmode);
21014 case rep_prefix_8_byte:
21017 case rep_prefix_4_byte:
21018 /* PentiumPro has special logic that triggers for 8-byte-aligned blocks,
21019 copying a whole cache line at once. */
21020 if (TARGET_PENTIUMPRO)
21025 case rep_prefix_1_byte:
21026 /* PentiumPro has special logic that triggers for 8-byte-aligned blocks,
21027 copying a whole cache line at once. */
21028 if (TARGET_PENTIUMPRO)
21042 if (desired_align < align)
21043 desired_align = align;
21044 if (expected_size != -1 && expected_size < 4)
21045 desired_align = align;
21046 return desired_align;
21049 /* Return the smallest power of 2 greater than VAL. */
21051 smallest_pow2_greater_than (int val)
21059 /* Expand string move (memcpy) operation. Use i386 string operations when
21060 profitable. expand_setmem contains similar code. The code depends upon
21061 architecture, block size and alignment, but always has the same overall structure:
21064 1) Prologue guard: Conditional that jumps up to the epilogues for small
21065 blocks that can be handled by the epilogue alone. This is faster, but
21066 also needed for correctness, since the prologue assumes the block is larger
21067 than the desired alignment.
21069 An optional dynamic check for size and a libcall for large
21070 blocks are emitted here too, with -minline-stringops-dynamically.
21072 2) Prologue: copy the first few bytes in order to get the destination
21073 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less than
21074 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
21075 We emit either a jump tree on power-of-two-sized blocks, or a byte loop.
21077 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
21078 with the specified algorithm.
21080 4) Epilogue: code copying the tail of the block that is too small to be
21081 handled by the main body (or up to the size guarded by the prologue guard). */
21084 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
21085 rtx expected_align_exp, rtx expected_size_exp)
21091 rtx jump_around_label = NULL;
21092 HOST_WIDE_INT align = 1;
21093 unsigned HOST_WIDE_INT count = 0;
21094 HOST_WIDE_INT expected_size = -1;
21095 int size_needed = 0, epilogue_size_needed;
21096 int desired_align = 0, align_bytes = 0;
21097 enum stringop_alg alg;
21099 bool need_zero_guard = false;
21101 if (CONST_INT_P (align_exp))
21102 align = INTVAL (align_exp);
21103 /* i386 can do misaligned access at reasonably increased cost. */
21104 if (CONST_INT_P (expected_align_exp)
21105 && INTVAL (expected_align_exp) > align)
21106 align = INTVAL (expected_align_exp);
21107 /* ALIGN is the minimum of destination and source alignment, but we care here
21108 just about destination alignment. */
21109 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
21110 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
21112 if (CONST_INT_P (count_exp))
21113 count = expected_size = INTVAL (count_exp);
21114 if (CONST_INT_P (expected_size_exp) && count == 0)
21115 expected_size = INTVAL (expected_size_exp);
21117 /* Make sure we don't need to care about overflow later on. */
21118 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21121 /* Step 0: Decide on preferred algorithm, desired alignment and
21122 size of chunks to be copied by main loop. */
21124 alg = decide_alg (count, expected_size, false, &dynamic_check);
21125 desired_align = decide_alignment (align, alg, expected_size);
21127 if (!TARGET_ALIGN_STRINGOPS)
21128 align = desired_align;
21130 if (alg == libcall)
21132 gcc_assert (alg != no_stringop);
21134 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
21135 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21136 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
21141 gcc_unreachable ();
21143 need_zero_guard = true;
21144 size_needed = GET_MODE_SIZE (Pmode);
21146 case unrolled_loop:
21147 need_zero_guard = true;
21148 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
21150 case rep_prefix_8_byte:
21153 case rep_prefix_4_byte:
21156 case rep_prefix_1_byte:
21160 need_zero_guard = true;
21165 epilogue_size_needed = size_needed;
21167 /* Step 1: Prologue guard. */
21169 /* Alignment code needs count to be in register. */
21170 if (CONST_INT_P (count_exp) && desired_align > align)
21172 if (INTVAL (count_exp) > desired_align
21173 && INTVAL (count_exp) > size_needed)
21176 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21177 if (align_bytes <= 0)
21180 align_bytes = desired_align - align_bytes;
21182 if (align_bytes == 0)
21183 count_exp = force_reg (counter_mode (count_exp), count_exp);
21185 gcc_assert (desired_align >= 1 && align >= 1);
21187 /* Ensure that alignment prologue won't copy past end of block. */
21188 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21190 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21191 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21192 Make sure it is a power of 2. */
21193 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21197 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21199 /* If the main algorithm works on QImode, no epilogue is needed.
21200 For small sizes just don't align anything. */
21201 if (size_needed == 1)
21202 desired_align = align;
21209 label = gen_label_rtx ();
21210 emit_cmp_and_jump_insns (count_exp,
21211 GEN_INT (epilogue_size_needed),
21212 LTU, 0, counter_mode (count_exp), 1, label);
21213 if (expected_size == -1 || expected_size < epilogue_size_needed)
21214 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21216 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21220 /* Emit code to decide at runtime whether a library call or inline code should be used. */
21222 if (dynamic_check != -1)
21224 if (CONST_INT_P (count_exp))
21226 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
21228 emit_block_move_via_libcall (dst, src, count_exp, false);
21229 count_exp = const0_rtx;
21235 rtx hot_label = gen_label_rtx ();
21236 jump_around_label = gen_label_rtx ();
21237 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21238 LEU, 0, GET_MODE (count_exp), 1, hot_label);
21239 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21240 emit_block_move_via_libcall (dst, src, count_exp, false);
21241 emit_jump (jump_around_label);
21242 emit_label (hot_label);
21246 /* Step 2: Alignment prologue. */
21248 if (desired_align > align)
21250 if (align_bytes == 0)
21252 /* Except for the first move in the prologue, we no longer know
21253 the constant offset in the aliasing info. It doesn't seem worth
21254 the pain to maintain it for the first move, so throw away the info early. */
21256 src = change_address (src, BLKmode, srcreg);
21257 dst = change_address (dst, BLKmode, destreg);
21258 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
21263 /* If we know how many bytes need to be stored before dst is
21264 sufficiently aligned, maintain aliasing info accurately. */
21265 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
21266 desired_align, align_bytes);
21267 count_exp = plus_constant (count_exp, -align_bytes);
21268 count -= align_bytes;
21270 if (need_zero_guard
21271 && (count < (unsigned HOST_WIDE_INT) size_needed
21272 || (align_bytes == 0
21273 && count < ((unsigned HOST_WIDE_INT) size_needed
21274 + desired_align - align))))
21276 /* It is possible that we copied enough so that the main loop will not execute. */
21278 gcc_assert (size_needed > 1);
21279 if (label == NULL_RTX)
21280 label = gen_label_rtx ();
21281 emit_cmp_and_jump_insns (count_exp,
21282 GEN_INT (size_needed),
21283 LTU, 0, counter_mode (count_exp), 1, label);
21284 if (expected_size == -1
21285 || expected_size < (desired_align - align) / 2 + size_needed)
21286 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21288 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21291 if (label && size_needed == 1)
21293 emit_label (label);
21294 LABEL_NUSES (label) = 1;
21296 epilogue_size_needed = 1;
21298 else if (label == NULL_RTX)
21299 epilogue_size_needed = size_needed;
21301 /* Step 3: Main loop. */
21307 gcc_unreachable ();
21309 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21310 count_exp, QImode, 1, expected_size);
21313 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21314 count_exp, Pmode, 1, expected_size);
21316 case unrolled_loop:
21317 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
21318 registers for 4 temporaries anyway. */
21319 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21320 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
21323 case rep_prefix_8_byte:
21324 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21327 case rep_prefix_4_byte:
21328 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21331 case rep_prefix_1_byte:
21332 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21336 /* Properly adjust the offsets of the src and dest memory for aliasing. */
21337 if (CONST_INT_P (count_exp))
21339 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
21340 (count / size_needed) * size_needed);
21341 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21342 (count / size_needed) * size_needed);
21346 src = change_address (src, BLKmode, srcreg);
21347 dst = change_address (dst, BLKmode, destreg);
21350 /* Step 4: Epilogue to copy the remaining bytes. */
21354 /* When the main loop is done, COUNT_EXP might hold the original count,
21355 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
21356 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
21357 bytes. Compensate if needed. */
21359 if (size_needed < epilogue_size_needed)
21362 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21363 GEN_INT (size_needed - 1), count_exp, 1,
21365 if (tmp != count_exp)
21366 emit_move_insn (count_exp, tmp);
21368 emit_label (label);
21369 LABEL_NUSES (label) = 1;
21372 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21373 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
21374 epilogue_size_needed);
21375 if (jump_around_label)
21376 emit_label (jump_around_label);
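/* A worked example of how the steps interact (hedged, assuming the
   destination is already sufficiently aligned): on 64-bit with
   alg == loop, size_needed is 8, so for a constant count of 100 the
   main loop copies 96 bytes, epilogue_size_needed becomes 8, and the
   epilogue moves the remaining 100 & 7 == 4 bytes via the constant
   jump tree in expand_movmem_epilogue.  */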
21380 /* Helper function for memset. For the QImode value 0xXY, produce
21381 0xXYXYXYXY of the width specified by MODE. This is essentially
21382 a * 0x01010101, but we can do slightly better than
21383 synth_mult by unwinding the sequence by hand on CPUs with slow multiply. */
21388 enum machine_mode valmode = GET_MODE (val);
21390 int nops = mode == DImode ? 3 : 2;
21392 gcc_assert (mode == SImode || mode == DImode);
21393 if (val == const0_rtx)
21394 return copy_to_mode_reg (mode, const0_rtx);
21395 if (CONST_INT_P (val))
21397 HOST_WIDE_INT v = INTVAL (val) & 255;
21401 if (mode == DImode)
21402 v |= (v << 16) << 16;
21403 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
21406 if (valmode == VOIDmode)
21408 if (valmode != QImode)
21409 val = gen_lowpart (QImode, val);
21410 if (mode == QImode)
21412 if (!TARGET_PARTIAL_REG_STALL)
21414 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
21415 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
21416 <= (ix86_cost->shift_const + ix86_cost->add) * nops
21417 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
21419 rtx reg = convert_modes (mode, QImode, val, true);
21420 tmp = promote_duplicated_reg (mode, const1_rtx);
21421 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
21426 rtx reg = convert_modes (mode, QImode, val, true);
21428 if (!TARGET_PARTIAL_REG_STALL)
21429 if (mode == SImode)
21430 emit_insn (gen_movsi_insv_1 (reg, reg));
21432 emit_insn (gen_movdi_insv_1 (reg, reg));
21435 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
21436 NULL, 1, OPTAB_DIRECT);
21438 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21440 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
21441 NULL, 1, OPTAB_DIRECT);
21442 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21443 if (mode == SImode)
21445 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
21446 NULL, 1, OPTAB_DIRECT);
21447 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
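/* Worked example (illustrative): promoting val == 0x2A to SImode first
   zero-extends it, then

       reg |= reg << 8;     reg is 0x00002A2A
       reg |= reg << 16;    reg is 0x2A2A2A2A

   matching the constant path above, which computes the same
   0x2A2A2A2A directly.  */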
21452 /* Duplicate VAL, using promote_duplicated_reg, into the maximal size that will
21453 be needed by the main loop copying SIZE_NEEDED chunks and by the prologue
21454 raising alignment from ALIGN to DESIRED_ALIGN. */
21456 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
21461 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
21462 promoted_val = promote_duplicated_reg (DImode, val);
21463 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
21464 promoted_val = promote_duplicated_reg (SImode, val);
21465 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
21466 promoted_val = promote_duplicated_reg (HImode, val);
21468 promoted_val = val;
21470 return promoted_val;
21473 /* Expand string set (memset) operation. Use i386 string operations when
21474 profitable. See the ix86_expand_movmem comment for an explanation of the
21475 individual steps performed. */
21477 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
21478 rtx expected_align_exp, rtx expected_size_exp)
21483 rtx jump_around_label = NULL;
21484 HOST_WIDE_INT align = 1;
21485 unsigned HOST_WIDE_INT count = 0;
21486 HOST_WIDE_INT expected_size = -1;
21487 int size_needed = 0, epilogue_size_needed;
21488 int desired_align = 0, align_bytes = 0;
21489 enum stringop_alg alg;
21490 rtx promoted_val = NULL;
21491 bool force_loopy_epilogue = false;
21493 bool need_zero_guard = false;
21495 if (CONST_INT_P (align_exp))
21496 align = INTVAL (align_exp);
21497 /* i386 can do misaligned access at reasonably increased cost. */
21498 if (CONST_INT_P (expected_align_exp)
21499 && INTVAL (expected_align_exp) > align)
21500 align = INTVAL (expected_align_exp);
21501 if (CONST_INT_P (count_exp))
21502 count = expected_size = INTVAL (count_exp);
21503 if (CONST_INT_P (expected_size_exp) && count == 0)
21504 expected_size = INTVAL (expected_size_exp);
21506 /* Make sure we don't need to care about overflow later on. */
21507 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21510 /* Step 0: Decide on preferred algorithm, desired alignment and
21511 size of chunks to be copied by main loop. */
21513 alg = decide_alg (count, expected_size, true, &dynamic_check);
21514 desired_align = decide_alignment (align, alg, expected_size);
21516 if (!TARGET_ALIGN_STRINGOPS)
21517 align = desired_align;
21519 if (alg == libcall)
21521 gcc_assert (alg != no_stringop);
21523 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
21524 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21529 gcc_unreachable ();
21531 need_zero_guard = true;
21532 size_needed = GET_MODE_SIZE (Pmode);
21534 case unrolled_loop:
21535 need_zero_guard = true;
21536 size_needed = GET_MODE_SIZE (Pmode) * 4;
21538 case rep_prefix_8_byte:
21541 case rep_prefix_4_byte:
21544 case rep_prefix_1_byte:
21548 need_zero_guard = true;
21552 epilogue_size_needed = size_needed;
21554 /* Step 1: Prologue guard. */
21556 /* Alignment code needs count to be in register. */
21557 if (CONST_INT_P (count_exp) && desired_align > align)
21559 if (INTVAL (count_exp) > desired_align
21560 && INTVAL (count_exp) > size_needed)
21563 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21564 if (align_bytes <= 0)
21567 align_bytes = desired_align - align_bytes;
21569 if (align_bytes == 0)
21571 enum machine_mode mode = SImode;
21572 if (TARGET_64BIT && (count & ~0xffffffff))
21574 count_exp = force_reg (mode, count_exp);
21577 /* Do the cheap promotion to allow better CSE across the
21578 main loop and epilogue (i.e. one load of the big constant in
21579 front of all the code). */
21580 if (CONST_INT_P (val_exp))
21581 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21582 desired_align, align);
21583 /* Ensure that alignment prologue won't copy past end of block. */
21584 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21586 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21587 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21588 Make sure it is power of 2. */
21589 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21591 /* To improve performance of small blocks, we jump around the VAL
21592 promoting code. This means that if the promoted VAL is not constant,
21593 we might not use it in the epilogue and have to use the byte loop variant. */
21595 if (epilogue_size_needed > 2 && !promoted_val)
21596 force_loopy_epilogue = true;
21599 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21601 /* If the main algorithm works on QImode, no epilogue is needed.
21602 For small sizes just don't align anything. */
21603 if (size_needed == 1)
21604 desired_align = align;
21611 label = gen_label_rtx ();
21612 emit_cmp_and_jump_insns (count_exp,
21613 GEN_INT (epilogue_size_needed),
21614 LTU, 0, counter_mode (count_exp), 1, label);
21615 if (expected_size == -1 || expected_size <= epilogue_size_needed)
21616 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21618 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21621 if (dynamic_check != -1)
21623 rtx hot_label = gen_label_rtx ();
21624 jump_around_label = gen_label_rtx ();
21625 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21626 LEU, 0, counter_mode (count_exp), 1, hot_label);
21627 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21628 set_storage_via_libcall (dst, count_exp, val_exp, false);
21629 emit_jump (jump_around_label);
21630 emit_label (hot_label);
21633 /* Step 2: Alignment prologue. */
21635 /* Do the expensive promotion once we have branched off the small blocks. */
21637 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21638 desired_align, align);
21639 gcc_assert (desired_align >= 1 && align >= 1);
21641 if (desired_align > align)
21643 if (align_bytes == 0)
21645 /* Except for the first move in the prologue, we no longer know
21646 the constant offset in the aliasing info. It doesn't seem worth
21647 the pain to maintain it for the first move, so throw away the info early. */
21649 dst = change_address (dst, BLKmode, destreg);
21650 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
21655 /* If we know how many bytes need to be stored before dst is
21656 sufficiently aligned, maintain aliasing info accurately. */
21657 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
21658 desired_align, align_bytes);
21659 count_exp = plus_constant (count_exp, -align_bytes);
21660 count -= align_bytes;
21662 if (need_zero_guard
21663 && (count < (unsigned HOST_WIDE_INT) size_needed
21664 || (align_bytes == 0
21665 && count < ((unsigned HOST_WIDE_INT) size_needed
21666 + desired_align - align))))
21668 /* It is possible that we copied enough so that the main loop will not execute. */
21670 gcc_assert (size_needed > 1);
21671 if (label == NULL_RTX)
21672 label = gen_label_rtx ();
21673 emit_cmp_and_jump_insns (count_exp,
21674 GEN_INT (size_needed),
21675 LTU, 0, counter_mode (count_exp), 1, label);
21676 if (expected_size == -1
21677 || expected_size < (desired_align - align) / 2 + size_needed)
21678 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21680 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21683 if (label && size_needed == 1)
21685 emit_label (label);
21686 LABEL_NUSES (label) = 1;
21688 promoted_val = val_exp;
21689 epilogue_size_needed = 1;
21691 else if (label == NULL_RTX)
21692 epilogue_size_needed = size_needed;
21694 /* Step 3: Main loop. */
21700 gcc_unreachable ();
21702 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21703 count_exp, QImode, 1, expected_size);
21706 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21707 count_exp, Pmode, 1, expected_size);
21709 case unrolled_loop:
21710 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21711 count_exp, Pmode, 4, expected_size);
21713 case rep_prefix_8_byte:
21714 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21717 case rep_prefix_4_byte:
21718 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21721 case rep_prefix_1_byte:
21722 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21726 /* Properly adjust the offset of the dest memory for aliasing. */
21727 if (CONST_INT_P (count_exp))
21728 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21729 (count / size_needed) * size_needed);
21731 dst = change_address (dst, BLKmode, destreg);
21733 /* Step 4: Epilogue to copy the remaining bytes. */
21737 /* When the main loop is done, COUNT_EXP might hold the original count,
21738 while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
21739 Epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
21740 bytes. Compensate if needed. */
21742 if (size_needed < epilogue_size_needed)
21745 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21746 GEN_INT (size_needed - 1), count_exp, 1,
21748 if (tmp != count_exp)
21749 emit_move_insn (count_exp, tmp);
21751 emit_label (label);
21752 LABEL_NUSES (label) = 1;
21755 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21757 if (force_loopy_epilogue)
21758 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
21759 epilogue_size_needed);
21761 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
21762 epilogue_size_needed);
21764 if (jump_around_label)
21765 emit_label (jump_around_label);
21769 /* Expand the appropriate insns for doing strlen if not just doing repnz; scasb
21772 out = result, initialized with the start address
21773 align_rtx = alignment of the address.
21774 scratch = scratch register, initialized with the start address when
21775 not aligned, otherwise undefined
21777 This is just the body. It needs the initializations mentioned above and
21778 some address computation at the end. These things are done in i386.md. */
21781 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
21785 rtx align_2_label = NULL_RTX;
21786 rtx align_3_label = NULL_RTX;
21787 rtx align_4_label = gen_label_rtx ();
21788 rtx end_0_label = gen_label_rtx ();
21790 rtx tmpreg = gen_reg_rtx (SImode);
21791 rtx scratch = gen_reg_rtx (SImode);
21795 if (CONST_INT_P (align_rtx))
21796 align = INTVAL (align_rtx);
21798 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
21800 /* Is there a known alignment and is it less than 4? */
21803 rtx scratch1 = gen_reg_rtx (Pmode);
21804 emit_move_insn (scratch1, out);
21805 /* Is there a known alignment and is it not 2? */
21808 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
21809 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
21811 /* Leave just the two lower bits (the mask is 3). */
21812 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
21813 NULL_RTX, 0, OPTAB_WIDEN);
21815 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21816 Pmode, 1, align_4_label);
21817 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
21818 Pmode, 1, align_2_label);
21819 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
21820 Pmode, 1, align_3_label);
21824 /* Since the alignment is 2, we have to check 2 or 0 bytes;
21825 check whether it is aligned to a 4-byte boundary. */
21827 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
21828 NULL_RTX, 0, OPTAB_WIDEN);
21830 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21831 Pmode, 1, align_4_label);
21834 mem = change_address (src, QImode, out);
21836 /* Now compare the bytes. */
21838 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
21839 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
21840 QImode, 1, end_0_label);
21842 /* Increment the address. */
21843 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21845 /* Not needed with an alignment of 2. */
21848 emit_label (align_2_label);
21850 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21853 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21855 emit_label (align_3_label);
21858 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21861 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21864 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
21865 align this loop; that only makes programs bigger and does not speed them up. */
21867 emit_label (align_4_label);
21869 mem = change_address (src, SImode, out);
21870 emit_move_insn (scratch, mem);
21871 emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));
21873 /* This formula yields a nonzero result iff one of the bytes is zero.
21874 This saves three branches inside the loop and many cycles. */
21876 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
21877 emit_insn (gen_one_cmplsi2 (scratch, scratch));
21878 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
21879 emit_insn (gen_andsi3 (tmpreg, tmpreg,
21880 gen_int_mode (0x80808080, SImode)));
21881 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
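/* A worked instance of the formula (not from the original source): for
   scratch == 0x41410041, which has a zero in byte 1,

       tmpreg = 0x41410041 + 0xfefefeff = 0x403fff40
       scratch = ~0x41410041 = 0xbebeffbe
       tmpreg &= scratch;  tmpreg is 0x003eff00
       tmpreg &= 0x80808080;  tmpreg is 0x00008000, nonzero

   while a word with no zero byte always yields 0 and falls through.  */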
21886 rtx reg = gen_reg_rtx (SImode);
21887 rtx reg2 = gen_reg_rtx (Pmode);
21888 emit_move_insn (reg, tmpreg);
21889 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
21891 /* If zero is not in the first two bytes, move two bytes forward. */
21892 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21893 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21894 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21895 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
21896 gen_rtx_IF_THEN_ELSE (SImode, tmp,
21899 /* Emit the lea manually to avoid clobbering the flags. */
21900 emit_insn (gen_rtx_SET (SImode, reg2,
21901 gen_rtx_PLUS (Pmode, out, const2_rtx)));
21903 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21904 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21905 emit_insn (gen_rtx_SET (VOIDmode, out,
21906 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
21912 rtx end_2_label = gen_label_rtx ();
21913 /* Is zero in the first two bytes? */
21915 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21916 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21917 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
21918 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
21919 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
21921 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
21922 JUMP_LABEL (tmp) = end_2_label;
21924 /* Not in the first two. Move two bytes forward. */
21925 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
21926 emit_insn (ix86_gen_add3 (out, out, const2_rtx));
21928 emit_label (end_2_label);
21932 /* Avoid a branch when fixing up the final byte position. */
21933 tmpreg = gen_lowpart (QImode, tmpreg);
21934 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
21935 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
21936 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
21937 emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
21939 emit_label (end_0_label);
21942 /* Expand strlen. */
21945 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
21947 rtx addr, scratch1, scratch2, scratch3, scratch4;
21949 /* The generic case of the strlen expander is long. Avoid expanding it
21950 unless TARGET_INLINE_ALL_STRINGOPS. */
21952 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21953 && !TARGET_INLINE_ALL_STRINGOPS
21954 && !optimize_insn_for_size_p ()
21955 && (!CONST_INT_P (align) || INTVAL (align) < 4))
21958 addr = force_reg (Pmode, XEXP (src, 0));
21959 scratch1 = gen_reg_rtx (Pmode);
21961 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21962 && !optimize_insn_for_size_p ())
21964 /* Well, it seems that some optimizer does not combine a call like
21965 foo (strlen (bar), strlen (bar));
21966 when the move and the subtraction are done here. It calculates
21967 the length just once when these instructions are done inside
21968 output_strlen_unroll(). But since &bar[strlen(bar)] is often
21969 used, and this uses one fewer register for the lifetime of
21970 output_strlen_unroll(), this is better. */
21972 emit_move_insn (out, addr);
21974 ix86_expand_strlensi_unroll_1 (out, src, align);
21976 /* strlensi_unroll_1 returns the address of the zero at the end of
21977 the string, like memchr(), so compute the length by subtracting
21978 the start address. */
21979 emit_insn (ix86_gen_sub3 (out, out, addr));
21985 /* Can't use this if the user has appropriated eax, ecx, or edi. */
21986 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
21989 scratch2 = gen_reg_rtx (Pmode);
21990 scratch3 = gen_reg_rtx (Pmode);
21991 scratch4 = force_reg (Pmode, constm1_rtx);
21993 emit_move_insn (scratch3, addr);
21994 eoschar = force_reg (QImode, eoschar);
21996 src = replace_equiv_address_nv (src, scratch3);
21998 /* If .md starts supporting :P, this can be done in .md. */
21999 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
22000 scratch4), UNSPEC_SCAS);
22001 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
22002 emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
22003 emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
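/* Illustrative arithmetic for the scasb path above -- a sketch for
   exposition only.  The count register starts at -1 and repnz scasb
   decrements it once per byte scanned, including the terminating zero,
   so for a string of length n it ends at -(n + 2); the one's
   complement followed by adding -1 therefore yields n.  */
static inline unsigned long
scas_strlen_sketch (unsigned long count_after_scas)
{
  /* ~(-(n + 2)) == n + 1, and (n + 1) - 1 == n.  */
  return ~count_after_scas - 1;
}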
22008 /* For a given symbol (function), construct code to compute the address of its
22009 PLT entry in the large x86-64 PIC model. */
22011 construct_plt_address (rtx symbol)
22013 rtx tmp = gen_reg_rtx (Pmode);
22014 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
22016 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
22017 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
22019 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
22020 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
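/* Illustrative shape of the sequence built above (assumed assembly;
   register names are placeholders, not what the compiler necessarily
   allocates):
     movabs $func@PLTOFF, %reg	# the UNSPEC_PLTOFF constant
     add    %pic_base, %reg	# pic_offset_table_rtx
   i.e. the PLT entry address is the PIC base plus the PLTOFF offset. */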
22025 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
22027 rtx pop, int sibcall)
22029 rtx use = NULL, call;
22031 if (pop == const0_rtx)
22033 gcc_assert (!TARGET_64BIT || !pop);
22035 if (TARGET_MACHO && !TARGET_64BIT)
22038 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
22039 fnaddr = machopic_indirect_call_target (fnaddr);
22044 /* Static functions and indirect calls don't need the pic register. */
22045 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
22046 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
22047 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
22048 use_reg (&use, pic_offset_table_rtx);
22051 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
22053 rtx al = gen_rtx_REG (QImode, AX_REG);
22054 emit_move_insn (al, callarg2);
22055 use_reg (&use, al);
22058 if (ix86_cmodel == CM_LARGE_PIC
22060 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
22061 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
22062 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
22064 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
22065 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
22067 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
22068 fnaddr = gen_rtx_MEM (QImode, fnaddr);
22071 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
22073 call = gen_rtx_SET (VOIDmode, retval, call);
22076 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
22077 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
22078 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
22081 && ix86_cfun_abi () == MS_ABI
22082 && (!callarg2 || INTVAL (callarg2) != -2))
22084 /* We need to represent that the SI and DI registers are clobbered by the function called. */
22086 static int clobbered_registers[] = {
22087 XMM6_REG, XMM7_REG, XMM8_REG,
22088 XMM9_REG, XMM10_REG, XMM11_REG,
22089 XMM12_REG, XMM13_REG, XMM14_REG,
22090 XMM15_REG, SI_REG, DI_REG
22093 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
22094 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
22095 UNSPEC_MS_TO_SYSV_CALL);
22099 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
22100 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
22103 (SSE_REGNO_P (clobbered_registers[i])
22105 clobbered_registers[i]));
22107 call = gen_rtx_PARALLEL (VOIDmode,
22108 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
22112 /* Add UNSPEC_CALL_NEEDS_VZEROUPPER decoration. */
22113 if (TARGET_VZEROUPPER)
22118 if (cfun->machine->callee_pass_avx256_p)
22120 if (cfun->machine->callee_return_avx256_p)
22121 avx256 = callee_return_pass_avx256;
22123 avx256 = callee_pass_avx256;
22125 else if (cfun->machine->callee_return_avx256_p)
22126 avx256 = callee_return_avx256;
22128 avx256 = call_no_avx256;
22130 if (reload_completed)
22131 emit_insn (gen_avx_vzeroupper (GEN_INT (avx256)));
22134 unspec = gen_rtx_UNSPEC (VOIDmode,
22135 gen_rtvec (1, GEN_INT (avx256)),
22136 UNSPEC_CALL_NEEDS_VZEROUPPER);
22137 call = gen_rtx_PARALLEL (VOIDmode,
22138 gen_rtvec (2, call, unspec));
22142 call = emit_call_insn (call);
22144 CALL_INSN_FUNCTION_USAGE (call) = use;
22150 ix86_split_call_vzeroupper (rtx insn, rtx vzeroupper)
22152 rtx call = XVECEXP (PATTERN (insn), 0, 0);
22153 emit_insn (gen_avx_vzeroupper (vzeroupper));
22154 emit_call_insn (call);
22157 /* Output the assembly for a call instruction. */
22160 ix86_output_call_insn (rtx insn, rtx call_op, int addr_op)
22162 bool direct_p = constant_call_address_operand (call_op, Pmode);
22163 bool seh_nop_p = false;
22165 gcc_assert (addr_op == 0 || addr_op == 1);
22167 if (SIBLING_CALL_P (insn))
22170 return addr_op ? "jmp\t%P1" : "jmp\t%P0";
22171 /* SEH epilogue detection requires the indirect branch case
22172 to include REX.W. */
22173 else if (TARGET_SEH)
22174 return addr_op ? "rex.W jmp %A1" : "rex.W jmp %A0";
22176 return addr_op ? "jmp\t%A1" : "jmp\t%A0";
22179 /* SEH unwinding can require an extra nop to be emitted in several
22180 circumstances. Determine if we have one of those. */
22185 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
22187 /* If we get to another real insn, we don't need the nop. */
22191 /* If we get to the epilogue note, prevent a catch region from
22192 being adjacent to the standard epilogue sequence. With non-call
22193 exceptions, we'll have done this during epilogue emission. */
22194 if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
22195 && !flag_non_call_exceptions
22196 && !can_throw_internal (insn))
22203 /* If we didn't find a real insn following the call, prevent the
22204 unwinder from looking into the next function. */
22212 return addr_op ? "call\t%P1\n\tnop" : "call\t%P0\n\tnop";
22214 return addr_op ? "call\t%P1" : "call\t%P0";
22219 return addr_op ? "call\t%A1\n\tnop" : "call\t%A0\n\tnop";
22221 return addr_op ? "call\t%A1" : "call\t%A0";
22225 /* Clear stack slot assignments remembered from previous functions.
22226 This is called from INIT_EXPANDERS once before RTL is emitted for each
22229 static struct machine_function *
22230 ix86_init_machine_status (void)
22232 struct machine_function *f;
22234 f = ggc_alloc_cleared_machine_function ();
22235 f->use_fast_prologue_epilogue_nregs = -1;
22236 f->tls_descriptor_call_expanded_p = 0;
22237 f->call_abi = ix86_abi;
22242 /* Return a MEM corresponding to a stack slot with mode MODE.
22243 Allocate a new slot if necessary.
22245 The RTL for a function can have several slots available: N is
22246 which slot to use. */
22249 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
22251 struct stack_local_entry *s;
22253 gcc_assert (n < MAX_386_STACK_LOCALS);
22255 /* Virtual slot is valid only before vregs are instantiated. */
22256 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
22258 for (s = ix86_stack_locals; s; s = s->next)
22259 if (s->mode == mode && s->n == n)
22260 return validize_mem (copy_rtx (s->rtl));
22262 s = ggc_alloc_stack_local_entry ();
22265 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
22267 s->next = ix86_stack_locals;
22268 ix86_stack_locals = s;
22269 return validize_mem (s->rtl);
22272 /* Construct the SYMBOL_REF for the tls_get_addr function. */
22274 static GTY(()) rtx ix86_tls_symbol;
22276 ix86_tls_get_addr (void)
22279 if (!ix86_tls_symbol)
22281 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
22282 (TARGET_ANY_GNU_TLS
22284 ? "___tls_get_addr"
22285 : "__tls_get_addr");
22288 return ix86_tls_symbol;
22291 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
22293 static GTY(()) rtx ix86_tls_module_base_symbol;
22295 ix86_tls_module_base (void)
22298 if (!ix86_tls_module_base_symbol)
22300 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
22301 "_TLS_MODULE_BASE_");
22302 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
22303 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
22306 return ix86_tls_module_base_symbol;
22309 /* Calculate the length of the memory address in the instruction
22310 encoding. Does not include the one-byte modrm, opcode, or prefix. */
22313 memory_address_length (rtx addr)
22315 struct ix86_address parts;
22316 rtx base, index, disp;
22320 if (GET_CODE (addr) == PRE_DEC
22321 || GET_CODE (addr) == POST_INC
22322 || GET_CODE (addr) == PRE_MODIFY
22323 || GET_CODE (addr) == POST_MODIFY)
22326 ok = ix86_decompose_address (addr, &parts);
22329 if (parts.base && GET_CODE (parts.base) == SUBREG)
22330 parts.base = SUBREG_REG (parts.base);
22331 if (parts.index && GET_CODE (parts.index) == SUBREG)
22332 parts.index = SUBREG_REG (parts.index);
22335 index = parts.index;
22340 - esp as the base always wants an index,
22341 - ebp as the base always wants a displacement,
22342 - r12 as the base always wants an index,
22343 - r13 as the base always wants a displacement. */
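/* Illustrative extra bytes for the rules above (assumed; the one-byte
   modrm itself is never counted here):
     (%eax)	-> 0	plain register indirect
     (%esp)	-> 1	SIB byte required
     (%ebp)	-> 1	encoded as a zero disp8
     4(%eax)	-> 1	disp8
     4096(%eax)	-> 4	disp32  */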
22345 /* Register Indirect. */
22346 if (base && !index && !disp)
22348 /* esp (for its index) and ebp (for its displacement) need
22349 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
22352 && (addr == arg_pointer_rtx
22353 || addr == frame_pointer_rtx
22354 || REGNO (addr) == SP_REG
22355 || REGNO (addr) == BP_REG
22356 || REGNO (addr) == R12_REG
22357 || REGNO (addr) == R13_REG))
22361 /* Direct Addressing. In 64-bit mode, mod 00 with r/m 5
22362 is not disp32 but disp32(%rip), so for plain disp32 a
22363 SIB byte is needed, unless print_operand_address
22364 optimizes it into disp32(%rip) or (%rip) is implied
22366 else if (disp && !base && !index)
22373 if (GET_CODE (disp) == CONST)
22374 symbol = XEXP (disp, 0);
22375 if (GET_CODE (symbol) == PLUS
22376 && CONST_INT_P (XEXP (symbol, 1)))
22377 symbol = XEXP (symbol, 0);
22379 if (GET_CODE (symbol) != LABEL_REF
22380 && (GET_CODE (symbol) != SYMBOL_REF
22381 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
22382 && (GET_CODE (symbol) != UNSPEC
22383 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
22384 && XINT (symbol, 1) != UNSPEC_PCREL
22385 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
22392 /* Find the length of the displacement constant. */
22395 if (base && satisfies_constraint_K (disp))
22400 /* ebp always wants a displacement. Similarly r13. */
22401 else if (base && REG_P (base)
22402 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
22405 /* An index requires the two-byte modrm form.... */
22407 /* ...like esp (or r12), which always wants an index. */
22408 || base == arg_pointer_rtx
22409 || base == frame_pointer_rtx
22410 || (base && REG_P (base)
22411 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
22428 /* Compute the default value for the "length_immediate" attribute. When SHORTFORM
22429 is set, expect that the insn has an 8-bit immediate alternative. */
22431 ix86_attr_length_immediate_default (rtx insn, int shortform)
22435 extract_insn_cached (insn);
22436 for (i = recog_data.n_operands - 1; i >= 0; --i)
22437 if (CONSTANT_P (recog_data.operand[i]))
22439 enum attr_mode mode = get_attr_mode (insn);
22442 if (shortform && CONST_INT_P (recog_data.operand[i]))
22444 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
22451 ival = trunc_int_for_mode (ival, HImode);
22454 ival = trunc_int_for_mode (ival, SImode);
22459 if (IN_RANGE (ival, -128, 127))
22476 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
22481 fatal_insn ("unknown insn mode", insn);
22486 /* Compute default value for "length_address" attribute. */
22488 ix86_attr_length_address_default (rtx insn)
22492 if (get_attr_type (insn) == TYPE_LEA)
22494 rtx set = PATTERN (insn), addr;
22496 if (GET_CODE (set) == PARALLEL)
22497 set = XVECEXP (set, 0, 0);
22499 gcc_assert (GET_CODE (set) == SET);
22501 addr = SET_SRC (set);
22502 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
22504 if (GET_CODE (addr) == ZERO_EXTEND)
22505 addr = XEXP (addr, 0);
22506 if (GET_CODE (addr) == SUBREG)
22507 addr = SUBREG_REG (addr);
22510 return memory_address_length (addr);
22513 extract_insn_cached (insn);
22514 for (i = recog_data.n_operands - 1; i >= 0; --i)
22515 if (MEM_P (recog_data.operand[i]))
22517 constrain_operands_cached (reload_completed);
22518 if (which_alternative != -1)
22520 const char *constraints = recog_data.constraints[i];
22521 int alt = which_alternative;
22523 while (*constraints == '=' || *constraints == '+')
22526 while (*constraints++ != ',')
22528 /* Skip ignored operands. */
22529 if (*constraints == 'X')
22532 return memory_address_length (XEXP (recog_data.operand[i], 0));
22537 /* Compute the default value for the "length_vex" attribute. It includes
22538 the 2- or 3-byte VEX prefix and 1 opcode byte. */
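/* Illustrative lengths (assumed): vaddps %xmm0, %xmm1, %xmm2 fits the
   2-byte (C5) prefix, giving 2 + 1 = 3, while an insn that needs
   VEX.W, VEX.X or VEX.B -- e.g. one using %xmm8 as the rm operand --
   must use the 3-byte (C4) form, giving 3 + 1 = 4.  */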
22541 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
22546 /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX.W bit
22547 requires the 3-byte VEX prefix. */
22548 if (!has_0f_opcode || has_vex_w)
22551 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
22555 extract_insn_cached (insn);
22557 for (i = recog_data.n_operands - 1; i >= 0; --i)
22558 if (REG_P (recog_data.operand[i]))
22560 /* The REX.W bit requires the 3-byte VEX prefix. */
22561 if (GET_MODE (recog_data.operand[i]) == DImode
22562 && GENERAL_REG_P (recog_data.operand[i]))
22567 /* The REX.X or REX.B bits require the 3-byte VEX prefix. */
22568 if (MEM_P (recog_data.operand[i])
22569 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
22576 /* Return the maximum number of instructions a CPU can issue. */
22579 ix86_issue_rate (void)
22583 case PROCESSOR_PENTIUM:
22584 case PROCESSOR_ATOM:
22588 case PROCESSOR_PENTIUMPRO:
22589 case PROCESSOR_PENTIUM4:
22590 case PROCESSOR_CORE2_32:
22591 case PROCESSOR_CORE2_64:
22592 case PROCESSOR_COREI7_32:
22593 case PROCESSOR_COREI7_64:
22594 case PROCESSOR_ATHLON:
22596 case PROCESSOR_AMDFAM10:
22597 case PROCESSOR_NOCONA:
22598 case PROCESSOR_GENERIC32:
22599 case PROCESSOR_GENERIC64:
22600 case PROCESSOR_BDVER1:
22601 case PROCESSOR_BTVER1:
22609 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
22610 by DEP_INSN and nothing else set by DEP_INSN. */
22613 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
22617 /* Simplify the test for uninteresting insns. */
22618 if (insn_type != TYPE_SETCC
22619 && insn_type != TYPE_ICMOV
22620 && insn_type != TYPE_FCMOV
22621 && insn_type != TYPE_IBR)
22624 if ((set = single_set (dep_insn)) != 0)
22626 set = SET_DEST (set);
22629 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
22630 && XVECLEN (PATTERN (dep_insn), 0) == 2
22631 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
22632 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
22634 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
22635 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
22640 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
22643 /* This test is true if the dependent insn reads the flags but
22644 not any other potentially set register. */
22645 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
22648 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
22654 /* Return true iff USE_INSN has a memory address with operands set by
22658 ix86_agi_dependent (rtx set_insn, rtx use_insn)
22661 extract_insn_cached (use_insn);
22662 for (i = recog_data.n_operands - 1; i >= 0; --i)
22663 if (MEM_P (recog_data.operand[i]))
22665 rtx addr = XEXP (recog_data.operand[i], 0);
22666 return modified_in_p (addr, set_insn) != 0;
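/* Illustrative address-generation interlock handled below for
   PROCESSOR_PENTIUM (assumed example):
     mov  $buf, %eax
     mov  (%eax), %ebx	; the address depends on the previous insn
   The dependent load pays an extra address-generation cycle, so the
   cost of such a dependence is increased by one.  */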
22672 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22674 enum attr_type insn_type, dep_insn_type;
22675 enum attr_memory memory;
22677 int dep_insn_code_number;
22679 /* Anti and output dependencies have zero cost on all CPUs. */
22680 if (REG_NOTE_KIND (link) != 0)
22683 dep_insn_code_number = recog_memoized (dep_insn);
22685 /* If we can't recognize the insns, we can't really do anything. */
22686 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
22689 insn_type = get_attr_type (insn);
22690 dep_insn_type = get_attr_type (dep_insn);
22694 case PROCESSOR_PENTIUM:
22695 /* Address Generation Interlock adds a cycle of latency. */
22696 if (insn_type == TYPE_LEA)
22698 rtx addr = PATTERN (insn);
22700 if (GET_CODE (addr) == PARALLEL)
22701 addr = XVECEXP (addr, 0, 0);
22703 gcc_assert (GET_CODE (addr) == SET);
22705 addr = SET_SRC (addr);
22706 if (modified_in_p (addr, dep_insn))
22709 else if (ix86_agi_dependent (dep_insn, insn))
22712 /* ??? Compares pair with jump/setcc. */
22713 if (ix86_flags_dependent (insn, dep_insn, insn_type))
22716 /* Floating point stores require the value to be ready one cycle earlier. */
22717 if (insn_type == TYPE_FMOV
22718 && get_attr_memory (insn) == MEMORY_STORE
22719 && !ix86_agi_dependent (dep_insn, insn))
22723 case PROCESSOR_PENTIUMPRO:
22724 memory = get_attr_memory (insn);
22726 /* INT->FP conversion is expensive. */
22727 if (get_attr_fp_int_src (dep_insn))
22730 /* There is one extra cycle of latency between an FP op and a store. */
22731 if (insn_type == TYPE_FMOV
22732 && (set = single_set (dep_insn)) != NULL_RTX
22733 && (set2 = single_set (insn)) != NULL_RTX
22734 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
22735 && MEM_P (SET_DEST (set2)))
22738 /* Show the ability of the reorder buffer to hide the latency of a load by
22739 executing it in parallel with the previous instruction, when the
22740 previous instruction is not needed to compute the address. */
22741 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22742 && !ix86_agi_dependent (dep_insn, insn))
22744 /* Claim moves to take one cycle, as the core can issue one load
22745 at a time and the next load can start a cycle later. */
22746 if (dep_insn_type == TYPE_IMOV
22747 || dep_insn_type == TYPE_FMOV)
22755 memory = get_attr_memory (insn);
22757 /* The esp dependency is resolved before the instruction is really
22759 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
22760 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
22763 /* INT->FP conversion is expensive. */
22764 if (get_attr_fp_int_src (dep_insn))
22767 /* Show the ability of the reorder buffer to hide the latency of a load by
22768 executing it in parallel with the previous instruction, when the
22769 previous instruction is not needed to compute the address. */
22770 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22771 && !ix86_agi_dependent (dep_insn, insn))
22773 /* Claim moves to take one cycle, as the core can issue one load
22774 at a time and the next load can start a cycle later. */
22775 if (dep_insn_type == TYPE_IMOV
22776 || dep_insn_type == TYPE_FMOV)
22785 case PROCESSOR_ATHLON:
22787 case PROCESSOR_AMDFAM10:
22788 case PROCESSOR_BDVER1:
22789 case PROCESSOR_BTVER1:
22790 case PROCESSOR_ATOM:
22791 case PROCESSOR_GENERIC32:
22792 case PROCESSOR_GENERIC64:
22793 memory = get_attr_memory (insn);
22795 /* Show the ability of the reorder buffer to hide the latency of a load by
22796 executing it in parallel with the previous instruction, when the
22797 previous instruction is not needed to compute the address. */
22798 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22799 && !ix86_agi_dependent (dep_insn, insn))
22801 enum attr_unit unit = get_attr_unit (insn);
22804 /* Because of the difference between the lengths of the integer and
22805 floating unit pipeline preparation stages, the memory operands
22806 for floating point are cheaper.
22808 ??? For Athlon the difference is most probably 2. */
22809 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
22812 loadcost = TARGET_ATHLON ? 2 : 0;
22814 if (cost >= loadcost)
22827 /* How many alternative schedules to try. This should be as wide as the
22828 scheduling freedom in the DFA, but no wider. Making this value too
22829 large results in extra work for the scheduler. */
22832 ia32_multipass_dfa_lookahead (void)
22836 case PROCESSOR_PENTIUM:
22839 case PROCESSOR_PENTIUMPRO:
22843 case PROCESSOR_CORE2_32:
22844 case PROCESSOR_CORE2_64:
22845 case PROCESSOR_COREI7_32:
22846 case PROCESSOR_COREI7_64:
22847 /* Generally, we want haifa-sched:max_issue() to look ahead as far
22848 as the number of instructions that can be executed in a cycle, i.e.,
22849 issue_rate. It is unclear why the tunings for many CPUs do not do this. */
22850 return ix86_issue_rate ();
22859 /* Model the decoder of Core 2/i7.
22860 The hooks below for multipass scheduling (see haifa-sched.c:max_issue)
22861 track the instruction fetch block boundaries and make sure that long
22862 (9+ bytes) instructions are assigned to D0. */
22864 /* Maximum length of an insn that can be handled by
22865 a secondary decoder unit. '8' for Core 2/i7. */
22866 static int core2i7_secondary_decoder_max_insn_size;
22868 /* Ifetch block size, i.e., the number of bytes the decoder reads per cycle.
22869 '16' for Core 2/i7. */
22870 static int core2i7_ifetch_block_size;
22872 /* Maximum number of instructions the decoder can handle per cycle.
22873 '6' for Core 2/i7. */
22874 static int core2i7_ifetch_block_max_insns;
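/* Illustrative consequences of these parameters (worked example,
   assumed): an insn of 9+ bytes can only be handled by decoder D0, and
   once, say, a 7-byte and an 8-byte insn have been taken from a
   16-byte ifetch block, a further 2-byte insn no longer fits
   (7 + 8 + 2 > 16) and is filtered out until the next cycle.  */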
22876 typedef struct ix86_first_cycle_multipass_data_ *
22877 ix86_first_cycle_multipass_data_t;
22878 typedef const struct ix86_first_cycle_multipass_data_ *
22879 const_ix86_first_cycle_multipass_data_t;
22881 /* A variable to store target state across calls to max_issue within
22883 static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
22884 *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
22886 /* Initialize DATA. */
22888 core2i7_first_cycle_multipass_init (void *_data)
22890 ix86_first_cycle_multipass_data_t data
22891 = (ix86_first_cycle_multipass_data_t) _data;
22893 data->ifetch_block_len = 0;
22894 data->ifetch_block_n_insns = 0;
22895 data->ready_try_change = NULL;
22896 data->ready_try_change_size = 0;
22899 /* Advancing the cycle; reset ifetch block counts. */
22901 core2i7_dfa_post_advance_cycle (void)
22903 ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;
22905 gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22907 data->ifetch_block_len = 0;
22908 data->ifetch_block_n_insns = 0;
22911 static int min_insn_size (rtx);
22913 /* Filter out insns from ready_try that the core will not be able to issue
22914 on the current cycle due to decoder restrictions. */
22916 core2i7_first_cycle_multipass_filter_ready_try
22917 (const_ix86_first_cycle_multipass_data_t data,
22918 char *ready_try, int n_ready, bool first_cycle_insn_p)
22925 if (ready_try[n_ready])
22928 insn = get_ready_element (n_ready);
22929 insn_size = min_insn_size (insn);
22931 if (/* If this is too long an insn for a secondary decoder ... */
22932 (!first_cycle_insn_p
22933 && insn_size > core2i7_secondary_decoder_max_insn_size)
22934 /* ... or it would not fit into the ifetch block ... */
22935 || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
22936 /* ... or the decoder is full already ... */
22937 || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
22938 /* ... mask the insn out. */
22940 ready_try[n_ready] = 1;
22942 if (data->ready_try_change)
22943 SET_BIT (data->ready_try_change, n_ready);
22948 /* Prepare for a new round of multipass lookahead scheduling. */
22950 core2i7_first_cycle_multipass_begin (void *_data, char *ready_try, int n_ready,
22951 bool first_cycle_insn_p)
22953 ix86_first_cycle_multipass_data_t data
22954 = (ix86_first_cycle_multipass_data_t) _data;
22955 const_ix86_first_cycle_multipass_data_t prev_data
22956 = ix86_first_cycle_multipass_data;
22958 /* Restore the state from the end of the previous round. */
22959 data->ifetch_block_len = prev_data->ifetch_block_len;
22960 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns;
22962 /* Filter instructions that cannot be issued on current cycle due to
22963 decoder restrictions. */
22964 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22965 first_cycle_insn_p);
22968 /* INSN is being issued in the current solution. Account for its impact on
22969 the decoder model. */
22971 core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
22972 rtx insn, const void *_prev_data)
22974 ix86_first_cycle_multipass_data_t data
22975 = (ix86_first_cycle_multipass_data_t) _data;
22976 const_ix86_first_cycle_multipass_data_t prev_data
22977 = (const_ix86_first_cycle_multipass_data_t) _prev_data;
22979 int insn_size = min_insn_size (insn);
22981 data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
22982 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
22983 gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
22984 && data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22986 /* Allocate or resize the bitmap for storing INSN's effect on ready_try. */
22987 if (!data->ready_try_change)
22989 data->ready_try_change = sbitmap_alloc (n_ready);
22990 data->ready_try_change_size = n_ready;
22992 else if (data->ready_try_change_size < n_ready)
22994 data->ready_try_change = sbitmap_resize (data->ready_try_change,
22996 data->ready_try_change_size = n_ready;
22998 sbitmap_zero (data->ready_try_change);
23000 /* Filter out insns from ready_try that the core will not be able to issue
23001 on the current cycle due to decoder restrictions. */
23002 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
23006 /* Revert the effect on ready_try. */
23008 core2i7_first_cycle_multipass_backtrack (const void *_data,
23010 int n_ready ATTRIBUTE_UNUSED)
23012 const_ix86_first_cycle_multipass_data_t data
23013 = (const_ix86_first_cycle_multipass_data_t) _data;
23014 unsigned int i = 0;
23015 sbitmap_iterator sbi;
23017 gcc_assert (sbitmap_last_set_bit (data->ready_try_change) < n_ready);
23018 EXECUTE_IF_SET_IN_SBITMAP (data->ready_try_change, 0, i, sbi)
23024 /* Save the result of multipass lookahead scheduling for the next round. */
23026 core2i7_first_cycle_multipass_end (const void *_data)
23028 const_ix86_first_cycle_multipass_data_t data
23029 = (const_ix86_first_cycle_multipass_data_t) _data;
23030 ix86_first_cycle_multipass_data_t next_data
23031 = ix86_first_cycle_multipass_data;
23035 next_data->ifetch_block_len = data->ifetch_block_len;
23036 next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
23040 /* Deallocate target data. */
23042 core2i7_first_cycle_multipass_fini (void *_data)
23044 ix86_first_cycle_multipass_data_t data
23045 = (ix86_first_cycle_multipass_data_t) _data;
23047 if (data->ready_try_change)
23049 sbitmap_free (data->ready_try_change);
23050 data->ready_try_change = NULL;
23051 data->ready_try_change_size = 0;
23055 /* Prepare for scheduling pass. */
23057 ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
23058 int verbose ATTRIBUTE_UNUSED,
23059 int max_uid ATTRIBUTE_UNUSED)
23061 /* Install scheduling hooks for the current CPU. Some of these hooks are used
23062 in time-critical parts of the scheduler, so we only set them up when
23063 they are actually used. */
23066 case PROCESSOR_CORE2_32:
23067 case PROCESSOR_CORE2_64:
23068 case PROCESSOR_COREI7_32:
23069 case PROCESSOR_COREI7_64:
23070 targetm.sched.dfa_post_advance_cycle
23071 = core2i7_dfa_post_advance_cycle;
23072 targetm.sched.first_cycle_multipass_init
23073 = core2i7_first_cycle_multipass_init;
23074 targetm.sched.first_cycle_multipass_begin
23075 = core2i7_first_cycle_multipass_begin;
23076 targetm.sched.first_cycle_multipass_issue
23077 = core2i7_first_cycle_multipass_issue;
23078 targetm.sched.first_cycle_multipass_backtrack
23079 = core2i7_first_cycle_multipass_backtrack;
23080 targetm.sched.first_cycle_multipass_end
23081 = core2i7_first_cycle_multipass_end;
23082 targetm.sched.first_cycle_multipass_fini
23083 = core2i7_first_cycle_multipass_fini;
23085 /* Set decoder parameters. */
23086 core2i7_secondary_decoder_max_insn_size = 8;
23087 core2i7_ifetch_block_size = 16;
23088 core2i7_ifetch_block_max_insns = 6;
23092 targetm.sched.dfa_post_advance_cycle = NULL;
23093 targetm.sched.first_cycle_multipass_init = NULL;
23094 targetm.sched.first_cycle_multipass_begin = NULL;
23095 targetm.sched.first_cycle_multipass_issue = NULL;
23096 targetm.sched.first_cycle_multipass_backtrack = NULL;
23097 targetm.sched.first_cycle_multipass_end = NULL;
23098 targetm.sched.first_cycle_multipass_fini = NULL;
23104 /* Compute the alignment given to a constant that is being placed in memory.
23105 EXP is the constant and ALIGN is the alignment that the object would
23107 The value of this function is used instead of that alignment to align
23111 ix86_constant_alignment (tree exp, int align)
23113 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
23114 || TREE_CODE (exp) == INTEGER_CST)
23116 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
23118 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
23121 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
23122 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
23123 return BITS_PER_WORD;
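/* Illustrative effect (assumed): a 'double' constant in the pool is
   given 64-bit alignment even where the type itself only requires 32
   bits, and a string constant of 31 or more characters is word-aligned
   so that block operations on it can use full-word accesses.  */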
23128 /* Compute the alignment for a static variable.
23129 TYPE is the data type, and ALIGN is the alignment that
23130 the object would ordinarily have. The value of this function is used
23131 instead of that alignment to align the object. */
23134 ix86_data_alignment (tree type, int align)
23136 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
23138 if (AGGREGATE_TYPE_P (type)
23139 && TYPE_SIZE (type)
23140 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23141 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
23142 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
23143 && align < max_align)
23146 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
23147 to a 16-byte boundary. */
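/* Illustrative effect of that rule (assumed): a definition such as
     static char buf[100];
   receives at least 16-byte alignment on x86-64, so vectorized code
   may use aligned SSE moves (movaps/movdqa) on it.  */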
23150 if (AGGREGATE_TYPE_P (type)
23151 && TYPE_SIZE (type)
23152 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23153 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
23154 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23158 if (TREE_CODE (type) == ARRAY_TYPE)
23160 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23162 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23165 else if (TREE_CODE (type) == COMPLEX_TYPE)
23168 if (TYPE_MODE (type) == DCmode && align < 64)
23170 if ((TYPE_MODE (type) == XCmode
23171 || TYPE_MODE (type) == TCmode) && align < 128)
23174 else if ((TREE_CODE (type) == RECORD_TYPE
23175 || TREE_CODE (type) == UNION_TYPE
23176 || TREE_CODE (type) == QUAL_UNION_TYPE)
23177 && TYPE_FIELDS (type))
23179 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23181 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23184 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23185 || TREE_CODE (type) == INTEGER_TYPE)
23187 if (TYPE_MODE (type) == DFmode && align < 64)
23189 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
23196 /* Compute the alignment for a local variable or a stack slot. EXP is
23197 the data type or decl itself, MODE is the widest mode available and
23198 ALIGN is the alignment that the object would ordinarily have. The
23199 value of this macro is used instead of that alignment to align the
23203 ix86_local_alignment (tree exp, enum machine_mode mode,
23204 unsigned int align)
23208 if (exp && DECL_P (exp))
23210 type = TREE_TYPE (exp);
23219 /* Don't do dynamic stack realignment for long long objects with
23220 -mpreferred-stack-boundary=2. */
23223 && ix86_preferred_stack_boundary < 64
23224 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
23225 && (!type || !TYPE_USER_ALIGN (type))
23226 && (!decl || !DECL_USER_ALIGN (decl)))
23229 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
23230 register in MODE. We will return the largest alignment of XF
23234 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
23235 align = GET_MODE_ALIGNMENT (DFmode);
23239 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
23240 to a 16-byte boundary. The exact wording is:
23242 An array uses the same alignment as its elements, except that a local or
23243 global array variable of length at least 16 bytes or
23244 a C99 variable-length array variable always has alignment of at least 16 bytes.
23246 This was added to allow use of aligned SSE instructions on arrays. The
23247 rule is meant for static storage (where the compiler cannot do the analysis
23248 by itself). We follow it for automatic variables only when convenient.
23249 We fully control everything in the function being compiled, and functions
23250 from other units cannot rely on the alignment.
23252 Exclude the va_list type. It is the common case of a local array where
23253 we cannot benefit from the alignment. */
23254 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
23257 if (AGGREGATE_TYPE_P (type)
23258 && (va_list_type_node == NULL_TREE
23259 || (TYPE_MAIN_VARIANT (type)
23260 != TYPE_MAIN_VARIANT (va_list_type_node)))
23261 && TYPE_SIZE (type)
23262 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23263 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
23264 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23267 if (TREE_CODE (type) == ARRAY_TYPE)
23269 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23271 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23274 else if (TREE_CODE (type) == COMPLEX_TYPE)
23276 if (TYPE_MODE (type) == DCmode && align < 64)
23278 if ((TYPE_MODE (type) == XCmode
23279 || TYPE_MODE (type) == TCmode) && align < 128)
23282 else if ((TREE_CODE (type) == RECORD_TYPE
23283 || TREE_CODE (type) == UNION_TYPE
23284 || TREE_CODE (type) == QUAL_UNION_TYPE)
23285 && TYPE_FIELDS (type))
23287 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23289 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23292 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23293 || TREE_CODE (type) == INTEGER_TYPE)
23296 if (TYPE_MODE (type) == DFmode && align < 64)
23298 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
23304 /* Compute the minimum required alignment for dynamic stack realignment
23305 purposes for a local variable, parameter or a stack slot. EXP is
23306 the data type or decl itself, MODE is its mode and ALIGN is the
23307 alignment that the object would ordinarily have. */
23310 ix86_minimum_alignment (tree exp, enum machine_mode mode,
23311 unsigned int align)
23315 if (exp && DECL_P (exp))
23317 type = TREE_TYPE (exp);
23326 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
23329 /* Don't do dynamic stack realignment for long long objects with
23330 -mpreferred-stack-boundary=2. */
23331 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
23332 && (!type || !TYPE_USER_ALIGN (type))
23333 && (!decl || !DECL_USER_ALIGN (decl)))
23339 /* Find a location for the static chain incoming to a nested function.
23340 This is a register, unless all free registers are used by arguments. */
23343 ix86_static_chain (const_tree fndecl, bool incoming_p)
23347 if (!DECL_STATIC_CHAIN (fndecl))
23352 /* We always use R10 in 64-bit mode. */
23358 /* By default in 32-bit mode we use ECX to pass the static chain. */
23361 fntype = TREE_TYPE (fndecl);
23362 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
23364 /* Fastcall functions use ecx/edx for arguments, which leaves
23365 us with EAX for the static chain. */
23368 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
23370 /* Thiscall functions use ecx for arguments, which leaves
23371 us with EAX for the static chain. */
23374 else if (ix86_function_regparm (fntype, fndecl) == 3)
23376 /* For regparm 3, we have no free call-clobbered registers in
23377 which to store the static chain. In order to implement this,
23378 we have the trampoline push the static chain to the stack.
23379 However, we can't push a value below the return address when
23380 we call the nested function directly, so we have to use an
23381 alternate entry point. For this we use ESI, and have the
23382 alternate entry point push ESI, so that things appear the
23383 same once we're executing the nested function. */
23386 if (fndecl == current_function_decl)
23387 ix86_static_chain_on_stack = true;
23388 return gen_frame_mem (SImode,
23389 plus_constant (arg_pointer_rtx, -8));
23395 return gen_rtx_REG (Pmode, regno);
23398 /* Emit RTL insns to initialize the variable parts of a trampoline.
23399 FNDECL is the decl of the target address; M_TRAMP is a MEM for
23400 the trampoline, and CHAIN_VALUE is an RTX for the static chain
23401 to be passed to the target function. */
23404 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
23408 fnaddr = XEXP (DECL_RTL (fndecl), 0);
23415 /* Depending on the static chain location, either load a register
23416 with a constant, or push the constant to the stack. All of the
23417 instructions are the same size. */
23418 chain = ix86_static_chain (fndecl, true);
23421 if (REGNO (chain) == CX_REG)
23423 else if (REGNO (chain) == AX_REG)
23426 gcc_unreachable ();
23431 mem = adjust_address (m_tramp, QImode, 0);
23432 emit_move_insn (mem, gen_int_mode (opcode, QImode));
23434 mem = adjust_address (m_tramp, SImode, 1);
23435 emit_move_insn (mem, chain_value);
23437 /* Compute the offset from the end of the jmp to the target function.
23438 If the trampoline stores the static chain on
23439 the stack, we need to skip the first insn, which pushes the
23440 (call-saved) register static chain; this push is 1 byte. */
23441 disp = expand_binop (SImode, sub_optab, fnaddr,
23442 plus_constant (XEXP (m_tramp, 0),
23443 MEM_P (chain) ? 9 : 10),
23444 NULL_RTX, 1, OPTAB_DIRECT);
23446 mem = adjust_address (m_tramp, QImode, 5);
23447 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
23449 mem = adjust_address (m_tramp, SImode, 6);
23450 emit_move_insn (mem, disp);
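/* Illustrative byte layout of the 32-bit trampoline written above, for
   the register static chain case (assuming ECX was chosen):
     offset 0:  b9 <imm32>	movl $chain_value, %ecx
     offset 5:  e9 <rel32>	jmp  <function>
   where rel32 = fnaddr - (tramp + 10).  */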
23456 /* Load the function address into r11. Try to load the address using
23457 the shorter movl instead of movabs. We may want to support
23458 movq for kernel mode, but the kernel does not use trampolines at
23460 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
23462 fnaddr = copy_to_mode_reg (DImode, fnaddr);
23464 mem = adjust_address (m_tramp, HImode, offset);
23465 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
23467 mem = adjust_address (m_tramp, SImode, offset + 2);
23468 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
23473 mem = adjust_address (m_tramp, HImode, offset);
23474 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
23476 mem = adjust_address (m_tramp, DImode, offset + 2);
23477 emit_move_insn (mem, fnaddr);
23481 /* Load static chain using movabs to r10. */
23482 mem = adjust_address (m_tramp, HImode, offset);
23483 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
23485 mem = adjust_address (m_tramp, DImode, offset + 2);
23486 emit_move_insn (mem, chain_value);
23489 /* Jump to r11; the last (unused) byte is a nop, only there to
23490 pad the write out to a single 32-bit store. */
23491 mem = adjust_address (m_tramp, SImode, offset);
23492 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
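/* Illustrative byte layout of the 64-bit trampoline written above
   (movabs variant, assumed):
     49 bb <imm64>	movabs $fnaddr, %r11
     49 ba <imm64>	movabs $chain_value, %r10
     49 ff e3		jmp *%r11
     90			nop (pads the final write to 32 bits)  */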
23495 gcc_assert (offset <= TRAMPOLINE_SIZE);
23498 #ifdef ENABLE_EXECUTE_STACK
23499 #ifdef CHECK_EXECUTE_STACK_ENABLED
23500 if (CHECK_EXECUTE_STACK_ENABLED)
23502 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
23503 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
23507 /* The following file contains several enumerations and data structures
23508 built from the definitions in i386-builtin-types.def. */
23510 #include "i386-builtin-types.inc"
23512 /* Table for the ix86 builtin non-function types. */
23513 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
23515 /* Retrieve an element from the above table, building some of
23516 the types lazily. */
23519 ix86_get_builtin_type (enum ix86_builtin_type tcode)
23521 unsigned int index;
23524 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
23526 type = ix86_builtin_type_tab[(int) tcode];
23530 gcc_assert (tcode > IX86_BT_LAST_PRIM);
23531 if (tcode <= IX86_BT_LAST_VECT)
23533 enum machine_mode mode;
23535 index = tcode - IX86_BT_LAST_PRIM - 1;
23536 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
23537 mode = ix86_builtin_type_vect_mode[index];
23539 type = build_vector_type_for_mode (itype, mode);
23545 index = tcode - IX86_BT_LAST_VECT - 1;
23546 if (tcode <= IX86_BT_LAST_PTR)
23547 quals = TYPE_UNQUALIFIED;
23549 quals = TYPE_QUAL_CONST;
23551 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
23552 if (quals != TYPE_UNQUALIFIED)
23553 itype = build_qualified_type (itype, quals);
23555 type = build_pointer_type (itype);
23558 ix86_builtin_type_tab[(int) tcode] = type;
23562 /* Table for the ix86 builtin function types. */
23563 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
23565 /* Retrieve an element from the above table, building some of
23566 the types lazily. */
23569 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
23573 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
23575 type = ix86_builtin_func_type_tab[(int) tcode];
23579 if (tcode <= IX86_BT_LAST_FUNC)
23581 unsigned start = ix86_builtin_func_start[(int) tcode];
23582 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
23583 tree rtype, atype, args = void_list_node;
23586 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
23587 for (i = after - 1; i > start; --i)
23589 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
23590 args = tree_cons (NULL, atype, args);
23593 type = build_function_type (rtype, args);
23597 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
23598 enum ix86_builtin_func_type icode;
23600 icode = ix86_builtin_func_alias_base[index];
23601 type = ix86_get_builtin_func_type (icode);
23604 ix86_builtin_func_type_tab[(int) tcode] = type;
23609 /* Codes for all the SSE/MMX builtins. */
23612 IX86_BUILTIN_ADDPS,
23613 IX86_BUILTIN_ADDSS,
23614 IX86_BUILTIN_DIVPS,
23615 IX86_BUILTIN_DIVSS,
23616 IX86_BUILTIN_MULPS,
23617 IX86_BUILTIN_MULSS,
23618 IX86_BUILTIN_SUBPS,
23619 IX86_BUILTIN_SUBSS,
23621 IX86_BUILTIN_CMPEQPS,
23622 IX86_BUILTIN_CMPLTPS,
23623 IX86_BUILTIN_CMPLEPS,
23624 IX86_BUILTIN_CMPGTPS,
23625 IX86_BUILTIN_CMPGEPS,
23626 IX86_BUILTIN_CMPNEQPS,
23627 IX86_BUILTIN_CMPNLTPS,
23628 IX86_BUILTIN_CMPNLEPS,
23629 IX86_BUILTIN_CMPNGTPS,
23630 IX86_BUILTIN_CMPNGEPS,
23631 IX86_BUILTIN_CMPORDPS,
23632 IX86_BUILTIN_CMPUNORDPS,
23633 IX86_BUILTIN_CMPEQSS,
23634 IX86_BUILTIN_CMPLTSS,
23635 IX86_BUILTIN_CMPLESS,
23636 IX86_BUILTIN_CMPNEQSS,
23637 IX86_BUILTIN_CMPNLTSS,
23638 IX86_BUILTIN_CMPNLESS,
23639 IX86_BUILTIN_CMPNGTSS,
23640 IX86_BUILTIN_CMPNGESS,
23641 IX86_BUILTIN_CMPORDSS,
23642 IX86_BUILTIN_CMPUNORDSS,
23644 IX86_BUILTIN_COMIEQSS,
23645 IX86_BUILTIN_COMILTSS,
23646 IX86_BUILTIN_COMILESS,
23647 IX86_BUILTIN_COMIGTSS,
23648 IX86_BUILTIN_COMIGESS,
23649 IX86_BUILTIN_COMINEQSS,
23650 IX86_BUILTIN_UCOMIEQSS,
23651 IX86_BUILTIN_UCOMILTSS,
23652 IX86_BUILTIN_UCOMILESS,
23653 IX86_BUILTIN_UCOMIGTSS,
23654 IX86_BUILTIN_UCOMIGESS,
23655 IX86_BUILTIN_UCOMINEQSS,
23657 IX86_BUILTIN_CVTPI2PS,
23658 IX86_BUILTIN_CVTPS2PI,
23659 IX86_BUILTIN_CVTSI2SS,
23660 IX86_BUILTIN_CVTSI642SS,
23661 IX86_BUILTIN_CVTSS2SI,
23662 IX86_BUILTIN_CVTSS2SI64,
23663 IX86_BUILTIN_CVTTPS2PI,
23664 IX86_BUILTIN_CVTTSS2SI,
23665 IX86_BUILTIN_CVTTSS2SI64,
23667 IX86_BUILTIN_MAXPS,
23668 IX86_BUILTIN_MAXSS,
23669 IX86_BUILTIN_MINPS,
23670 IX86_BUILTIN_MINSS,
23672 IX86_BUILTIN_LOADUPS,
23673 IX86_BUILTIN_STOREUPS,
23674 IX86_BUILTIN_MOVSS,
23676 IX86_BUILTIN_MOVHLPS,
23677 IX86_BUILTIN_MOVLHPS,
23678 IX86_BUILTIN_LOADHPS,
23679 IX86_BUILTIN_LOADLPS,
23680 IX86_BUILTIN_STOREHPS,
23681 IX86_BUILTIN_STORELPS,
23683 IX86_BUILTIN_MASKMOVQ,
23684 IX86_BUILTIN_MOVMSKPS,
23685 IX86_BUILTIN_PMOVMSKB,
23687 IX86_BUILTIN_MOVNTPS,
23688 IX86_BUILTIN_MOVNTQ,
23690 IX86_BUILTIN_LOADDQU,
23691 IX86_BUILTIN_STOREDQU,
23693 IX86_BUILTIN_PACKSSWB,
23694 IX86_BUILTIN_PACKSSDW,
23695 IX86_BUILTIN_PACKUSWB,
23697 IX86_BUILTIN_PADDB,
23698 IX86_BUILTIN_PADDW,
23699 IX86_BUILTIN_PADDD,
23700 IX86_BUILTIN_PADDQ,
23701 IX86_BUILTIN_PADDSB,
23702 IX86_BUILTIN_PADDSW,
23703 IX86_BUILTIN_PADDUSB,
23704 IX86_BUILTIN_PADDUSW,
23705 IX86_BUILTIN_PSUBB,
23706 IX86_BUILTIN_PSUBW,
23707 IX86_BUILTIN_PSUBD,
23708 IX86_BUILTIN_PSUBQ,
23709 IX86_BUILTIN_PSUBSB,
23710 IX86_BUILTIN_PSUBSW,
23711 IX86_BUILTIN_PSUBUSB,
23712 IX86_BUILTIN_PSUBUSW,
23715 IX86_BUILTIN_PANDN,
23719 IX86_BUILTIN_PAVGB,
23720 IX86_BUILTIN_PAVGW,
23722 IX86_BUILTIN_PCMPEQB,
23723 IX86_BUILTIN_PCMPEQW,
23724 IX86_BUILTIN_PCMPEQD,
23725 IX86_BUILTIN_PCMPGTB,
23726 IX86_BUILTIN_PCMPGTW,
23727 IX86_BUILTIN_PCMPGTD,
23729 IX86_BUILTIN_PMADDWD,
23731 IX86_BUILTIN_PMAXSW,
23732 IX86_BUILTIN_PMAXUB,
23733 IX86_BUILTIN_PMINSW,
23734 IX86_BUILTIN_PMINUB,
23736 IX86_BUILTIN_PMULHUW,
23737 IX86_BUILTIN_PMULHW,
23738 IX86_BUILTIN_PMULLW,
23740 IX86_BUILTIN_PSADBW,
23741 IX86_BUILTIN_PSHUFW,
23743 IX86_BUILTIN_PSLLW,
23744 IX86_BUILTIN_PSLLD,
23745 IX86_BUILTIN_PSLLQ,
23746 IX86_BUILTIN_PSRAW,
23747 IX86_BUILTIN_PSRAD,
23748 IX86_BUILTIN_PSRLW,
23749 IX86_BUILTIN_PSRLD,
23750 IX86_BUILTIN_PSRLQ,
23751 IX86_BUILTIN_PSLLWI,
23752 IX86_BUILTIN_PSLLDI,
23753 IX86_BUILTIN_PSLLQI,
23754 IX86_BUILTIN_PSRAWI,
23755 IX86_BUILTIN_PSRADI,
23756 IX86_BUILTIN_PSRLWI,
23757 IX86_BUILTIN_PSRLDI,
23758 IX86_BUILTIN_PSRLQI,
23760 IX86_BUILTIN_PUNPCKHBW,
23761 IX86_BUILTIN_PUNPCKHWD,
23762 IX86_BUILTIN_PUNPCKHDQ,
23763 IX86_BUILTIN_PUNPCKLBW,
23764 IX86_BUILTIN_PUNPCKLWD,
23765 IX86_BUILTIN_PUNPCKLDQ,
23767 IX86_BUILTIN_SHUFPS,
23769 IX86_BUILTIN_RCPPS,
23770 IX86_BUILTIN_RCPSS,
23771 IX86_BUILTIN_RSQRTPS,
23772 IX86_BUILTIN_RSQRTPS_NR,
23773 IX86_BUILTIN_RSQRTSS,
23774 IX86_BUILTIN_RSQRTF,
23775 IX86_BUILTIN_SQRTPS,
23776 IX86_BUILTIN_SQRTPS_NR,
23777 IX86_BUILTIN_SQRTSS,
23779 IX86_BUILTIN_UNPCKHPS,
23780 IX86_BUILTIN_UNPCKLPS,
23782 IX86_BUILTIN_ANDPS,
23783 IX86_BUILTIN_ANDNPS,
23785 IX86_BUILTIN_XORPS,
23788 IX86_BUILTIN_LDMXCSR,
23789 IX86_BUILTIN_STMXCSR,
23790 IX86_BUILTIN_SFENCE,
23792 /* 3DNow! Original */
23793 IX86_BUILTIN_FEMMS,
23794 IX86_BUILTIN_PAVGUSB,
23795 IX86_BUILTIN_PF2ID,
23796 IX86_BUILTIN_PFACC,
23797 IX86_BUILTIN_PFADD,
23798 IX86_BUILTIN_PFCMPEQ,
23799 IX86_BUILTIN_PFCMPGE,
23800 IX86_BUILTIN_PFCMPGT,
23801 IX86_BUILTIN_PFMAX,
23802 IX86_BUILTIN_PFMIN,
23803 IX86_BUILTIN_PFMUL,
23804 IX86_BUILTIN_PFRCP,
23805 IX86_BUILTIN_PFRCPIT1,
23806 IX86_BUILTIN_PFRCPIT2,
23807 IX86_BUILTIN_PFRSQIT1,
23808 IX86_BUILTIN_PFRSQRT,
23809 IX86_BUILTIN_PFSUB,
23810 IX86_BUILTIN_PFSUBR,
23811 IX86_BUILTIN_PI2FD,
23812 IX86_BUILTIN_PMULHRW,
23814 /* 3DNow! Athlon Extensions */
23815 IX86_BUILTIN_PF2IW,
23816 IX86_BUILTIN_PFNACC,
23817 IX86_BUILTIN_PFPNACC,
23818 IX86_BUILTIN_PI2FW,
23819 IX86_BUILTIN_PSWAPDSI,
23820 IX86_BUILTIN_PSWAPDSF,
23823 IX86_BUILTIN_ADDPD,
23824 IX86_BUILTIN_ADDSD,
23825 IX86_BUILTIN_DIVPD,
23826 IX86_BUILTIN_DIVSD,
23827 IX86_BUILTIN_MULPD,
23828 IX86_BUILTIN_MULSD,
23829 IX86_BUILTIN_SUBPD,
23830 IX86_BUILTIN_SUBSD,
23832 IX86_BUILTIN_CMPEQPD,
23833 IX86_BUILTIN_CMPLTPD,
23834 IX86_BUILTIN_CMPLEPD,
23835 IX86_BUILTIN_CMPGTPD,
23836 IX86_BUILTIN_CMPGEPD,
23837 IX86_BUILTIN_CMPNEQPD,
23838 IX86_BUILTIN_CMPNLTPD,
23839 IX86_BUILTIN_CMPNLEPD,
23840 IX86_BUILTIN_CMPNGTPD,
23841 IX86_BUILTIN_CMPNGEPD,
23842 IX86_BUILTIN_CMPORDPD,
23843 IX86_BUILTIN_CMPUNORDPD,
23844 IX86_BUILTIN_CMPEQSD,
23845 IX86_BUILTIN_CMPLTSD,
23846 IX86_BUILTIN_CMPLESD,
23847 IX86_BUILTIN_CMPNEQSD,
23848 IX86_BUILTIN_CMPNLTSD,
23849 IX86_BUILTIN_CMPNLESD,
23850 IX86_BUILTIN_CMPORDSD,
23851 IX86_BUILTIN_CMPUNORDSD,
23853 IX86_BUILTIN_COMIEQSD,
23854 IX86_BUILTIN_COMILTSD,
23855 IX86_BUILTIN_COMILESD,
23856 IX86_BUILTIN_COMIGTSD,
23857 IX86_BUILTIN_COMIGESD,
23858 IX86_BUILTIN_COMINEQSD,
23859 IX86_BUILTIN_UCOMIEQSD,
23860 IX86_BUILTIN_UCOMILTSD,
23861 IX86_BUILTIN_UCOMILESD,
23862 IX86_BUILTIN_UCOMIGTSD,
23863 IX86_BUILTIN_UCOMIGESD,
23864 IX86_BUILTIN_UCOMINEQSD,
23866 IX86_BUILTIN_MAXPD,
23867 IX86_BUILTIN_MAXSD,
23868 IX86_BUILTIN_MINPD,
23869 IX86_BUILTIN_MINSD,
23871 IX86_BUILTIN_ANDPD,
23872 IX86_BUILTIN_ANDNPD,
23874 IX86_BUILTIN_XORPD,
23876 IX86_BUILTIN_SQRTPD,
23877 IX86_BUILTIN_SQRTSD,
23879 IX86_BUILTIN_UNPCKHPD,
23880 IX86_BUILTIN_UNPCKLPD,
23882 IX86_BUILTIN_SHUFPD,
23884 IX86_BUILTIN_LOADUPD,
23885 IX86_BUILTIN_STOREUPD,
23886 IX86_BUILTIN_MOVSD,
23888 IX86_BUILTIN_LOADHPD,
23889 IX86_BUILTIN_LOADLPD,
23891 IX86_BUILTIN_CVTDQ2PD,
23892 IX86_BUILTIN_CVTDQ2PS,
23894 IX86_BUILTIN_CVTPD2DQ,
23895 IX86_BUILTIN_CVTPD2PI,
23896 IX86_BUILTIN_CVTPD2PS,
23897 IX86_BUILTIN_CVTTPD2DQ,
23898 IX86_BUILTIN_CVTTPD2PI,
23900 IX86_BUILTIN_CVTPI2PD,
23901 IX86_BUILTIN_CVTSI2SD,
23902 IX86_BUILTIN_CVTSI642SD,
23904 IX86_BUILTIN_CVTSD2SI,
23905 IX86_BUILTIN_CVTSD2SI64,
23906 IX86_BUILTIN_CVTSD2SS,
23907 IX86_BUILTIN_CVTSS2SD,
23908 IX86_BUILTIN_CVTTSD2SI,
23909 IX86_BUILTIN_CVTTSD2SI64,
23911 IX86_BUILTIN_CVTPS2DQ,
23912 IX86_BUILTIN_CVTPS2PD,
23913 IX86_BUILTIN_CVTTPS2DQ,
23915 IX86_BUILTIN_MOVNTI,
23916 IX86_BUILTIN_MOVNTPD,
23917 IX86_BUILTIN_MOVNTDQ,
23919 IX86_BUILTIN_MOVQ128,
23922 IX86_BUILTIN_MASKMOVDQU,
23923 IX86_BUILTIN_MOVMSKPD,
23924 IX86_BUILTIN_PMOVMSKB128,
23926 IX86_BUILTIN_PACKSSWB128,
23927 IX86_BUILTIN_PACKSSDW128,
23928 IX86_BUILTIN_PACKUSWB128,
23930 IX86_BUILTIN_PADDB128,
23931 IX86_BUILTIN_PADDW128,
23932 IX86_BUILTIN_PADDD128,
23933 IX86_BUILTIN_PADDQ128,
23934 IX86_BUILTIN_PADDSB128,
23935 IX86_BUILTIN_PADDSW128,
23936 IX86_BUILTIN_PADDUSB128,
23937 IX86_BUILTIN_PADDUSW128,
23938 IX86_BUILTIN_PSUBB128,
23939 IX86_BUILTIN_PSUBW128,
23940 IX86_BUILTIN_PSUBD128,
23941 IX86_BUILTIN_PSUBQ128,
23942 IX86_BUILTIN_PSUBSB128,
23943 IX86_BUILTIN_PSUBSW128,
23944 IX86_BUILTIN_PSUBUSB128,
23945 IX86_BUILTIN_PSUBUSW128,
23947 IX86_BUILTIN_PAND128,
23948 IX86_BUILTIN_PANDN128,
23949 IX86_BUILTIN_POR128,
23950 IX86_BUILTIN_PXOR128,
23952 IX86_BUILTIN_PAVGB128,
23953 IX86_BUILTIN_PAVGW128,
23955 IX86_BUILTIN_PCMPEQB128,
23956 IX86_BUILTIN_PCMPEQW128,
23957 IX86_BUILTIN_PCMPEQD128,
23958 IX86_BUILTIN_PCMPGTB128,
23959 IX86_BUILTIN_PCMPGTW128,
23960 IX86_BUILTIN_PCMPGTD128,
23962 IX86_BUILTIN_PMADDWD128,
23964 IX86_BUILTIN_PMAXSW128,
23965 IX86_BUILTIN_PMAXUB128,
23966 IX86_BUILTIN_PMINSW128,
23967 IX86_BUILTIN_PMINUB128,
23969 IX86_BUILTIN_PMULUDQ,
23970 IX86_BUILTIN_PMULUDQ128,
23971 IX86_BUILTIN_PMULHUW128,
23972 IX86_BUILTIN_PMULHW128,
23973 IX86_BUILTIN_PMULLW128,
23975 IX86_BUILTIN_PSADBW128,
23976 IX86_BUILTIN_PSHUFHW,
23977 IX86_BUILTIN_PSHUFLW,
23978 IX86_BUILTIN_PSHUFD,
23980 IX86_BUILTIN_PSLLDQI128,
23981 IX86_BUILTIN_PSLLWI128,
23982 IX86_BUILTIN_PSLLDI128,
23983 IX86_BUILTIN_PSLLQI128,
23984 IX86_BUILTIN_PSRAWI128,
23985 IX86_BUILTIN_PSRADI128,
23986 IX86_BUILTIN_PSRLDQI128,
23987 IX86_BUILTIN_PSRLWI128,
23988 IX86_BUILTIN_PSRLDI128,
23989 IX86_BUILTIN_PSRLQI128,
23991 IX86_BUILTIN_PSLLDQ128,
23992 IX86_BUILTIN_PSLLW128,
23993 IX86_BUILTIN_PSLLD128,
23994 IX86_BUILTIN_PSLLQ128,
23995 IX86_BUILTIN_PSRAW128,
23996 IX86_BUILTIN_PSRAD128,
23997 IX86_BUILTIN_PSRLW128,
23998 IX86_BUILTIN_PSRLD128,
23999 IX86_BUILTIN_PSRLQ128,
24001 IX86_BUILTIN_PUNPCKHBW128,
24002 IX86_BUILTIN_PUNPCKHWD128,
24003 IX86_BUILTIN_PUNPCKHDQ128,
24004 IX86_BUILTIN_PUNPCKHQDQ128,
24005 IX86_BUILTIN_PUNPCKLBW128,
24006 IX86_BUILTIN_PUNPCKLWD128,
24007 IX86_BUILTIN_PUNPCKLDQ128,
24008 IX86_BUILTIN_PUNPCKLQDQ128,
24010 IX86_BUILTIN_CLFLUSH,
24011 IX86_BUILTIN_MFENCE,
24012 IX86_BUILTIN_LFENCE,
24014 IX86_BUILTIN_BSRSI,
24015 IX86_BUILTIN_BSRDI,
24016 IX86_BUILTIN_RDPMC,
24017 IX86_BUILTIN_RDTSC,
24018 IX86_BUILTIN_RDTSCP,
24019 IX86_BUILTIN_ROLQI,
24020 IX86_BUILTIN_ROLHI,
24021 IX86_BUILTIN_RORQI,
24022 IX86_BUILTIN_RORHI,
24025 IX86_BUILTIN_ADDSUBPS,
24026 IX86_BUILTIN_HADDPS,
24027 IX86_BUILTIN_HSUBPS,
24028 IX86_BUILTIN_MOVSHDUP,
24029 IX86_BUILTIN_MOVSLDUP,
24030 IX86_BUILTIN_ADDSUBPD,
24031 IX86_BUILTIN_HADDPD,
24032 IX86_BUILTIN_HSUBPD,
24033 IX86_BUILTIN_LDDQU,
24035 IX86_BUILTIN_MONITOR,
24036 IX86_BUILTIN_MWAIT,
24039 IX86_BUILTIN_PHADDW,
24040 IX86_BUILTIN_PHADDD,
24041 IX86_BUILTIN_PHADDSW,
24042 IX86_BUILTIN_PHSUBW,
24043 IX86_BUILTIN_PHSUBD,
24044 IX86_BUILTIN_PHSUBSW,
24045 IX86_BUILTIN_PMADDUBSW,
24046 IX86_BUILTIN_PMULHRSW,
24047 IX86_BUILTIN_PSHUFB,
24048 IX86_BUILTIN_PSIGNB,
24049 IX86_BUILTIN_PSIGNW,
24050 IX86_BUILTIN_PSIGND,
24051 IX86_BUILTIN_PALIGNR,
24052 IX86_BUILTIN_PABSB,
24053 IX86_BUILTIN_PABSW,
24054 IX86_BUILTIN_PABSD,
24056 IX86_BUILTIN_PHADDW128,
24057 IX86_BUILTIN_PHADDD128,
24058 IX86_BUILTIN_PHADDSW128,
24059 IX86_BUILTIN_PHSUBW128,
24060 IX86_BUILTIN_PHSUBD128,
24061 IX86_BUILTIN_PHSUBSW128,
24062 IX86_BUILTIN_PMADDUBSW128,
24063 IX86_BUILTIN_PMULHRSW128,
24064 IX86_BUILTIN_PSHUFB128,
24065 IX86_BUILTIN_PSIGNB128,
24066 IX86_BUILTIN_PSIGNW128,
24067 IX86_BUILTIN_PSIGND128,
24068 IX86_BUILTIN_PALIGNR128,
24069 IX86_BUILTIN_PABSB128,
24070 IX86_BUILTIN_PABSW128,
24071 IX86_BUILTIN_PABSD128,
24073 /* AMDFAM10 - SSE4A New Instructions. */
24074 IX86_BUILTIN_MOVNTSD,
24075 IX86_BUILTIN_MOVNTSS,
24076 IX86_BUILTIN_EXTRQI,
24077 IX86_BUILTIN_EXTRQ,
24078 IX86_BUILTIN_INSERTQI,
  IX86_BUILTIN_INSERTQ,

  IX86_BUILTIN_BLENDPD,
  IX86_BUILTIN_BLENDPS,
  IX86_BUILTIN_BLENDVPD,
  IX86_BUILTIN_BLENDVPS,
  IX86_BUILTIN_PBLENDVB128,
  IX86_BUILTIN_PBLENDW128,

  IX86_BUILTIN_INSERTPS128,

  IX86_BUILTIN_MOVNTDQA,
  IX86_BUILTIN_MPSADBW128,
  IX86_BUILTIN_PACKUSDW128,
  IX86_BUILTIN_PCMPEQQ,
  IX86_BUILTIN_PHMINPOSUW128,

  IX86_BUILTIN_PMAXSB128,
  IX86_BUILTIN_PMAXSD128,
  IX86_BUILTIN_PMAXUD128,
  IX86_BUILTIN_PMAXUW128,

  IX86_BUILTIN_PMINSB128,
  IX86_BUILTIN_PMINSD128,
  IX86_BUILTIN_PMINUD128,
  IX86_BUILTIN_PMINUW128,

  IX86_BUILTIN_PMOVSXBW128,
  IX86_BUILTIN_PMOVSXBD128,
  IX86_BUILTIN_PMOVSXBQ128,
  IX86_BUILTIN_PMOVSXWD128,
  IX86_BUILTIN_PMOVSXWQ128,
  IX86_BUILTIN_PMOVSXDQ128,

  IX86_BUILTIN_PMOVZXBW128,
  IX86_BUILTIN_PMOVZXBD128,
  IX86_BUILTIN_PMOVZXBQ128,
  IX86_BUILTIN_PMOVZXWD128,
  IX86_BUILTIN_PMOVZXWQ128,
  IX86_BUILTIN_PMOVZXDQ128,

  IX86_BUILTIN_PMULDQ128,
  IX86_BUILTIN_PMULLD128,

  IX86_BUILTIN_ROUNDPD,
  IX86_BUILTIN_ROUNDPS,
  IX86_BUILTIN_ROUNDSD,
  IX86_BUILTIN_ROUNDSS,

  IX86_BUILTIN_PTESTZ,
  IX86_BUILTIN_PTESTC,
  IX86_BUILTIN_PTESTNZC,

  IX86_BUILTIN_VEC_INIT_V2SI,
  IX86_BUILTIN_VEC_INIT_V4HI,
  IX86_BUILTIN_VEC_INIT_V8QI,
  IX86_BUILTIN_VEC_EXT_V2DF,
  IX86_BUILTIN_VEC_EXT_V2DI,
  IX86_BUILTIN_VEC_EXT_V4SF,
  IX86_BUILTIN_VEC_EXT_V4SI,
  IX86_BUILTIN_VEC_EXT_V8HI,
  IX86_BUILTIN_VEC_EXT_V2SI,
  IX86_BUILTIN_VEC_EXT_V4HI,
  IX86_BUILTIN_VEC_EXT_V16QI,
  IX86_BUILTIN_VEC_SET_V2DI,
  IX86_BUILTIN_VEC_SET_V4SF,
  IX86_BUILTIN_VEC_SET_V4SI,
  IX86_BUILTIN_VEC_SET_V8HI,
  IX86_BUILTIN_VEC_SET_V4HI,
  IX86_BUILTIN_VEC_SET_V16QI,

  IX86_BUILTIN_VEC_PACK_SFIX,

  IX86_BUILTIN_CRC32QI,
  IX86_BUILTIN_CRC32HI,
  IX86_BUILTIN_CRC32SI,
  IX86_BUILTIN_CRC32DI,

  IX86_BUILTIN_PCMPESTRI128,
  IX86_BUILTIN_PCMPESTRM128,
  IX86_BUILTIN_PCMPESTRA128,
  IX86_BUILTIN_PCMPESTRC128,
  IX86_BUILTIN_PCMPESTRO128,
  IX86_BUILTIN_PCMPESTRS128,
  IX86_BUILTIN_PCMPESTRZ128,
  IX86_BUILTIN_PCMPISTRI128,
  IX86_BUILTIN_PCMPISTRM128,
  IX86_BUILTIN_PCMPISTRA128,
  IX86_BUILTIN_PCMPISTRC128,
  IX86_BUILTIN_PCMPISTRO128,
  IX86_BUILTIN_PCMPISTRS128,
  IX86_BUILTIN_PCMPISTRZ128,

  IX86_BUILTIN_PCMPGTQ,

  /* AES instructions */
  IX86_BUILTIN_AESENC128,
  IX86_BUILTIN_AESENCLAST128,
  IX86_BUILTIN_AESDEC128,
  IX86_BUILTIN_AESDECLAST128,
  IX86_BUILTIN_AESIMC128,
  IX86_BUILTIN_AESKEYGENASSIST128,

  /* PCLMUL instruction */
  IX86_BUILTIN_PCLMULQDQ128,

  IX86_BUILTIN_ADDPD256,
  IX86_BUILTIN_ADDPS256,
  IX86_BUILTIN_ADDSUBPD256,
  IX86_BUILTIN_ADDSUBPS256,
  IX86_BUILTIN_ANDPD256,
  IX86_BUILTIN_ANDPS256,
  IX86_BUILTIN_ANDNPD256,
  IX86_BUILTIN_ANDNPS256,
  IX86_BUILTIN_BLENDPD256,
  IX86_BUILTIN_BLENDPS256,
  IX86_BUILTIN_BLENDVPD256,
  IX86_BUILTIN_BLENDVPS256,
  IX86_BUILTIN_DIVPD256,
  IX86_BUILTIN_DIVPS256,
  IX86_BUILTIN_DPPS256,
  IX86_BUILTIN_HADDPD256,
  IX86_BUILTIN_HADDPS256,
  IX86_BUILTIN_HSUBPD256,
  IX86_BUILTIN_HSUBPS256,
  IX86_BUILTIN_MAXPD256,
  IX86_BUILTIN_MAXPS256,
  IX86_BUILTIN_MINPD256,
  IX86_BUILTIN_MINPS256,
  IX86_BUILTIN_MULPD256,
  IX86_BUILTIN_MULPS256,
  IX86_BUILTIN_ORPD256,
  IX86_BUILTIN_ORPS256,
  IX86_BUILTIN_SHUFPD256,
  IX86_BUILTIN_SHUFPS256,
  IX86_BUILTIN_SUBPD256,
  IX86_BUILTIN_SUBPS256,
  IX86_BUILTIN_XORPD256,
  IX86_BUILTIN_XORPS256,
  IX86_BUILTIN_CMPSD,
  IX86_BUILTIN_CMPSS,
  IX86_BUILTIN_CMPPD,
  IX86_BUILTIN_CMPPS,
  IX86_BUILTIN_CMPPD256,
  IX86_BUILTIN_CMPPS256,
  IX86_BUILTIN_CVTDQ2PD256,
  IX86_BUILTIN_CVTDQ2PS256,
  IX86_BUILTIN_CVTPD2PS256,
  IX86_BUILTIN_CVTPS2DQ256,
  IX86_BUILTIN_CVTPS2PD256,
  IX86_BUILTIN_CVTTPD2DQ256,
  IX86_BUILTIN_CVTPD2DQ256,
  IX86_BUILTIN_CVTTPS2DQ256,
  IX86_BUILTIN_EXTRACTF128PD256,
  IX86_BUILTIN_EXTRACTF128PS256,
  IX86_BUILTIN_EXTRACTF128SI256,
  IX86_BUILTIN_VZEROALL,
  IX86_BUILTIN_VZEROUPPER,
  IX86_BUILTIN_VPERMILVARPD,
  IX86_BUILTIN_VPERMILVARPS,
  IX86_BUILTIN_VPERMILVARPD256,
  IX86_BUILTIN_VPERMILVARPS256,
  IX86_BUILTIN_VPERMILPD,
  IX86_BUILTIN_VPERMILPS,
  IX86_BUILTIN_VPERMILPD256,
  IX86_BUILTIN_VPERMILPS256,
  IX86_BUILTIN_VPERMIL2PD,
  IX86_BUILTIN_VPERMIL2PS,
  IX86_BUILTIN_VPERMIL2PD256,
  IX86_BUILTIN_VPERMIL2PS256,
  IX86_BUILTIN_VPERM2F128PD256,
  IX86_BUILTIN_VPERM2F128PS256,
  IX86_BUILTIN_VPERM2F128SI256,
  IX86_BUILTIN_VBROADCASTSS,
  IX86_BUILTIN_VBROADCASTSD256,
  IX86_BUILTIN_VBROADCASTSS256,
  IX86_BUILTIN_VBROADCASTPD256,
  IX86_BUILTIN_VBROADCASTPS256,
  IX86_BUILTIN_VINSERTF128PD256,
  IX86_BUILTIN_VINSERTF128PS256,
  IX86_BUILTIN_VINSERTF128SI256,
  IX86_BUILTIN_LOADUPD256,
  IX86_BUILTIN_LOADUPS256,
  IX86_BUILTIN_STOREUPD256,
  IX86_BUILTIN_STOREUPS256,
  IX86_BUILTIN_LDDQU256,
  IX86_BUILTIN_MOVNTDQ256,
  IX86_BUILTIN_MOVNTPD256,
  IX86_BUILTIN_MOVNTPS256,
  IX86_BUILTIN_LOADDQU256,
  IX86_BUILTIN_STOREDQU256,
  IX86_BUILTIN_MASKLOADPD,
  IX86_BUILTIN_MASKLOADPS,
  IX86_BUILTIN_MASKSTOREPD,
  IX86_BUILTIN_MASKSTOREPS,
  IX86_BUILTIN_MASKLOADPD256,
  IX86_BUILTIN_MASKLOADPS256,
  IX86_BUILTIN_MASKSTOREPD256,
  IX86_BUILTIN_MASKSTOREPS256,
  IX86_BUILTIN_MOVSHDUP256,
  IX86_BUILTIN_MOVSLDUP256,
  IX86_BUILTIN_MOVDDUP256,

  IX86_BUILTIN_SQRTPD256,
  IX86_BUILTIN_SQRTPS256,
  IX86_BUILTIN_SQRTPS_NR256,
  IX86_BUILTIN_RSQRTPS256,
  IX86_BUILTIN_RSQRTPS_NR256,

  IX86_BUILTIN_RCPPS256,

  IX86_BUILTIN_ROUNDPD256,
  IX86_BUILTIN_ROUNDPS256,

  IX86_BUILTIN_UNPCKHPD256,
  IX86_BUILTIN_UNPCKLPD256,
  IX86_BUILTIN_UNPCKHPS256,
  IX86_BUILTIN_UNPCKLPS256,

  IX86_BUILTIN_SI256_SI,
  IX86_BUILTIN_PS256_PS,
  IX86_BUILTIN_PD256_PD,
  IX86_BUILTIN_SI_SI256,
  IX86_BUILTIN_PS_PS256,
  IX86_BUILTIN_PD_PD256,

  IX86_BUILTIN_VTESTZPD,
  IX86_BUILTIN_VTESTCPD,
  IX86_BUILTIN_VTESTNZCPD,
  IX86_BUILTIN_VTESTZPS,
  IX86_BUILTIN_VTESTCPS,
  IX86_BUILTIN_VTESTNZCPS,
  IX86_BUILTIN_VTESTZPD256,
  IX86_BUILTIN_VTESTCPD256,
  IX86_BUILTIN_VTESTNZCPD256,
  IX86_BUILTIN_VTESTZPS256,
  IX86_BUILTIN_VTESTCPS256,
  IX86_BUILTIN_VTESTNZCPS256,
  IX86_BUILTIN_PTESTZ256,
  IX86_BUILTIN_PTESTC256,
  IX86_BUILTIN_PTESTNZC256,

  IX86_BUILTIN_MOVMSKPD256,
  IX86_BUILTIN_MOVMSKPS256,

  /* TFmode support builtins.  */
  IX86_BUILTIN_HUGE_VALQ,
  IX86_BUILTIN_FABSQ,
  IX86_BUILTIN_COPYSIGNQ,

  /* Vectorizer support builtins.  */
  IX86_BUILTIN_CPYSGNPS,
  IX86_BUILTIN_CPYSGNPD,
  IX86_BUILTIN_CPYSGNPS256,
  IX86_BUILTIN_CPYSGNPD256,

  IX86_BUILTIN_CVTUDQ2PS,

  IX86_BUILTIN_VEC_PERM_V2DF,
  IX86_BUILTIN_VEC_PERM_V4SF,
  IX86_BUILTIN_VEC_PERM_V2DI,
  IX86_BUILTIN_VEC_PERM_V4SI,
  IX86_BUILTIN_VEC_PERM_V8HI,
  IX86_BUILTIN_VEC_PERM_V16QI,
  IX86_BUILTIN_VEC_PERM_V2DI_U,
  IX86_BUILTIN_VEC_PERM_V4SI_U,
  IX86_BUILTIN_VEC_PERM_V8HI_U,
  IX86_BUILTIN_VEC_PERM_V16QI_U,
  IX86_BUILTIN_VEC_PERM_V4DF,
  IX86_BUILTIN_VEC_PERM_V8SF,

  /* FMA4 and XOP instructions.  */
  IX86_BUILTIN_VFMADDSS,
  IX86_BUILTIN_VFMADDSD,
  IX86_BUILTIN_VFMADDPS,
  IX86_BUILTIN_VFMADDPD,
  IX86_BUILTIN_VFMADDPS256,
  IX86_BUILTIN_VFMADDPD256,
  IX86_BUILTIN_VFMADDSUBPS,
  IX86_BUILTIN_VFMADDSUBPD,
  IX86_BUILTIN_VFMADDSUBPS256,
  IX86_BUILTIN_VFMADDSUBPD256,

  IX86_BUILTIN_VPCMOV,
  IX86_BUILTIN_VPCMOV_V2DI,
  IX86_BUILTIN_VPCMOV_V4SI,
  IX86_BUILTIN_VPCMOV_V8HI,
  IX86_BUILTIN_VPCMOV_V16QI,
  IX86_BUILTIN_VPCMOV_V4SF,
  IX86_BUILTIN_VPCMOV_V2DF,
  IX86_BUILTIN_VPCMOV256,
  IX86_BUILTIN_VPCMOV_V4DI256,
  IX86_BUILTIN_VPCMOV_V8SI256,
  IX86_BUILTIN_VPCMOV_V16HI256,
  IX86_BUILTIN_VPCMOV_V32QI256,
  IX86_BUILTIN_VPCMOV_V8SF256,
  IX86_BUILTIN_VPCMOV_V4DF256,

  IX86_BUILTIN_VPPERM,

  IX86_BUILTIN_VPMACSSWW,
  IX86_BUILTIN_VPMACSWW,
  IX86_BUILTIN_VPMACSSWD,
  IX86_BUILTIN_VPMACSWD,
  IX86_BUILTIN_VPMACSSDD,
  IX86_BUILTIN_VPMACSDD,
  IX86_BUILTIN_VPMACSSDQL,
  IX86_BUILTIN_VPMACSSDQH,
  IX86_BUILTIN_VPMACSDQL,
  IX86_BUILTIN_VPMACSDQH,
  IX86_BUILTIN_VPMADCSSWD,
  IX86_BUILTIN_VPMADCSWD,

  IX86_BUILTIN_VPHADDBW,
  IX86_BUILTIN_VPHADDBD,
  IX86_BUILTIN_VPHADDBQ,
  IX86_BUILTIN_VPHADDWD,
  IX86_BUILTIN_VPHADDWQ,
  IX86_BUILTIN_VPHADDDQ,
  IX86_BUILTIN_VPHADDUBW,
  IX86_BUILTIN_VPHADDUBD,
  IX86_BUILTIN_VPHADDUBQ,
  IX86_BUILTIN_VPHADDUWD,
  IX86_BUILTIN_VPHADDUWQ,
  IX86_BUILTIN_VPHADDUDQ,
  IX86_BUILTIN_VPHSUBBW,
  IX86_BUILTIN_VPHSUBWD,
  IX86_BUILTIN_VPHSUBDQ,

  IX86_BUILTIN_VPROTB,
  IX86_BUILTIN_VPROTW,
  IX86_BUILTIN_VPROTD,
  IX86_BUILTIN_VPROTQ,
  IX86_BUILTIN_VPROTB_IMM,
  IX86_BUILTIN_VPROTW_IMM,
  IX86_BUILTIN_VPROTD_IMM,
  IX86_BUILTIN_VPROTQ_IMM,

  IX86_BUILTIN_VPSHLB,
  IX86_BUILTIN_VPSHLW,
  IX86_BUILTIN_VPSHLD,
  IX86_BUILTIN_VPSHLQ,
  IX86_BUILTIN_VPSHAB,
  IX86_BUILTIN_VPSHAW,
  IX86_BUILTIN_VPSHAD,
  IX86_BUILTIN_VPSHAQ,

  IX86_BUILTIN_VFRCZSS,
  IX86_BUILTIN_VFRCZSD,
  IX86_BUILTIN_VFRCZPS,
  IX86_BUILTIN_VFRCZPD,
  IX86_BUILTIN_VFRCZPS256,
  IX86_BUILTIN_VFRCZPD256,

  IX86_BUILTIN_VPCOMEQUB,
  IX86_BUILTIN_VPCOMNEUB,
  IX86_BUILTIN_VPCOMLTUB,
  IX86_BUILTIN_VPCOMLEUB,
  IX86_BUILTIN_VPCOMGTUB,
  IX86_BUILTIN_VPCOMGEUB,
  IX86_BUILTIN_VPCOMFALSEUB,
  IX86_BUILTIN_VPCOMTRUEUB,

  IX86_BUILTIN_VPCOMEQUW,
  IX86_BUILTIN_VPCOMNEUW,
  IX86_BUILTIN_VPCOMLTUW,
  IX86_BUILTIN_VPCOMLEUW,
  IX86_BUILTIN_VPCOMGTUW,
  IX86_BUILTIN_VPCOMGEUW,
  IX86_BUILTIN_VPCOMFALSEUW,
  IX86_BUILTIN_VPCOMTRUEUW,

  IX86_BUILTIN_VPCOMEQUD,
  IX86_BUILTIN_VPCOMNEUD,
  IX86_BUILTIN_VPCOMLTUD,
  IX86_BUILTIN_VPCOMLEUD,
  IX86_BUILTIN_VPCOMGTUD,
  IX86_BUILTIN_VPCOMGEUD,
  IX86_BUILTIN_VPCOMFALSEUD,
  IX86_BUILTIN_VPCOMTRUEUD,

  IX86_BUILTIN_VPCOMEQUQ,
  IX86_BUILTIN_VPCOMNEUQ,
  IX86_BUILTIN_VPCOMLTUQ,
  IX86_BUILTIN_VPCOMLEUQ,
  IX86_BUILTIN_VPCOMGTUQ,
  IX86_BUILTIN_VPCOMGEUQ,
  IX86_BUILTIN_VPCOMFALSEUQ,
  IX86_BUILTIN_VPCOMTRUEUQ,

  IX86_BUILTIN_VPCOMEQB,
  IX86_BUILTIN_VPCOMNEB,
  IX86_BUILTIN_VPCOMLTB,
  IX86_BUILTIN_VPCOMLEB,
  IX86_BUILTIN_VPCOMGTB,
  IX86_BUILTIN_VPCOMGEB,
  IX86_BUILTIN_VPCOMFALSEB,
  IX86_BUILTIN_VPCOMTRUEB,

  IX86_BUILTIN_VPCOMEQW,
  IX86_BUILTIN_VPCOMNEW,
  IX86_BUILTIN_VPCOMLTW,
  IX86_BUILTIN_VPCOMLEW,
  IX86_BUILTIN_VPCOMGTW,
  IX86_BUILTIN_VPCOMGEW,
  IX86_BUILTIN_VPCOMFALSEW,
  IX86_BUILTIN_VPCOMTRUEW,

  IX86_BUILTIN_VPCOMEQD,
  IX86_BUILTIN_VPCOMNED,
  IX86_BUILTIN_VPCOMLTD,
  IX86_BUILTIN_VPCOMLED,
  IX86_BUILTIN_VPCOMGTD,
  IX86_BUILTIN_VPCOMGED,
  IX86_BUILTIN_VPCOMFALSED,
  IX86_BUILTIN_VPCOMTRUED,

  IX86_BUILTIN_VPCOMEQQ,
  IX86_BUILTIN_VPCOMNEQ,
  IX86_BUILTIN_VPCOMLTQ,
  IX86_BUILTIN_VPCOMLEQ,
  IX86_BUILTIN_VPCOMGTQ,
  IX86_BUILTIN_VPCOMGEQ,
  IX86_BUILTIN_VPCOMFALSEQ,
  IX86_BUILTIN_VPCOMTRUEQ,

  /* LWP instructions.  */
  IX86_BUILTIN_LLWPCB,
  IX86_BUILTIN_SLWPCB,
  IX86_BUILTIN_LWPVAL32,
  IX86_BUILTIN_LWPVAL64,
  IX86_BUILTIN_LWPINS32,
  IX86_BUILTIN_LWPINS64,

  /* BMI instructions.  */
  IX86_BUILTIN_BEXTR32,
  IX86_BUILTIN_BEXTR64,

  /* TBM instructions.  */
  IX86_BUILTIN_BEXTRI32,
  IX86_BUILTIN_BEXTRI64,

  /* FSGSBASE instructions.  */
  IX86_BUILTIN_RDFSBASE32,
  IX86_BUILTIN_RDFSBASE64,
  IX86_BUILTIN_RDGSBASE32,
  IX86_BUILTIN_RDGSBASE64,
  IX86_BUILTIN_WRFSBASE32,
  IX86_BUILTIN_WRFSBASE64,
  IX86_BUILTIN_WRGSBASE32,
  IX86_BUILTIN_WRGSBASE64,

  /* RDRND instructions.  */
  IX86_BUILTIN_RDRAND16_STEP,
  IX86_BUILTIN_RDRAND32_STEP,
  IX86_BUILTIN_RDRAND64_STEP,

  /* F16C instructions.  */
  IX86_BUILTIN_CVTPH2PS,
  IX86_BUILTIN_CVTPH2PS256,
  IX86_BUILTIN_CVTPS2PH,
  IX86_BUILTIN_CVTPS2PH256,

  /* CFString built-in for darwin */
  IX86_BUILTIN_CFSTRING,

  IX86_BUILTIN_MAX
};

/* Table for the ix86 builtin decls.  */
static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
/* Table of all of the builtin functions that are possible with different ISA's
   but are waiting to be built until a function is declared to use that
   ISA.  */
struct builtin_isa {
  const char *name;		     /* function name */
  enum ix86_builtin_func_type tcode; /* type to use in the declaration */
  int isa;			     /* isa_flags this builtin is defined for */
  bool const_p;			     /* true if the declaration is constant */
  bool set_and_not_built_p;	     /* true if recorded but not yet built */
};

static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
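/* A hedged sketch of the deferred-build flow, for orientation (the names
   below all appear in this file): when def_builtin sees a builtin whose
   ISA is not currently enabled, it leaves the decl slot empty and merely
   records the request, leaving roughly

     ix86_builtins[IX86_BUILTIN_BLENDPD] == NULL_TREE
     ix86_builtins_isa[IX86_BUILTIN_BLENDPD].set_and_not_built_p == true

   The decl is only materialized later, when ix86_add_new_builtins is
   handed an isa mask that includes the recorded ISA.  */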
/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the MASK
   of which isa_flags to use in the ix86_builtins_isa array.  Stores the
   function decl in the ix86_builtins array.  Returns the function decl or
   NULL_TREE, if the builtin was not added.

   If the front end has a special hook for builtin functions, delay adding
   builtin functions that aren't in the current ISA until the ISA is changed
   with function specific optimization.  Doing so can save about 300K for the
   default compiler.  When the builtin is expanded, check at that time whether
   it is valid.

   If the front end doesn't have a special hook, record all builtins, even if
   it isn't an instruction set in the current ISA in case the user uses
   function specific options for a different ISA, so that we don't get scope
   errors if a builtin is added in the middle of a function scope.  */
static inline tree
def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
	     enum ix86_builtins code)
{
  tree decl = NULL_TREE;

  if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
    {
      ix86_builtins_isa[(int) code].isa = mask;

      mask &= ~OPTION_MASK_ISA_64BIT;
      if (mask == 0
	  || (mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type = ix86_get_builtin_func_type (tcode);
	  decl = add_builtin_function (name, type, code, BUILT_IN_MD,
				       NULL, NULL_TREE);
	  ix86_builtins[(int) code] = decl;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = false;
	}
      else
	{
	  ix86_builtins[(int) code] = NULL_TREE;
	  ix86_builtins_isa[(int) code].tcode = tcode;
	  ix86_builtins_isa[(int) code].name = name;
	  ix86_builtins_isa[(int) code].const_p = false;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = true;
	}
    }

  return decl;
}
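/* Illustrative use of def_builtin (a sketch; the actual registrations are
   driven by the bdesc_* tables below).  A 64-bit-only builtin might be
   registered like so, and is silently skipped on 32-bit targets by the
   OPTION_MASK_ISA_64BIT check above:

     def_builtin (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_64BIT,
		  "__builtin_ia32_crc32di", UINT64_FTYPE_UINT64_UINT64,
		  IX86_BUILTIN_CRC32DI);  */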
/* Like def_builtin, but also marks the function decl "const".  */
static inline tree
def_builtin_const (int mask, const char *name,
		   enum ix86_builtin_func_type tcode, enum ix86_builtins code)
{
  tree decl = def_builtin (mask, name, tcode, code);
  if (decl)
    TREE_READONLY (decl) = 1;
  else
    ix86_builtins_isa[(int) code].const_p = true;
  return decl;
}
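/* For example, a pure arithmetic builtin is registered through this
   wrapper so its decl is TREE_READONLY and the middle end may CSE calls
   to it; a sketch:

     def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_addpd",
			V2DF_FTYPE_V2DF_V2DF, IX86_BUILTIN_ADDPD);  */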
/* Add any new builtin functions for a given ISA that may not have been
   declared.  This saves a bit of space compared to adding all of the
   declarations to the tree, even if we didn't use them.  */
static void
ix86_add_new_builtins (int isa)
{
  int i;

  for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
    {
      if ((ix86_builtins_isa[i].isa & isa) != 0
	  && ix86_builtins_isa[i].set_and_not_built_p)
	{
	  tree decl, type;

	  /* Don't define the builtin again.  */
	  ix86_builtins_isa[i].set_and_not_built_p = false;

	  type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
	  decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
						 type, i, BUILT_IN_MD, NULL,
						 NULL_TREE);

	  ix86_builtins[i] = decl;
	  if (ix86_builtins_isa[i].const_p)
	    TREE_READONLY (decl) = 1;
	}
    }
}
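/* Illustrative call site (an assumption, not a definitive list of
   callers): when function-specific target options enable a new ISA,
   e.g. via __attribute__((target ("avx"))), something like

     ix86_add_new_builtins (ix86_isa_flags);

   materializes the builtins that def_builtin deferred for that ISA.  */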
/* Bits for builtin_description.flag.  */

/* Set when we don't support the comparison natively, and should
   swap_comparison in order to support it.  */
#define BUILTIN_DESC_SWAP_OPERANDS 1
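/* Example: the packed compare patterns have no native GT form, so a
   builtin such as __builtin_ia32_cmpgtps is described below as LT with
   swapped operands (see the *_SWAP entries in bdesc_args); x > y is
   emitted as y < x.  */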
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const int flag;
};
static const struct builtin_description bdesc_comi[] =
{
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
};
static const struct builtin_description bdesc_pcmpestr[] =
{
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
};
static const struct builtin_description bdesc_pcmpistr[] =
{
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
};
/* Special builtins with variable number of arguments.  */
static const struct builtin_description bdesc_special_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },

  /* MMX */
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },

  /* SSE or 3DNow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },

  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },

  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },

  /* FSGSBASE */
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
};
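/* A sketch of how one of these rows reaches users: the
   "__builtin_ia32_storeups" entry above is what the _mm_storeu_ps
   intrinsic in <xmmintrin.h> expands to, roughly (simplified from the
   real header, which carries extra inline/artificial attributes):

     extern __inline void
     _mm_storeu_ps (float *__P, __m128 __A)
     {
       __builtin_ia32_storeups (__P, (__v4sf)__A);
     }
*/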
24841 /* Builtins with variable number of arguments. */
24842 static const struct builtin_description bdesc_args[] =
24844 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
24845 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
24846 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
24847 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
24848 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
24849 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
24850 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
24853 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24854 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24855 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24856 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24857 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24858 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24860 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24861 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24862 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24863 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24864 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24865 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24866 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24867 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24869 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24870 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24872 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24873 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24874 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24875 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24877 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24878 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24879 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24880 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24881 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24882 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24884 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24885 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24886 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24887 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24888 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI},
24889 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI},
24891 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
24892 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
24893 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
24895 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
24897 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24898 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24899 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
24900 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24901 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24902 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
24904 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24905 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24906 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
24907 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24908 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24909 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
24911 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24912 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24913 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24914 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24917 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
24918 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
24919 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24920 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24922 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24923 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24924 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24925 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24926 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24927 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24928 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24929 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24930 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24931 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24932 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24933 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24934 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24935 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24936 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24939 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
24940 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
24941 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
24942 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24943 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24944 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24947 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
24948 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24949 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24950 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24951 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24952 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24953 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
24954 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
24955 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
24956 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
24957 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
24958 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
24960 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
24962 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24963 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24964 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24965 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24966 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24967 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24968 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24969 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24971 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
24972 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
24973 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
24974 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24975 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24976 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24977 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
24978 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
24979 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
24980 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24981 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
24982 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24983 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
24984 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
24985 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
24986 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24987 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
24988 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
24989 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
24990 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24991 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24992 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24994 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24995 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24996 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24997 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24999 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25000 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25001 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25002 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25004 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25006 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25007 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25008 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25009 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25010 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25012 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
25013 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
25014 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, V4SF_FTYPE_V4SF_DI },
25016 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
25018 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
25019 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
25020 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
25022 /* SSE MMX or 3Dnow!A */
25023 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
25024 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25025 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25027 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
25028 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25029 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
25030 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25032 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
25033 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
25035 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
25038 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25040 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
25041 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
25042 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
25043 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
25044 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
25045 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
25046 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
25047 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
25048 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
25049 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
25050 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
25051 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
25053 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
25054 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
25055 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
25056 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
25057 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
25058 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
25060 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
25061 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
25062 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
25063 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
25064 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
25066 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
25068 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
25069 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
25070 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
25071 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
25073 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
25074 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
25075 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
25077 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25078 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25079 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25080 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25081 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25082 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25083 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25084 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
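
  /* A note on the compare entries above (editorial summary, inferred
     from the RTL codes used): the negated predicates map onto the
     unordered codes because NOT (a < b) is also true when a and b
     compare unordered, so cmpnltpd expands with UNGE and cmpnlepd
     with UNGT.  The *_SWAP function types synthesize GT/GE by
     swapping the two operands of the corresponding LT/LE pattern.  */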

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
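
  /* Editorial note on the shift entries above: the *_COUNT function
     types mark the last operand as a shift count that may be an
     immediate or a register, while V2DI_FTYPE_V2DI_INT_CONVERT marks
     an immediate that is reinterpreted for the whole-register V1TImode
     shift patterns; the pslldq/psrldq intrinsics pass their byte
     count scaled up to bits.  */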

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  /* SSE2 MMX */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  /* SSSE3 */
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },

  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
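
  /* Editorial note: the three ptest builtins above share one insn
     pattern and differ only in the condition read back from the
     flags: EQ tests ZF (ptestz), LTU tests CF (ptestc), and GTU
     tests that ZF and CF are both clear (ptestnzc).  The AVX vtest
     and ptest256 entries further down follow the same scheme.  */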

  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* AES */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* PCLMUL */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },

  /* ABM */
  { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },

  /* BMI */
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },

  /* TBM */
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },

  /* F16C */
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
};
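
/* Editorial summary: each entry above binds an ISA mask, an insn code
   and a function-type code to a public builtin name; the table is
   walked once at initialization to register the builtins, and the
   FTYPE code later drives generic operand marshalling in the
   expander.  As a minimal user-level sketch (hypothetical test case,
   not part of this file), the IX86_BUILTIN_ADDPD entry makes the
   following compile to a single addpd instruction when SSE2 is
   enabled:

     typedef double v2df __attribute__ ((vector_size (16)));

     v2df
     add_pd (v2df a, v2df b)
     {
       return __builtin_ia32_addpd (a, b);
     }
*/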
/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
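
/* The MULTI_ARG_* macros above are plain aliases for the function-type
   codes used throughout these tables; they only keep the FMA4/XOP
   entries below within line width.  MULTI_ARG_3_SF, for instance,
   expands to V4SF_FTYPE_V4SF_V4SF_V4SF, the signature of a
   three-operand single-precision builtin such as
   __builtin_ia32_vfmaddss.  */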

static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
    "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
    "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
    UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
    "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
    "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
    "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
    "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
    "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
    "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
    "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
    "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
25646 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25647 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25648 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
25649 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
25650 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
25651 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
25653 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
25654 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25655 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25656 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
25657 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
25658 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
25659 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
25661 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
25662 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25663 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25664 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
25665 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
25666 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
25667 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
25669 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25670 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25671 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25672 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
25673 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
25674 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
25675 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
25677 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
25678 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25679 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25680 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
25681 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
25682 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
25683 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
25685 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
25686 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25687 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25688 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
25689 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
25690 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
25691 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
25693 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
25694 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25695 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25696 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
25697 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
25698 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
25699 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
25701 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25702 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25703 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25704 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
25705 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
25706 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
25707 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
25709 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25710 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25711 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25712 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25713 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25714 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25715 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25716 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25718 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25719 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25720 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25721 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25722 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25723 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25724 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25725 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25727 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
25728 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
25729 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
25730 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
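
/* Each entry above binds one builtin to an insn pattern.  For instance,
   the "__builtin_ia32_vpmacssww" entry routes through CODE_FOR_xop_pmacssww
   with MULTI_ARG_3_HI, so a (hypothetical) user-level call such as

     __m128i r = __builtin_ia32_vpmacssww (a, b, c);

   with three V8HImode operands is expanded by ix86_expand_multi_arg_builtin
   below as a plain three-operand insn.  The vpcom* comparison entries
   additionally carry an rtx_code (EQ, LT, ...) that becomes the sub_code
   argument of that expander.  */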
/* Set up all the MMX/SSE builtins, even builtins for instructions that are not
   in the current target ISA to allow the user to compile particular modules
   with different target specific options that differ from the command line
   options.  */
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
        continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
        continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
        ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
        ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
        ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
        ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
        ftype = INT_FTYPE_V2DF_V2DF;
      else
        ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* SSE */
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
               VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
               UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
               "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
               IX86_BUILTIN_MASKMOVQ);

  /* SSE2 */
  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
               VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
               VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
  x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
                            VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  /* SSE3.  */
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
               VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
               VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);

  /* AES */
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
                     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
                     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
                     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
                     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
                     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
                     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  /* PCLMUL */
  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
                     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  /* RDRND */
  def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand16_step",
               INT_FTYPE_PUSHORT, IX86_BUILTIN_RDRAND16_STEP);
  def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand32_step",
               INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDRAND32_STEP);
  def_builtin (OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT,
               "__builtin_ia32_rdrand64_step", INT_FTYPE_PULONGLONG,
               IX86_BUILTIN_RDRAND64_STEP);

  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
                     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
                     V4HI_FTYPE_HI_HI_HI_HI,
                     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
                     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
                     IX86_BUILTIN_VEC_INIT_V8QI);

  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
                     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
                     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
                     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
                     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
                     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
                     "__builtin_ia32_vec_ext_v4hi",
                     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
                     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
                     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
                     "__builtin_ia32_vec_set_v2di",
                     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
                     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
                     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
                     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
                     "__builtin_ia32_vec_set_v4hi",
                     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
                     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);

  /* Add FMA4 multi-arg argument instructions */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
        continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}
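
/* The def_builtin calls above only register the functions with the
   front end; no code is generated until a call is expanded.  E.g. once
   this has run, a translation unit compiled with -msse2 may contain a
   (hypothetical) call such as

     __builtin_ia32_clflush (ptr);

   which the expanders later in this file turn into a single clflush
   insn.  */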
/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
                                      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
                              NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
                              sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
                        BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
                        BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
                        BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
                        BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
                        BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
                        BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}
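
/* After this, both ABIs' varargs primitives are usable from C on
   64-bit targets, e.g. a (hypothetical) sequence such as

     __builtin_ms_va_list ap;
     __builtin_ms_va_start (ap, last_named_arg);
     ...
     __builtin_ms_va_end (ap);

   independent of whether the surrounding function itself uses the
   ms_abi or sysv_abi calling convention.  */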
static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* The __float80 type.  */
      float80_type_node = make_node (REAL_TYPE);

      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  lang_hooks.types.register_builtin_type (float80_type_node, "__float80");

  /* The __float128 type.  */
  float128_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type_node) = 128;
  layout_type (float128_type_node);
  lang_hooks.types.register_builtin_type (float128_type_node, "__float128");

  /* This macro is built by i386-builtin-types.awk.  */
  DEFINE_BUILTIN_PRIMITIVE_TYPES;
}
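
/* With these registered, user code may name the types directly, e.g. a
   (hypothetical) declaration such as

     __float128 q;
     __float80 e;

   Note that __float80 is only a freshly built REAL_TYPE when long
   double is not already XFmode; otherwise it is just another name for
   long double, as arranged above.  */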
static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
                     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
                     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);

  /* We will expand them to normal call if SSE2 isn't available since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
                            BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
                            BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;

  ix86_init_mmx_sse_builtins ();

  if (TARGET_64BIT)
    ix86_init_builtins_va_builtins_abi ();

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */
static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}
/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!insn_data[icode].operand[2].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
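
/* The predicate checks above implement the usual expansion contract:
   operands are copied into registers only when the pattern's own
   predicates reject them, so a memory operand can still match a
   memory-accepting pattern directly.  The SImode/TImode special case
   covers shift-count patterns whose count operand is declared TImode:
   the int argument is loaded into a V4SImode register with
   gen_sse2_loadd and then viewed as TImode via gen_lowpart.  */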
/* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns.  */

static rtx
ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
                               enum ix86_builtin_func_type m_type,
                               enum rtx_code sub_code)
{
  rtx pat;
  int i;
  int nargs;
  bool comparison_p = false;
  bool tf_p = false;
  bool last_arg_constant = false;
  int num_memory = 0;
  struct {
    rtx op;
    enum machine_mode mode;
  } args[4];

  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  switch (m_type)
    {
    case MULTI_ARG_4_DF2_DI_I:
    case MULTI_ARG_4_DF2_DI_I1:
    case MULTI_ARG_4_SF2_SI_I:
    case MULTI_ARG_4_SF2_SI_I1:
      nargs = 4;
      last_arg_constant = true;
      break;

    case MULTI_ARG_3_SF:
    case MULTI_ARG_3_DF:
    case MULTI_ARG_3_SF2:
    case MULTI_ARG_3_DF2:
    case MULTI_ARG_3_DI:
    case MULTI_ARG_3_SI:
    case MULTI_ARG_3_SI_DI:
    case MULTI_ARG_3_HI:
    case MULTI_ARG_3_HI_SI:
    case MULTI_ARG_3_QI:
    case MULTI_ARG_3_DI2:
    case MULTI_ARG_3_SI2:
    case MULTI_ARG_3_HI2:
    case MULTI_ARG_3_QI2:
      nargs = 3;
      break;

    case MULTI_ARG_2_SF:
    case MULTI_ARG_2_DF:
    case MULTI_ARG_2_DI:
    case MULTI_ARG_2_SI:
    case MULTI_ARG_2_HI:
    case MULTI_ARG_2_QI:
      nargs = 2;
      break;

    case MULTI_ARG_2_DI_IMM:
    case MULTI_ARG_2_SI_IMM:
    case MULTI_ARG_2_HI_IMM:
    case MULTI_ARG_2_QI_IMM:
      nargs = 2;
      last_arg_constant = true;
      break;

    case MULTI_ARG_1_SF:
    case MULTI_ARG_1_DF:
    case MULTI_ARG_1_SF2:
    case MULTI_ARG_1_DF2:
    case MULTI_ARG_1_DI:
    case MULTI_ARG_1_SI:
    case MULTI_ARG_1_HI:
    case MULTI_ARG_1_QI:
    case MULTI_ARG_1_SI_DI:
    case MULTI_ARG_1_HI_DI:
    case MULTI_ARG_1_HI_SI:
    case MULTI_ARG_1_QI_DI:
    case MULTI_ARG_1_QI_SI:
    case MULTI_ARG_1_QI_HI:
      nargs = 1;
      break;

    case MULTI_ARG_2_DI_CMP:
    case MULTI_ARG_2_SI_CMP:
    case MULTI_ARG_2_HI_CMP:
    case MULTI_ARG_2_QI_CMP:
      nargs = 2;
      comparison_p = true;
      break;

    case MULTI_ARG_2_SF_TF:
    case MULTI_ARG_2_DF_TF:
    case MULTI_ARG_2_DI_TF:
    case MULTI_ARG_2_SI_TF:
    case MULTI_ARG_2_HI_TF:
    case MULTI_ARG_2_QI_TF:
      nargs = 2;
      tf_p = true;
      break;

    default:
      gcc_unreachable ();
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (nargs <= 4);

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      int adjust = (comparison_p) ? 1 : 0;
      enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;

      if (last_arg_constant && i == nargs - 1)
        {
          if (!insn_data[icode].operand[i + 1].predicate (op, mode))
            {
              enum insn_code new_icode = icode;
              switch (icode)
                {
                case CODE_FOR_xop_vpermil2v2df3:
                case CODE_FOR_xop_vpermil2v4sf3:
                case CODE_FOR_xop_vpermil2v4df3:
                case CODE_FOR_xop_vpermil2v8sf3:
                  error ("the last argument must be a 2-bit immediate");
                  return gen_reg_rtx (tmode);
                case CODE_FOR_xop_rotlv2di3:
                  new_icode = CODE_FOR_rotlv2di3;
                  goto xop_rotl;
                case CODE_FOR_xop_rotlv4si3:
                  new_icode = CODE_FOR_rotlv4si3;
                  goto xop_rotl;
                case CODE_FOR_xop_rotlv8hi3:
                  new_icode = CODE_FOR_rotlv8hi3;
                  goto xop_rotl;
                case CODE_FOR_xop_rotlv16qi3:
                  new_icode = CODE_FOR_rotlv16qi3;
                xop_rotl:
                  if (CONST_INT_P (op))
                    {
                      int mask = GET_MODE_BITSIZE (GET_MODE_INNER (tmode)) - 1;
                      op = GEN_INT (INTVAL (op) & mask);
                      gcc_checking_assert
                        (insn_data[icode].operand[i + 1].predicate (op, mode));
                    }
                  else
                    {
                      gcc_checking_assert
                        (nargs == 2
                         && insn_data[new_icode].operand[0].mode == tmode
                         && insn_data[new_icode].operand[1].mode == tmode
                         && insn_data[new_icode].operand[2].mode == mode
                         && insn_data[new_icode].operand[0].predicate
                            == insn_data[icode].operand[0].predicate
                         && insn_data[new_icode].operand[1].predicate
                            == insn_data[icode].operand[1].predicate);
                      icode = new_icode;
                      goto non_constant;
                    }
                  break;
                default:
                  gcc_unreachable ();
                }
            }
        }
      else
        {
        non_constant:
          if (VECTOR_MODE_P (mode))
            op = safe_vector_operand (op, mode);

          /* If we aren't optimizing, only allow one memory operand to be
             generated.  */
          if (memory_operand (op, mode))
            num_memory++;

          gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);

          if (optimize
              || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
              || num_memory > 1)
            op = force_reg (mode, op);
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;

    case 2:
      if (tf_p)
        pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
                               GEN_INT ((int)sub_code));
      else if (! comparison_p)
        pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      else
        {
          rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
                                       args[0].op,
                                       args[1].op);

          pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
        }
      break;

    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;

    case 4:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
      break;

    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
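
/* For the MULTI_ARG_2_*_CMP descriptors the generated pattern receives
   the comparison rtx itself as an extra operand, so a (hypothetical)
   call such as

     __m128i r = __builtin_ia32_vpcomltb (a, b);

   is emitted roughly as

     (set (reg:V16QI r) (lt:V16QI (reg:V16QI a) (reg:V16QI b)))

   with LT supplied by the sub_code field of the builtin table.  */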
/* Subroutine of ix86_expand_args_builtin to take care of scalar unop
   insns with vec_merge.  */

static rtx
ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
                                    rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (!insn_data[icode].operand[2].predicate (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comparison insns.  */

static rtx
ix86_expand_sse_compare (const struct builtin_description *d,
                         tree exp, rtx target, bool swap)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (swap)
    {
      rtx tmp = gen_reg_rtx (mode1);
      emit_move_insn (tmp, op1);
      op1 = op0;
      op0 = tmp;
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[2].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comi insns.  */

static rtx
ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
                      rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
    {
      rtx tmp = op1;
      op1 = op0;
      op0 = tmp;
    }

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
                          gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                          gen_rtx_fmt_ee (comparison, QImode,
                                          SET_DEST (pat),
                                          const0_rtx)));

  return SUBREG_REG (target);
}
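
/* The SImode-register/QImode-subreg dance above (also used by the
   ptest and pcmpestr/pcmpistr expanders below) exists so that only the
   low byte is written by the flag-extraction SET, while the
   surrounding zeroed SImode register supplies well-defined upper bits;
   the caller gets the full SImode pseudo back via SUBREG_REG.  */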
/* Subroutine of ix86_expand_builtin to take care of ptest insns.  */

static rtx
ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
                       rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
                          gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                          gen_rtx_fmt_ee (comparison, QImode,
                                          gen_rtx_REG (CCmode, FLAGS_REG),
                                          const0_rtx)));

  return SUBREG_REG (target);
}
/* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns.  */

static rtx
ix86_expand_sse_pcmpestr (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  tree arg4 = CALL_EXPR_ARG (exp, 4);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  rtx op4 = expand_normal (arg4);
  enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modei3 = insn_data[d->icode].operand[3].mode;
  modev4 = insn_data[d->icode].operand[4].mode;
  modei5 = insn_data[d->icode].operand[5].mode;
  modeimm = insn_data[d->icode].operand[6].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev4))
    op2 = safe_vector_operand (op2, modev4);

  if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
    op1 = copy_to_mode_reg (modei3, op1);
  if ((optimize && !register_operand (op2, modev4))
      || !insn_data[d->icode].operand[4].predicate (op2, modev4))
    op2 = copy_to_mode_reg (modev4, op2);
  if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
    op3 = copy_to_mode_reg (modei5, op3);

  if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
    {
      error ("the fifth argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPESTRI128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode0
          || !insn_data[d->icode].operand[0].predicate (target, tmode0))
        target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
    }
  else if (d->code == IX86_BUILTIN_PCMPESTRM128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode1
          || !insn_data[d->icode].operand[1].predicate (target, tmode1))
        target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
        (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                      gen_rtx_fmt_ee (EQ, QImode,
                                      gen_rtx_REG ((enum machine_mode) d->flag,
                                                   FLAGS_REG),
                                      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
/* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns.  */

static rtx
ix86_expand_sse_pcmpistr (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modev3 = insn_data[d->icode].operand[3].mode;
  modeimm = insn_data[d->icode].operand[4].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev3))
    op1 = safe_vector_operand (op1, modev3);

  if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if ((optimize && !register_operand (op1, modev3))
      || !insn_data[d->icode].operand[3].predicate (op1, modev3))
    op1 = copy_to_mode_reg (modev3, op1);

  if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
    {
      error ("the third argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPISTRI128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode0
          || !insn_data[d->icode].operand[0].predicate (target, tmode0))
        target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
    }
  else if (d->code == IX86_BUILTIN_PCMPISTRM128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode1
          || !insn_data[d->icode].operand[1].predicate (target, tmode1))
        target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
        (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                      gen_rtx_fmt_ee (EQ, QImode,
                                      gen_rtx_REG ((enum machine_mode) d->flag,
                                                   FLAGS_REG),
                                      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
/* Subroutine of ix86_expand_builtin to take care of insns with
   variable number of operands.  */

static rtx
ix86_expand_args_builtin (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat, real_target;
  unsigned int i, nargs;
  unsigned int nargs_constant = 0;
  int num_memory = 0;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[4];
  bool last_arg_count = false;
  enum insn_code icode = d->icode;
  const struct insn_data_d *insn_p = &insn_data[icode];
  enum machine_mode tmode = insn_p->operand[0].mode;
  enum machine_mode rmode = VOIDmode;
  bool swap = false;
  enum rtx_code comparison = d->comparison;

  switch ((enum ix86_builtin_func_type) d->flag)
    {
    case INT_FTYPE_V8SF_V8SF_PTEST:
    case INT_FTYPE_V4DI_V4DI_PTEST:
    case INT_FTYPE_V4DF_V4DF_PTEST:
    case INT_FTYPE_V4SF_V4SF_PTEST:
    case INT_FTYPE_V2DI_V2DI_PTEST:
    case INT_FTYPE_V2DF_V2DF_PTEST:
      return ix86_expand_sse_ptest (d, exp, target);
    case FLOAT128_FTYPE_FLOAT128:
    case FLOAT_FTYPE_FLOAT:
    case INT_FTYPE_INT:
    case UINT64_FTYPE_INT:
    case UINT16_FTYPE_UINT16:
    case INT64_FTYPE_INT64:
    case INT64_FTYPE_V4SF:
    case INT64_FTYPE_V2DF:
    case INT_FTYPE_V16QI:
    case INT_FTYPE_V8QI:
    case INT_FTYPE_V8SF:
    case INT_FTYPE_V4DF:
    case INT_FTYPE_V4SF:
    case INT_FTYPE_V2DF:
    case V16QI_FTYPE_V16QI:
    case V8SI_FTYPE_V8SF:
    case V8SI_FTYPE_V4SI:
    case V8HI_FTYPE_V8HI:
    case V8HI_FTYPE_V16QI:
    case V8QI_FTYPE_V8QI:
    case V8SF_FTYPE_V8SF:
    case V8SF_FTYPE_V8SI:
    case V8SF_FTYPE_V4SF:
    case V8SF_FTYPE_V8HI:
    case V4SI_FTYPE_V4SI:
    case V4SI_FTYPE_V16QI:
    case V4SI_FTYPE_V4SF:
    case V4SI_FTYPE_V8SI:
    case V4SI_FTYPE_V8HI:
    case V4SI_FTYPE_V4DF:
    case V4SI_FTYPE_V2DF:
    case V4HI_FTYPE_V4HI:
    case V4DF_FTYPE_V4DF:
    case V4DF_FTYPE_V4SI:
    case V4DF_FTYPE_V4SF:
    case V4DF_FTYPE_V2DF:
    case V4SF_FTYPE_V4SF:
    case V4SF_FTYPE_V4SI:
    case V4SF_FTYPE_V8SF:
    case V4SF_FTYPE_V4DF:
    case V4SF_FTYPE_V8HI:
    case V4SF_FTYPE_V2DF:
    case V2DI_FTYPE_V2DI:
    case V2DI_FTYPE_V16QI:
    case V2DI_FTYPE_V8HI:
    case V2DI_FTYPE_V4SI:
    case V2DF_FTYPE_V2DF:
    case V2DF_FTYPE_V4SI:
    case V2DF_FTYPE_V4DF:
    case V2DF_FTYPE_V4SF:
    case V2DF_FTYPE_V2SI:
    case V2SI_FTYPE_V2SI:
    case V2SI_FTYPE_V4SF:
    case V2SI_FTYPE_V2SF:
    case V2SI_FTYPE_V2DF:
    case V2SF_FTYPE_V2SF:
    case V2SF_FTYPE_V2SI:
      nargs = 1;
      break;
    case V4SF_FTYPE_V4SF_VEC_MERGE:
    case V2DF_FTYPE_V2DF_VEC_MERGE:
      return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
    case FLOAT128_FTYPE_FLOAT128_FLOAT128:
    case V16QI_FTYPE_V16QI_V16QI:
    case V16QI_FTYPE_V8HI_V8HI:
    case V8QI_FTYPE_V8QI_V8QI:
    case V8QI_FTYPE_V4HI_V4HI:
    case V8HI_FTYPE_V8HI_V8HI:
    case V8HI_FTYPE_V16QI_V16QI:
    case V8HI_FTYPE_V4SI_V4SI:
    case V8SF_FTYPE_V8SF_V8SF:
    case V8SF_FTYPE_V8SF_V8SI:
    case V4SI_FTYPE_V4SI_V4SI:
    case V4SI_FTYPE_V8HI_V8HI:
    case V4SI_FTYPE_V4SF_V4SF:
    case V4SI_FTYPE_V2DF_V2DF:
    case V4HI_FTYPE_V4HI_V4HI:
    case V4HI_FTYPE_V8QI_V8QI:
    case V4HI_FTYPE_V2SI_V2SI:
    case V4DF_FTYPE_V4DF_V4DF:
    case V4DF_FTYPE_V4DF_V4DI:
    case V4SF_FTYPE_V4SF_V4SF:
    case V4SF_FTYPE_V4SF_V4SI:
    case V4SF_FTYPE_V4SF_V2SI:
    case V4SF_FTYPE_V4SF_V2DF:
    case V4SF_FTYPE_V4SF_DI:
    case V4SF_FTYPE_V4SF_SI:
    case V2DI_FTYPE_V2DI_V2DI:
    case V2DI_FTYPE_V16QI_V16QI:
    case V2DI_FTYPE_V4SI_V4SI:
    case V2DI_FTYPE_V2DI_V16QI:
    case V2DI_FTYPE_V2DF_V2DF:
    case V2SI_FTYPE_V2SI_V2SI:
    case V2SI_FTYPE_V4HI_V4HI:
    case V2SI_FTYPE_V2SF_V2SF:
    case V2DF_FTYPE_V2DF_V2DF:
    case V2DF_FTYPE_V2DF_V4SF:
    case V2DF_FTYPE_V2DF_V2DI:
    case V2DF_FTYPE_V2DF_DI:
    case V2DF_FTYPE_V2DF_SI:
    case V2SF_FTYPE_V2SF_V2SF:
    case V1DI_FTYPE_V1DI_V1DI:
    case V1DI_FTYPE_V8QI_V8QI:
    case V1DI_FTYPE_V2SI_V2SI:
      if (comparison == UNKNOWN)
        return ix86_expand_binop_builtin (icode, exp, target);
      nargs = 2;
      break;
    case V4SF_FTYPE_V4SF_V4SF_SWAP:
    case V2DF_FTYPE_V2DF_V2DF_SWAP:
      gcc_assert (comparison != UNKNOWN);
      nargs = 2;
      swap = true;
      break;
    case V8HI_FTYPE_V8HI_V8HI_COUNT:
    case V8HI_FTYPE_V8HI_SI_COUNT:
    case V4SI_FTYPE_V4SI_V4SI_COUNT:
    case V4SI_FTYPE_V4SI_SI_COUNT:
    case V4HI_FTYPE_V4HI_V4HI_COUNT:
    case V4HI_FTYPE_V4HI_SI_COUNT:
    case V2DI_FTYPE_V2DI_V2DI_COUNT:
    case V2DI_FTYPE_V2DI_SI_COUNT:
    case V2SI_FTYPE_V2SI_V2SI_COUNT:
    case V2SI_FTYPE_V2SI_SI_COUNT:
    case V1DI_FTYPE_V1DI_V1DI_COUNT:
    case V1DI_FTYPE_V1DI_SI_COUNT:
      nargs = 2;
      last_arg_count = true;
      break;
    case UINT64_FTYPE_UINT64_UINT64:
    case UINT_FTYPE_UINT_UINT:
    case UINT_FTYPE_UINT_USHORT:
    case UINT_FTYPE_UINT_UCHAR:
    case UINT16_FTYPE_UINT16_INT:
    case UINT8_FTYPE_UINT8_INT:
      nargs = 2;
      break;
    case V2DI_FTYPE_V2DI_INT_CONVERT:
      nargs = 2;
      rmode = V1TImode;
      nargs_constant = 1;
      break;
    case V8HI_FTYPE_V8HI_INT:
    case V8HI_FTYPE_V8SF_INT:
    case V8HI_FTYPE_V4SF_INT:
    case V8SF_FTYPE_V8SF_INT:
    case V4SI_FTYPE_V4SI_INT:
    case V4SI_FTYPE_V8SI_INT:
    case V4HI_FTYPE_V4HI_INT:
    case V4DF_FTYPE_V4DF_INT:
    case V4SF_FTYPE_V4SF_INT:
    case V4SF_FTYPE_V8SF_INT:
    case V2DI_FTYPE_V2DI_INT:
    case V2DF_FTYPE_V2DF_INT:
    case V2DF_FTYPE_V4DF_INT:
      nargs = 2;
      nargs_constant = 1;
      break;
    case V16QI_FTYPE_V16QI_V16QI_V16QI:
    case V8SF_FTYPE_V8SF_V8SF_V8SF:
    case V4DF_FTYPE_V4DF_V4DF_V4DF:
    case V4SF_FTYPE_V4SF_V4SF_V4SF:
    case V2DF_FTYPE_V2DF_V2DF_V2DF:
      nargs = 3;
      break;
    case V16QI_FTYPE_V16QI_V16QI_INT:
    case V8HI_FTYPE_V8HI_V8HI_INT:
    case V8SI_FTYPE_V8SI_V8SI_INT:
    case V8SI_FTYPE_V8SI_V4SI_INT:
    case V8SF_FTYPE_V8SF_V8SF_INT:
    case V8SF_FTYPE_V8SF_V4SF_INT:
    case V4SI_FTYPE_V4SI_V4SI_INT:
    case V4DF_FTYPE_V4DF_V4DF_INT:
    case V4DF_FTYPE_V4DF_V2DF_INT:
    case V4SF_FTYPE_V4SF_V4SF_INT:
    case V2DI_FTYPE_V2DI_V2DI_INT:
    case V2DF_FTYPE_V2DF_V2DF_INT:
      nargs = 3;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
      nargs = 3;
      rmode = V2DImode;
      nargs_constant = 1;
      break;
    case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
      nargs = 3;
      rmode = DImode;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_UINT_UINT:
      nargs = 3;
      nargs_constant = 2;
      break;
    case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
    case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
    case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
    case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
      nargs = 4;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
      nargs = 4;
      nargs_constant = 2;
      break;
    default:
      gcc_unreachable ();
    }

  gcc_assert (nargs <= ARRAY_SIZE (args));

  if (comparison != UNKNOWN)
    {
      gcc_assert (nargs == 2);
      return ix86_expand_sse_compare (d, exp, target, swap);
    }

  if (rmode == VOIDmode || rmode == tmode)
    {
      if (optimize
          || target == 0
          || GET_MODE (target) != tmode
          || !insn_p->operand[0].predicate (target, tmode))
        target = gen_reg_rtx (tmode);
      real_target = target;
    }
  else
    {
      target = gen_reg_rtx (rmode);
      real_target = simplify_gen_subreg (tmode, target, rmode, 0);
    }

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      enum machine_mode mode = insn_p->operand[i + 1].mode;
      bool match = insn_p->operand[i + 1].predicate (op, mode);

      if (last_arg_count && (i + 1) == nargs)
        {
          /* SIMD shift insns take either an 8-bit immediate or
             register as count.  But builtin functions take int as
             count.  If count doesn't match, we put it in register.  */
          if (!match)
            {
              op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
              if (!insn_p->operand[i + 1].predicate (op, mode))
                op = copy_to_reg (op);
            }
        }
      else if ((nargs - i) <= nargs_constant)
        {
          if (!match)
            switch (icode)
              {
              case CODE_FOR_sse4_1_roundpd:
              case CODE_FOR_sse4_1_roundps:
              case CODE_FOR_sse4_1_roundsd:
              case CODE_FOR_sse4_1_roundss:
              case CODE_FOR_sse4_1_blendps:
              case CODE_FOR_avx_blendpd256:
              case CODE_FOR_avx_vpermilv4df:
              case CODE_FOR_avx_roundpd256:
              case CODE_FOR_avx_roundps256:
                error ("the last argument must be a 4-bit immediate");
                return const0_rtx;

              case CODE_FOR_sse4_1_blendpd:
              case CODE_FOR_avx_vpermilv2df:
              case CODE_FOR_xop_vpermil2v2df3:
              case CODE_FOR_xop_vpermil2v4sf3:
              case CODE_FOR_xop_vpermil2v4df3:
              case CODE_FOR_xop_vpermil2v8sf3:
                error ("the last argument must be a 2-bit immediate");
                return const0_rtx;

              case CODE_FOR_avx_vextractf128v4df:
              case CODE_FOR_avx_vextractf128v8sf:
              case CODE_FOR_avx_vextractf128v8si:
              case CODE_FOR_avx_vinsertf128v4df:
              case CODE_FOR_avx_vinsertf128v8sf:
              case CODE_FOR_avx_vinsertf128v8si:
                error ("the last argument must be a 1-bit immediate");
                return const0_rtx;

              case CODE_FOR_avx_cmpsdv2df3:
              case CODE_FOR_avx_cmpssv4sf3:
              case CODE_FOR_avx_cmppdv2df3:
              case CODE_FOR_avx_cmppsv4sf3:
              case CODE_FOR_avx_cmppdv4df3:
              case CODE_FOR_avx_cmppsv8sf3:
                error ("the last argument must be a 5-bit immediate");
                return const0_rtx;

              default:
                switch (nargs_constant)
                  {
                  case 2:
                    if ((nargs - i) == nargs_constant)
                      {
                        error ("the next to last argument must be an 8-bit immediate");
                        break;
                      }
                  case 1:
                    error ("the last argument must be an 8-bit immediate");
                    break;
                  default:
                    gcc_unreachable ();
                  }
                return const0_rtx;
              }
        }
      else
        {
          if (VECTOR_MODE_P (mode))
            op = safe_vector_operand (op, mode);

          /* If we aren't optimizing, only allow one memory operand to
             be generated.  */
          if (memory_operand (op, mode))
            num_memory++;

          if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
            {
              if (optimize || !match || num_memory > 1)
                op = copy_to_mode_reg (mode, op);
            }
          else
            {
              op = copy_to_reg (op);
              op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
            }
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (real_target, args[0].op);
      break;
    case 2:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
      break;
    case 3:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
                             args[2].op);
      break;
    case 4:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
                             args[2].op, args[3].op);
      break;
    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of special insns
   with variable number of operands.  */

static rtx
ix86_expand_special_args_builtin (const struct builtin_description *d,
                                  tree exp, rtx target)
{
  tree arg;
  rtx pat, op;
  unsigned int i, nargs, arg_adjust, memory;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[3];
  enum insn_code icode = d->icode;
  bool last_arg_constant = false;
  const struct insn_data_d *insn_p = &insn_data[icode];
  enum machine_mode tmode = insn_p->operand[0].mode;
  enum { load, store } klass;

  switch ((enum ix86_builtin_func_type) d->flag)
    {
    case VOID_FTYPE_VOID:
      if (icode == CODE_FOR_avx_vzeroupper)
        target = GEN_INT (vzeroupper_intrinsic);
      emit_insn (GEN_FCN (icode) (target));
      return 0;
    case VOID_FTYPE_UINT64:
    case VOID_FTYPE_UNSIGNED:
      nargs = 0;
      klass = store;
      memory = 0;
      break;
    case UINT64_FTYPE_VOID:
    case UNSIGNED_FTYPE_VOID:
      nargs = 0;
      klass = load;
      memory = 0;
      break;
    case UINT64_FTYPE_PUNSIGNED:
    case V2DI_FTYPE_PV2DI:
    case V32QI_FTYPE_PCCHAR:
    case V16QI_FTYPE_PCCHAR:
    case V8SF_FTYPE_PCV4SF:
    case V8SF_FTYPE_PCFLOAT:
    case V4SF_FTYPE_PCFLOAT:
    case V4DF_FTYPE_PCV2DF:
    case V4DF_FTYPE_PCDOUBLE:
    case V2DF_FTYPE_PCDOUBLE:
    case VOID_FTYPE_PVOID:
      nargs = 1;
      klass = load;
      memory = 0;
      break;
    case VOID_FTYPE_PV2SF_V4SF:
    case VOID_FTYPE_PV4DI_V4DI:
    case VOID_FTYPE_PV2DI_V2DI:
    case VOID_FTYPE_PCHAR_V32QI:
    case VOID_FTYPE_PCHAR_V16QI:
    case VOID_FTYPE_PFLOAT_V8SF:
    case VOID_FTYPE_PFLOAT_V4SF:
    case VOID_FTYPE_PDOUBLE_V4DF:
    case VOID_FTYPE_PDOUBLE_V2DF:
    case VOID_FTYPE_PULONGLONG_ULONGLONG:
    case VOID_FTYPE_PINT_INT:
      nargs = 1;
      klass = store;
      /* Reserve memory operand for target.  */
      memory = ARRAY_SIZE (args);
      break;
    case V4SF_FTYPE_V4SF_PCV2SF:
    case V2DF_FTYPE_V2DF_PCDOUBLE:
      nargs = 2;
      klass = load;
      memory = 1;
      break;
    case V8SF_FTYPE_PCV8SF_V8SI:
    case V4DF_FTYPE_PCV4DF_V4DI:
    case V4SF_FTYPE_PCV4SF_V4SI:
    case V2DF_FTYPE_PCV2DF_V2DI:
      nargs = 2;
      klass = load;
      memory = 0;
      break;
    case VOID_FTYPE_PV8SF_V8SI_V8SF:
    case VOID_FTYPE_PV4DF_V4DI_V4DF:
    case VOID_FTYPE_PV4SF_V4SI_V4SF:
    case VOID_FTYPE_PV2DF_V2DI_V2DF:
      nargs = 2;
      klass = store;
      /* Reserve memory operand for target.  */
      memory = ARRAY_SIZE (args);
      break;
    case VOID_FTYPE_UINT_UINT_UINT:
    case VOID_FTYPE_UINT64_UINT_UINT:
    case UCHAR_FTYPE_UINT_UINT_UINT:
    case UCHAR_FTYPE_UINT64_UINT_UINT:
      nargs = 3;
      klass = load;
      memory = ARRAY_SIZE (args);
      last_arg_constant = true;
      break;
    default:
      gcc_unreachable ();
    }

  gcc_assert (nargs <= ARRAY_SIZE (args));

  if (klass == store)
    {
      arg = CALL_EXPR_ARG (exp, 0);
      op = expand_normal (arg);
      gcc_assert (target == 0);
      if (memory)
        target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
      else
        target = force_reg (tmode, op);
      arg_adjust = 1;
    }
  else
    {
      arg_adjust = 0;
      if (optimize
          || target == 0
          || !register_operand (target, tmode)
          || GET_MODE (target) != tmode)
        target = gen_reg_rtx (tmode);
    }

  for (i = 0; i < nargs; i++)
    {
      enum machine_mode mode = insn_p->operand[i + 1].mode;
      bool match;

      arg = CALL_EXPR_ARG (exp, i + arg_adjust);
      op = expand_normal (arg);
      match = insn_p->operand[i + 1].predicate (op, mode);

      if (last_arg_constant && (i + 1) == nargs)
        {
          if (!match)
            {
              if (icode == CODE_FOR_lwp_lwpvalsi3
                  || icode == CODE_FOR_lwp_lwpinssi3
                  || icode == CODE_FOR_lwp_lwpvaldi3
                  || icode == CODE_FOR_lwp_lwpinsdi3)
                error ("the last argument must be a 32-bit immediate");
              else
                error ("the last argument must be an 8-bit immediate");
              return const0_rtx;
            }
        }
      else
        {
          if (i == memory)
            {
              /* This must be the memory operand.  */
              op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
              gcc_assert (GET_MODE (op) == mode
                          || GET_MODE (op) == VOIDmode);
            }
          else
            {
              /* This must be register.  */
              if (VECTOR_MODE_P (mode))
                op = safe_vector_operand (op, mode);

              gcc_assert (GET_MODE (op) == mode
                          || GET_MODE (op) == VOIDmode);
              op = copy_to_mode_reg (mode, op);
            }
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      break;
    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;
    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;
  emit_insn (pat);
  return klass == store ? 0 : target;
}
27322 /* Return the integer constant in ARG. Constrain it to be in the range
27323 of the subparts of VEC_TYPE; issue an error if not. */
static unsigned HOST_WIDE_INT
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
27340 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27341 ix86_expand_vector_init. We DO have language-level syntax for this, in
27342 the form of (type){ init-list }. Except that since we can't place emms
27343 instructions from inside the compiler, we can't allow the use of MMX
27344 registers unless the user explicitly asks for it. So we do *not* define
27345 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
   we have builtins invoked by mmintrin.h that give us license to emit
27347 these sorts of instructions. */
27350 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
27352 enum machine_mode tmode = TYPE_MODE (type);
27353 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
27354 int i, n_elt = GET_MODE_NUNITS (tmode);
27355 rtvec v = rtvec_alloc (n_elt);
27357 gcc_assert (VECTOR_MODE_P (tmode));
27358 gcc_assert (call_expr_nargs (exp) == n_elt);
  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
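/* Illustration only (not part of the expander): mmintrin.h maps user
   intrinsics onto these vec_init builtins, so a source-level

     __m64 v = _mm_set_pi32 (hi, lo);

   arrives here as a call to __builtin_ia32_vec_init_v2si with the two
   scalars as individual arguments.  The exact intrinsic-to-builtin
   mapping is the header's business; this function only sees the
   CALL_EXPR.  */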
27373 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27374 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
27375 had a language-level syntax for referencing vector elements. */
static rtx
ix86_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  ix86_expand_vector_extract (true, target, op0, elt);

  return target;
}
27405 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27406 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
27407 a language-level syntax for referencing vector elements. */
static rtx
ix86_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1, target;
27417 arg0 = CALL_EXPR_ARG (exp, 0);
27418 arg1 = CALL_EXPR_ARG (exp, 1);
27419 arg2 = CALL_EXPR_ARG (exp, 2);
27421 tmode = TYPE_MODE (TREE_TYPE (arg0));
27422 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
27423 gcc_assert (VECTOR_MODE_P (tmode));
27425 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
27426 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
27427 elt = get_element_number (TREE_TYPE (arg0), arg2);
27429 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
27430 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
27432 op0 = force_reg (tmode, op0);
27433 op1 = force_reg (mode1, op1);
27435 /* OP0 is the source of these builtin functions and shouldn't be
27436 modified. Create a copy, use it and return it as target. */
27437 target = gen_reg_rtx (tmode);
27438 emit_move_insn (target, op0);
  ix86_expand_vector_set (true, target, op1, elt);

  return target;
}
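/* Illustration only: the SSE headers map element-insertion intrinsics
   onto these vec_set builtins, e.g.

     __m128i r = _mm_insert_epi16 (v, 42, 3);

   becomes __builtin_ia32_vec_set_v8hi (v, 42, 3), with the element
   number validated by get_element_number above.  */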
27444 /* Expand an expression EXP that calls a built-in function,
27445 with result going to TARGET if that's convenient
27446 (and in mode MODE if that's convenient).
27447 SUBTARGET may be used as the target for computing one of EXP's operands.
27448 IGNORE is nonzero if the value is to be ignored. */
static rtx
ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		     enum machine_mode mode ATTRIBUTE_UNUSED,
		     int ignore ATTRIBUTE_UNUSED)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1, arg2;
  rtx op0, op1, op2, pat;
  enum machine_mode mode0, mode1, mode2;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
27464 /* Determine whether the builtin function is available under the current ISA.
27465 Originally the builtin was not created if it wasn't applicable to the
27466 current ISA based on the command line switches. With function specific
27467 options, we need to check in the context of the function making the call
27468 whether it is supported. */
  if (ix86_builtins_isa[fcode].isa
      && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
    {
      char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
				       NULL, NULL, false);

      if (!opts)
	error ("%qE needs unknown isa option", fndecl);
      else
	{
	  gcc_assert (opts != NULL);
	  error ("%qE needs isa option %s", fndecl, opts);
	  free (opts);
	}
      return const0_rtx;
    }
  switch (fcode)
    {
    case IX86_BUILTIN_MASKMOVQ:
27489 case IX86_BUILTIN_MASKMOVDQU:
27490 icode = (fcode == IX86_BUILTIN_MASKMOVQ
27491 ? CODE_FOR_mmx_maskmovq
27492 : CODE_FOR_sse2_maskmovdqu);
27493 /* Note the arg order is different from the operand order. */
27494 arg1 = CALL_EXPR_ARG (exp, 0);
27495 arg2 = CALL_EXPR_ARG (exp, 1);
27496 arg0 = CALL_EXPR_ARG (exp, 2);
27497 op0 = expand_normal (arg0);
27498 op1 = expand_normal (arg1);
27499 op2 = expand_normal (arg2);
27500 mode0 = insn_data[icode].operand[0].mode;
27501 mode1 = insn_data[icode].operand[1].mode;
27502 mode2 = insn_data[icode].operand[2].mode;
27504 op0 = force_reg (Pmode, op0);
27505 op0 = gen_rtx_MEM (mode1, op0);
27507 if (!insn_data[icode].operand[0].predicate (op0, mode0))
27508 op0 = copy_to_mode_reg (mode0, op0);
27509 if (!insn_data[icode].operand[1].predicate (op1, mode1))
27510 op1 = copy_to_mode_reg (mode1, op1);
27511 if (!insn_data[icode].operand[2].predicate (op2, mode2))
27512 op2 = copy_to_mode_reg (mode2, op2);
      pat = GEN_FCN (icode) (op0, op1, op2);
      if (! pat)
	return 0;
      emit_insn (pat);
      return 0;
27519 case IX86_BUILTIN_LDMXCSR:
27520 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
27521 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27522 emit_move_insn (target, op0);
      emit_insn (gen_sse_ldmxcsr (target));
      return 0;
27526 case IX86_BUILTIN_STMXCSR:
27527 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27528 emit_insn (gen_sse_stmxcsr (target));
27529 return copy_to_mode_reg (SImode, target);
27531 case IX86_BUILTIN_CLFLUSH:
27532 arg0 = CALL_EXPR_ARG (exp, 0);
27533 op0 = expand_normal (arg0);
27534 icode = CODE_FOR_sse2_clflush;
27535 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27536 op0 = copy_to_mode_reg (Pmode, op0);
      emit_insn (gen_sse2_clflush (op0));
      return 0;
27541 case IX86_BUILTIN_MONITOR:
27542 arg0 = CALL_EXPR_ARG (exp, 0);
27543 arg1 = CALL_EXPR_ARG (exp, 1);
27544 arg2 = CALL_EXPR_ARG (exp, 2);
27545 op0 = expand_normal (arg0);
27546 op1 = expand_normal (arg1);
27547 op2 = expand_normal (arg2);
      if (!REG_P (op0))
	op0 = copy_to_mode_reg (Pmode, op0);
      if (!REG_P (op1))
	op1 = copy_to_mode_reg (SImode, op1);
      if (!REG_P (op2))
	op2 = copy_to_mode_reg (SImode, op2);
      emit_insn (ix86_gen_monitor (op0, op1, op2));
      return 0;
27557 case IX86_BUILTIN_MWAIT:
27558 arg0 = CALL_EXPR_ARG (exp, 0);
27559 arg1 = CALL_EXPR_ARG (exp, 1);
27560 op0 = expand_normal (arg0);
27561 op1 = expand_normal (arg1);
      if (!REG_P (op0))
	op0 = copy_to_mode_reg (SImode, op0);
      if (!REG_P (op1))
	op1 = copy_to_mode_reg (SImode, op1);
      emit_insn (gen_sse3_mwait (op0, op1));
      return 0;
27569 case IX86_BUILTIN_VEC_INIT_V2SI:
27570 case IX86_BUILTIN_VEC_INIT_V4HI:
27571 case IX86_BUILTIN_VEC_INIT_V8QI:
27572 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
27574 case IX86_BUILTIN_VEC_EXT_V2DF:
27575 case IX86_BUILTIN_VEC_EXT_V2DI:
27576 case IX86_BUILTIN_VEC_EXT_V4SF:
27577 case IX86_BUILTIN_VEC_EXT_V4SI:
27578 case IX86_BUILTIN_VEC_EXT_V8HI:
27579 case IX86_BUILTIN_VEC_EXT_V2SI:
27580 case IX86_BUILTIN_VEC_EXT_V4HI:
27581 case IX86_BUILTIN_VEC_EXT_V16QI:
27582 return ix86_expand_vec_ext_builtin (exp, target);
27584 case IX86_BUILTIN_VEC_SET_V2DI:
27585 case IX86_BUILTIN_VEC_SET_V4SF:
27586 case IX86_BUILTIN_VEC_SET_V4SI:
27587 case IX86_BUILTIN_VEC_SET_V8HI:
27588 case IX86_BUILTIN_VEC_SET_V4HI:
27589 case IX86_BUILTIN_VEC_SET_V16QI:
27590 return ix86_expand_vec_set_builtin (exp);
27592 case IX86_BUILTIN_VEC_PERM_V2DF:
27593 case IX86_BUILTIN_VEC_PERM_V4SF:
27594 case IX86_BUILTIN_VEC_PERM_V2DI:
27595 case IX86_BUILTIN_VEC_PERM_V4SI:
27596 case IX86_BUILTIN_VEC_PERM_V8HI:
27597 case IX86_BUILTIN_VEC_PERM_V16QI:
27598 case IX86_BUILTIN_VEC_PERM_V2DI_U:
27599 case IX86_BUILTIN_VEC_PERM_V4SI_U:
27600 case IX86_BUILTIN_VEC_PERM_V8HI_U:
27601 case IX86_BUILTIN_VEC_PERM_V16QI_U:
27602 case IX86_BUILTIN_VEC_PERM_V4DF:
27603 case IX86_BUILTIN_VEC_PERM_V8SF:
27604 return ix86_expand_vec_perm_builtin (exp);
    case IX86_BUILTIN_INFQ:
    case IX86_BUILTIN_HUGE_VALQ:
      {
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);

	tmp = validize_mem (force_const_mem (mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (mode);

	emit_move_insn (target, tmp);
	return target;
      }
27624 case IX86_BUILTIN_LLWPCB:
27625 arg0 = CALL_EXPR_ARG (exp, 0);
27626 op0 = expand_normal (arg0);
27627 icode = CODE_FOR_lwp_llwpcb;
27628 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27629 op0 = copy_to_mode_reg (Pmode, op0);
      emit_insn (gen_lwp_llwpcb (op0));
      return 0;
27633 case IX86_BUILTIN_SLWPCB:
      icode = CODE_FOR_lwp_slwpcb;
      if (!target
	  || !insn_data[icode].operand[0].predicate (target, Pmode))
	target = gen_reg_rtx (Pmode);
      emit_insn (gen_lwp_slwpcb (target));
      return target;
27641 case IX86_BUILTIN_BEXTRI32:
27642 case IX86_BUILTIN_BEXTRI64:
27643 arg0 = CALL_EXPR_ARG (exp, 0);
27644 arg1 = CALL_EXPR_ARG (exp, 1);
27645 op0 = expand_normal (arg0);
27646 op1 = expand_normal (arg1);
27647 icode = (fcode == IX86_BUILTIN_BEXTRI32
27648 ? CODE_FOR_tbm_bextri_si
27649 : CODE_FOR_tbm_bextri_di);
      if (!CONST_INT_P (op1))
	{
	  error ("last argument must be an immediate");
	  return const0_rtx;
	}
      else
	{
	  unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
	  unsigned char lsb_index = INTVAL (op1) & 0xFF;
	  op1 = GEN_INT (length);
	  op2 = GEN_INT (lsb_index);
	  pat = GEN_FCN (icode) (target, op0, op1, op2);
	  if (pat)
	    emit_insn (pat);
	  return target;
	}
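      /* Illustration only: the BEXTRI control word packs a bit-field
	 <length, start> as (length << 8) | start, so a control of
	 0x0810 extracts 8 bits beginning at bit 16.  The op1/op2
	 split above merely unpacks those two byte fields.  */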
    case IX86_BUILTIN_RDRAND16_STEP:
      icode = CODE_FOR_rdrandhi_1;
      mode0 = HImode;
      goto rdrand_step;

    case IX86_BUILTIN_RDRAND32_STEP:
      icode = CODE_FOR_rdrandsi_1;
      mode0 = SImode;
      goto rdrand_step;

    case IX86_BUILTIN_RDRAND64_STEP:
      icode = CODE_FOR_rdranddi_1;
      mode0 = DImode;

rdrand_step:
27682 op0 = gen_reg_rtx (mode0);
27683 emit_insn (GEN_FCN (icode) (op0));
27685 arg0 = CALL_EXPR_ARG (exp, 0);
27686 op1 = expand_normal (arg0);
27687 if (!address_operand (op1, VOIDmode))
27688 op1 = copy_addr_to_reg (op1);
27689 emit_move_insn (gen_rtx_MEM (mode0, op1), op0);
27691 op1 = gen_reg_rtx (SImode);
27692 emit_move_insn (op1, CONST1_RTX (SImode));
27694 /* Emit SImode conditional move. */
27695 if (mode0 == HImode)
27697 op2 = gen_reg_rtx (SImode);
27698 emit_insn (gen_zero_extendhisi2 (op2, op0));
	}
      else if (mode0 == SImode)
	op2 = op0;
      else
	op2 = gen_rtx_SUBREG (SImode, op0, 0);

      if (target == 0)
	target = gen_reg_rtx (SImode);

      pat = gen_rtx_GEU (VOIDmode, gen_rtx_REG (CCCmode, FLAGS_REG),
			 const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_IF_THEN_ELSE (SImode, pat, op2, op1)));
      return target;

    default:
      break;
    }
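  /* Illustration only: the RDRAND expansion above corresponds to the
     immintrin.h usage pattern

       unsigned int r;
       if (_rdrand32_step (&r))
	 use (r);

     i.e. the random value is stored through the pointer argument and
     the builtin's result is 1 iff the hardware signalled success via
     the carry flag, hence the GEU test on CCCmode.  */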
27718 for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
27721 if (d->code == fcode)
27722 return ix86_expand_special_args_builtin (d, exp, target);
27724 for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    if (d->code == fcode)
      switch (fcode)
	{
	case IX86_BUILTIN_FABSQ:
	case IX86_BUILTIN_COPYSIGNQ:
	  if (!TARGET_SSE2)
	    /* Emit a normal call if SSE2 isn't available.  */
	    return expand_call (exp, target, ignore);
	default:
	  return ix86_expand_args_builtin (d, exp, target);
	}
27739 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
27740 if (d->code == fcode)
27741 return ix86_expand_sse_comi (d, exp, target);
27743 for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
27746 if (d->code == fcode)
27747 return ix86_expand_sse_pcmpestr (d, exp, target);
27749 for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
27752 if (d->code == fcode)
27753 return ix86_expand_sse_pcmpistr (d, exp, target);
27755 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
27756 if (d->code == fcode)
27757 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
27758 (enum ix86_builtin_func_type)
27759 d->flag, d->comparison);
27761 gcc_unreachable ();
27764 /* Returns a function decl for a vectorized version of the builtin function
27765 with builtin function code FN and the result vector type TYPE, or NULL_TREE
27766 if it is not available. */
static tree
ix86_builtin_vectorized_function (tree fndecl, tree type_out,
				  tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;
  enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
27776 if (TREE_CODE (type_out) != VECTOR_TYPE
27777 || TREE_CODE (type_in) != VECTOR_TYPE
      || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return NULL_TREE;
27781 out_mode = TYPE_MODE (TREE_TYPE (type_out));
27782 out_n = TYPE_VECTOR_SUBPARTS (type_out);
27783 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27784 in_n = TYPE_VECTOR_SUBPARTS (type_in);
  switch (fn)
    {
    case BUILT_IN_SQRT:
      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_SQRTPD];
	  else if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_SQRTPD256];
	}
      break;

    case BUILT_IN_SQRTF:
      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
	}
      break;

    case BUILT_IN_LRINT:
      if (out_mode == SImode && out_n == 4
	  && in_mode == DFmode && in_n == 2)
	return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
      break;

    case BUILT_IN_LRINTF:
      if (out_mode == SImode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
	}
      break;

    case BUILT_IN_COPYSIGN:
      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
	  else if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
	}
      break;

    case BUILT_IN_COPYSIGNF:
      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
	  else if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
	}
      break;

    case BUILT_IN_FMA:
      if (out_mode == DFmode && in_mode == DFmode)
	{
	  if (out_n == 2 && in_n == 2)
	    return ix86_builtins[IX86_BUILTIN_VFMADDPD];
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
	}
      break;

    case BUILT_IN_FMAF:
      if (out_mode == SFmode && in_mode == SFmode)
	{
	  if (out_n == 4 && in_n == 4)
	    return ix86_builtins[IX86_BUILTIN_VFMADDPS];
	  if (out_n == 8 && in_n == 8)
	    return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
	}
      break;

    default:
      break;
    }
27868 /* Dispatch to a handler for a vectorization library. */
27869 if (ix86_veclib_handler)
    return ix86_veclib_handler ((enum built_in_function) fn, type_out,
				type_in);

  return NULL_TREE;
}
27876 /* Handler for an SVML-style interface to
27877 a library with vectorized intrinsics. */
static tree
ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
{
  char name[20];
  tree fntype, new_fndecl, args;
  unsigned arity;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* The SVML is suitable for unsafe math only.  */
  if (!flag_unsafe_math_optimizations)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    case BUILT_IN_EXP:
    case BUILT_IN_LOG:
    case BUILT_IN_LOG10:
    case BUILT_IN_POW:
    case BUILT_IN_TANH:
    case BUILT_IN_TAN:
    case BUILT_IN_ATAN:
27910 case BUILT_IN_ATAN2:
27911 case BUILT_IN_ATANH:
27912 case BUILT_IN_CBRT:
    case BUILT_IN_SINH:
    case BUILT_IN_SIN:
    case BUILT_IN_ASINH:
27916 case BUILT_IN_ASIN:
    case BUILT_IN_COSH:
    case BUILT_IN_COS:
    case BUILT_IN_ACOSH:
27920 case BUILT_IN_ACOS:
      if (el_mode != DFmode || n != 2)
	return NULL_TREE;
      break;
27925 case BUILT_IN_EXPF:
27926 case BUILT_IN_LOGF:
27927 case BUILT_IN_LOG10F:
27928 case BUILT_IN_POWF:
27929 case BUILT_IN_TANHF:
27930 case BUILT_IN_TANF:
27931 case BUILT_IN_ATANF:
27932 case BUILT_IN_ATAN2F:
27933 case BUILT_IN_ATANHF:
27934 case BUILT_IN_CBRTF:
27935 case BUILT_IN_SINHF:
27936 case BUILT_IN_SINF:
27937 case BUILT_IN_ASINHF:
27938 case BUILT_IN_ASINF:
27939 case BUILT_IN_COSHF:
27940 case BUILT_IN_COSF:
27941 case BUILT_IN_ACOSHF:
27942 case BUILT_IN_ACOSF:
      if (el_mode != SFmode || n != 4)
	return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }
27951 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
  if (fn == BUILT_IN_LOGF)
    strcpy (name, "vmlsLn4");
  else if (fn == BUILT_IN_LOG)
    strcpy (name, "vmldLn2");
  else if (n == 4)
    {
      sprintf (name, "vmls%s", bname+10);
      name[strlen (name)-1] = '4';
    }
  else
    sprintf (name, "vmld%s2", bname+10);

  /* Convert to uppercase.  */
  name[4] &= ~0x20;
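  /* Illustration only: for BUILT_IN_SINF with n == 4 the code above
     takes bname "__builtin_sinf", forms "vmlssinf", rewrites the
     trailing 'f' to '4' and upcases name[4], yielding "vmlsSin4",
     the SVML entry point for a 4-wide single-precision sine.  */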
  arity = 0;
  for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
       args = TREE_CHAIN (args))
    arity++;

  if (arity == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
27978 /* Build a function declaration for the vectorized function. */
27979 new_fndecl = build_decl (BUILTINS_LOCATION,
27980 FUNCTION_DECL, get_identifier (name), fntype);
27981 TREE_PUBLIC (new_fndecl) = 1;
27982 DECL_EXTERNAL (new_fndecl) = 1;
27983 DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
27989 /* Handler for an ACML-style interface to
27990 a library with vectorized intrinsics. */
static tree
ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
{
  char name[20] = "__vr.._";
  tree fntype, new_fndecl, args;
  unsigned arity;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;
  /* The ACML is 64bits only and suitable for unsafe math only as
     it does not correctly support parts of IEEE with the required
     precision such as denormals.  */
  if (!TARGET_64BIT
      || !flag_unsafe_math_optimizations)
    return NULL_TREE;
28009 el_mode = TYPE_MODE (TREE_TYPE (type_out));
28010 n = TYPE_VECTOR_SUBPARTS (type_out);
28011 in_mode = TYPE_MODE (TREE_TYPE (type_in));
28012 in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    case BUILT_IN_SIN:
    case BUILT_IN_COS:
    case BUILT_IN_EXP:
    case BUILT_IN_LOG:
28023 case BUILT_IN_LOG2:
28024 case BUILT_IN_LOG10:
      name[4] = 'd';
      name[5] = '2';
      if (el_mode != DFmode
	  || n != 2)
	return NULL_TREE;
      break;
28032 case BUILT_IN_SINF:
28033 case BUILT_IN_COSF:
28034 case BUILT_IN_EXPF:
28035 case BUILT_IN_POWF:
28036 case BUILT_IN_LOGF:
28037 case BUILT_IN_LOG2F:
28038 case BUILT_IN_LOG10F:
      name[4] = 's';
      name[5] = '4';
      if (el_mode != SFmode
	  || n != 4)
	return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }
28050 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
28051 sprintf (name + 7, "%s", bname+10);
  arity = 0;
  for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
       args = TREE_CHAIN (args))
    arity++;

  if (arity == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
28063 /* Build a function declaration for the vectorized function. */
28064 new_fndecl = build_decl (BUILTINS_LOCATION,
28065 FUNCTION_DECL, get_identifier (name), fntype);
28066 TREE_PUBLIC (new_fndecl) = 1;
28067 DECL_EXTERNAL (new_fndecl) = 1;
28068 DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
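/* Illustration only: for BUILT_IN_SIN the template "__vr.._" becomes
   "__vrd2_" (double, 2-wide) and appending bname+10 ("sin") yields
   "__vrd2_sin", the ACML vector routine the vectorizer will call.  */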
28075 /* Returns a decl of a function that implements conversion of an integer vector
28076 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
28077 are the types involved when converting according to CODE.
28078 Return NULL_TREE if it is not available. */
static tree
ix86_vectorize_builtin_conversion (unsigned int code,
				   tree dest_type, tree src_type)
{
  if (! TARGET_SSE2)
    return NULL_TREE;

  switch (code)
    {
    case FLOAT_EXPR:
      switch (TYPE_MODE (src_type))
	{
	case V4SImode:
	  switch (TYPE_MODE (dest_type))
	    {
	    case V4SFmode:
	      return (TYPE_UNSIGNED (src_type)
		      ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
		      : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
	    case V4DFmode:
	      return (TYPE_UNSIGNED (src_type)
		      ? NULL_TREE
		      : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
	    default:
	      return NULL_TREE;
	    }

	case V8SImode:
	  switch (TYPE_MODE (dest_type))
	    {
	    case V8SFmode:
	      return (TYPE_UNSIGNED (src_type)
		      ? NULL_TREE
		      : ix86_builtins[IX86_BUILTIN_CVTDQ2PS256]);
	    default:
	      return NULL_TREE;
	    }

	default:
	  return NULL_TREE;
	}

    case FIX_TRUNC_EXPR:
      switch (TYPE_MODE (dest_type))
	{
	case V4SImode:
	  switch (TYPE_MODE (src_type))
	    {
	    case V4SFmode:
	      return (TYPE_UNSIGNED (dest_type)
		      ? NULL_TREE
		      : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
	    case V4DFmode:
	      return (TYPE_UNSIGNED (dest_type)
		      ? NULL_TREE
		      : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
	    default:
	      return NULL_TREE;
	    }

	case V8SImode:
	  switch (TYPE_MODE (src_type))
	    {
	    case V8SFmode:
	      return (TYPE_UNSIGNED (dest_type)
		      ? NULL_TREE
		      : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
	    default:
	      return NULL_TREE;
	    }

	default:
	  return NULL_TREE;
	}

    default:
      return NULL_TREE;
    }
}
28164 /* Returns a code for a target-specific builtin that implements
28165 reciprocal of the function, or NULL_TREE if not available. */
static tree
ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
			 bool sqrt ATTRIBUTE_UNUSED)
{
  if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
	 && flag_finite_math_only && !flag_trapping_math
	 && flag_unsafe_math_optimizations))
    return NULL_TREE;

  if (md_fn)
    /* Machine dependent builtins.  */
    switch (fn)
      {
	/* Vectorized version of sqrt to rsqrt conversion.  */
      case IX86_BUILTIN_SQRTPS_NR:
	return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];

      case IX86_BUILTIN_SQRTPS_NR256:
	return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];

      default:
	return NULL_TREE;
      }
  else
    /* Normal builtins.  */
    switch (fn)
      {
	/* Sqrt to rsqrt conversion.  */
      case BUILT_IN_SQRTF:
	return ix86_builtins[IX86_BUILTIN_RSQRTF];

      default:
	return NULL_TREE;
      }
}
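/* Illustration only: under the flags tested above, the substitution
   lets sqrtf (x) be computed from the reciprocal-square-root estimate
   refined by one Newton-Raphson step,

     r = rsqrt (x);  sqrt (x) ~= x * r * (1.5f - 0.5f * x * r * r)

   trading a couple of ulps of accuracy for far lower latency than a
   real sqrtss; the _NR pattern variants carry that refinement.  */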
28203 /* Helper for avx_vpermilps256_operand et al. This is also used by
28204 the expansion functions to turn the parallel back into a mask.
28205 The return value is 0 for no match and the imm8+1 for a match. */
int
avx_vpermilp_parallel (rtx par, enum machine_mode mode)
{
  unsigned i, nelt = GET_MODE_NUNITS (mode);
  unsigned mask = 0;
  unsigned char ipar[8];

  if (XVECLEN (par, 0) != (int) nelt)
    return 0;
28217 /* Validate that all of the elements are constants, and not totally
28218 out of range. Copy the data into an integral array to make the
28219 subsequent checks easier. */
  for (i = 0; i < nelt; ++i)
    {
      rtx er = XVECEXP (par, 0, i);
      unsigned HOST_WIDE_INT ei;

      if (!CONST_INT_P (er))
	return 0;
      ei = INTVAL (er);
      if (ei >= nelt)
	return 0;
      ipar[i] = ei;
    }
  switch (mode)
    {
    case V4DFmode:
      /* In the 256-bit DFmode case, we can only move elements within
	 a 128-bit lane.  */
      for (i = 0; i < 2; ++i)
	{
	  if (ipar[i] >= 2)
	    return 0;
	  mask |= ipar[i] << i;
	}
      for (i = 2; i < 4; ++i)
	{
	  if (ipar[i] < 2)
	    return 0;
	  mask |= (ipar[i] - 2) << i;
	}
      break;

    case V8SFmode:
      /* In the 256-bit SFmode case, we have full freedom of movement
	 within the low 128-bit lane, but the high 128-bit lane must
	 mirror the exact same pattern.  */
      for (i = 0; i < 4; ++i)
	if (ipar[i] + 4 != ipar[i + 4])
	  return 0;
      nelt = 4;
      /* FALLTHRU */

    case V2DFmode:
    case V4SFmode:
      /* In the 128-bit case, we've full freedom in the placement of
	 the elements from the source operand.  */
      for (i = 0; i < nelt; ++i)
	mask |= ipar[i] << (i * (nelt / 2));
      break;

    default:
      gcc_unreachable ();
    }

  /* Make sure success has a non-zero value by adding one.  */
  return mask + 1;
}
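/* Illustration only: for V4SFmode a parallel selecting elements
   (1, 0, 3, 2) packs as 1<<0 | 0<<2 | 3<<4 | 2<<6 = 0xB1, i.e. the
   imm8 of "vpermilps $0xb1", and the function returns 0xB1 + 1.  */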
28278 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
28279 the expansion functions to turn the parallel back into a mask.
28280 The return value is 0 for no match and the imm8+1 for a match. */
int
avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
{
  unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
  unsigned mask = 0;
  unsigned char ipar[8];

  if (XVECLEN (par, 0) != (int) nelt)
    return 0;
28292 /* Validate that all of the elements are constants, and not totally
28293 out of range. Copy the data into an integral array to make the
28294 subsequent checks easier. */
  for (i = 0; i < nelt; ++i)
    {
      rtx er = XVECEXP (par, 0, i);
      unsigned HOST_WIDE_INT ei;

      if (!CONST_INT_P (er))
	return 0;
      ei = INTVAL (er);
      if (ei >= 2 * nelt)
	return 0;
      ipar[i] = ei;
    }
28308 /* Validate that the halves of the permute are halves. */
  for (i = 0; i < nelt2 - 1; ++i)
    if (ipar[i] + 1 != ipar[i + 1])
      return 0;
  for (i = nelt2; i < nelt - 1; ++i)
    if (ipar[i] + 1 != ipar[i + 1])
      return 0;
28316 /* Reconstruct the mask. */
  for (i = 0; i < 2; ++i)
    {
      unsigned e = ipar[i * nelt2];
      if (e % nelt2)
	return 0;
      e /= nelt2;
      mask |= e << (i * 4);
    }

  /* Make sure success has a non-zero value by adding one.  */
  return mask + 1;
}
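/* Illustration only: for V8SFmode a parallel picking elements 4..7
   then 0..3 (i.e. swapping the two 128-bit halves) gives e = 1 for
   the low half and e = 0 for the high half, so mask = 0x01 -- the
   imm8 of "vperm2f128 $0x01" -- and the function returns 0x02.  */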
28331 /* Store OPERAND to the memory after reload is completed. This means
28332 that we can't easily use assign_stack_local. */
static rtx
ix86_force_to_memory (enum machine_mode mode, rtx operand)
{
  rtx result;

  gcc_assert (reload_completed);
  if (ix86_using_red_zone ())
    {
      result = gen_rtx_MEM (mode,
			    gen_rtx_PLUS (Pmode,
					  stack_pointer_rtx,
					  GEN_INT (-RED_ZONE_SIZE)));
      emit_move_insn (result, operand);
    }
  else if (TARGET_64BIT)
    {
      switch (mode)
	{
	case HImode:
	case SImode:
	  operand = gen_lowpart (DImode, operand);
	  /* FALLTHRU */
	case DImode:
	  emit_insn (gen_rtx_SET (VOIDmode,
				  gen_rtx_MEM (DImode,
					       gen_rtx_PRE_DEC (DImode,
								stack_pointer_rtx)),
				  operand));
	  break;
	default:
	  gcc_unreachable ();
	}
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  else
    {
      switch (mode)
	{
	case DImode:
	  {
	    rtx operands[2];
	    split_double_mode (mode, &operand, 1, operands, operands + 1);
	    emit_insn (gen_rtx_SET (VOIDmode,
				    gen_rtx_MEM (SImode,
						 gen_rtx_PRE_DEC (Pmode,
								  stack_pointer_rtx)),
				    operands[1]));
	    emit_insn (gen_rtx_SET (VOIDmode,
				    gen_rtx_MEM (SImode,
						 gen_rtx_PRE_DEC (Pmode,
								  stack_pointer_rtx)),
				    operands[0]));
	  }
	  break;
	case HImode:
	  /* Store HImodes as SImodes.  */
	  operand = gen_lowpart (SImode, operand);
	  /* FALLTHRU */
	case SImode:
	  emit_insn (gen_rtx_SET (VOIDmode,
				  gen_rtx_MEM (GET_MODE (operand),
					       gen_rtx_PRE_DEC (SImode,
								stack_pointer_rtx)),
				  operand));
	  break;
	default:
	  gcc_unreachable ();
	}
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  return result;
}
28410 /* Free operand from the memory. */
static void
ix86_free_from_memory (enum machine_mode mode)
{
  if (!ix86_using_red_zone ())
    {
      int size;

      if (mode == DImode || TARGET_64BIT)
	size = 8;
      else
	size = 4;
      /* Use LEA to deallocate stack space.  In peephole2 it will be converted
	 to pop or add instruction if registers are available.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					    GEN_INT (size))));
    }
}
28430 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
28431 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
static const reg_class_t *
i386_ira_cover_classes (void)
{
  static const reg_class_t sse_fpmath_classes[] = {
    GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
  };
  static const reg_class_t no_sse_fpmath_classes[] = {
    GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
  };

  return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
}
28446 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
28448 Put float CONST_DOUBLE in the constant pool instead of fp regs.
28449 QImode must go into class Q_REGS.
28450 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
28451 movdf to do mem-to-mem moves through integer regs. */
static reg_class_t
ix86_preferred_reload_class (rtx x, reg_class_t regclass)
{
  enum machine_mode mode = GET_MODE (x);

  /* We're only allowed to return a subclass of CLASS.  Many of the
     following checks fail for NO_REGS, so eliminate that early.  */
  if (regclass == NO_REGS)
    return NO_REGS;

  /* All classes can load zeros.  */
  if (x == CONST0_RTX (mode))
    return regclass;
28467 /* Force constants into memory if we are loading a (nonzero) constant into
28468 an MMX or SSE register. This is because there are no MMX/SSE instructions
28469 to load from a constant. */
  if (CONSTANT_P (x)
      && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
    return NO_REGS;
28474 /* Prefer SSE regs only, if we can use them for math. */
28475 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
28476 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
  /* Floating-point constants need more complex checks.  */
  if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
    {
      /* General regs can load everything.  */
      if (reg_class_subset_p (regclass, GENERAL_REGS))
	return regclass;

      /* Floats can load 0 and 1 plus some others.  Note that we eliminated
	 zero above.  We only want to wind up preferring 80387 registers if
	 we plan on doing computation with them.  */
      if (TARGET_80387
	  && standard_80387_constant_p (x) > 0)
	{
	  /* Limit class to non-sse.  */
	  if (regclass == FLOAT_SSE_REGS)
	    return FLOAT_REGS;
	  if (regclass == FP_TOP_SSE_REGS)
	    return FP_TOP_REG;
	  if (regclass == FP_SECOND_SSE_REGS)
	    return FP_SECOND_REG;
	  if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
	    return regclass;
	}

      return NO_REGS;
    }
28505 /* Generally when we see PLUS here, it's the function invariant
28506 (plus soft-fp const_int). Which can only be computed into general
28508 if (GET_CODE (x) == PLUS)
28509 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
28511 /* QImode constants are easy to load, but non-constant QImode data
28512 must go into Q_REGS. */
  if (GET_MODE (x) == QImode && !CONSTANT_P (x))
    {
      if (reg_class_subset_p (regclass, Q_REGS))
	return regclass;
      if (reg_class_subset_p (Q_REGS, regclass))
	return Q_REGS;
      return NO_REGS;
    }

  return regclass;
}
28525 /* Discourage putting floating-point values in SSE registers unless
28526 SSE math is being used, and likewise for the 387 registers. */
static reg_class_t
ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
{
  enum machine_mode mode = GET_MODE (x);
28532 /* Restrict the output reload class to the register bank that we are doing
28533 math on. If we would like not to return a subset of CLASS, reject this
28534 alternative: if reload cannot do this, it will still use its choice. */
28536 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
28537 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
  if (X87_FLOAT_MODE_P (mode))
    {
      if (regclass == FP_TOP_SSE_REGS)
	return FP_TOP_REG;
      else if (regclass == FP_SECOND_SSE_REGS)
	return FP_SECOND_REG;
      else
	return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
    }

  return regclass;
}
static reg_class_t
ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
		       enum machine_mode mode,
		       secondary_reload_info *sri ATTRIBUTE_UNUSED)
{
  /* QImode spills from non-QI registers require
     intermediate register on 32bit targets.  */
  if (!TARGET_64BIT
      && !in_p && mode == QImode
      && (rclass == GENERAL_REGS
	  || rclass == LEGACY_REGS
	  || rclass == INDEX_REGS))
    {
      int regno;

      if (REG_P (x))
	regno = REGNO (x);
      else
	regno = -1;
28572 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
28573 regno = true_regnum (x);
      /* Return Q_REGS if the operand is in memory.  */
      if (regno == -1)
	return Q_REGS;
    }
28580 /* This condition handles corner case where an expression involving
28581 pointers gets vectorized. We're trying to use the address of a
28582 stack slot as a vector initializer.
28584 (set (reg:V2DI 74 [ vect_cst_.2 ])
28585 (vec_duplicate:V2DI (reg/f:DI 20 frame)))
28587 Eventually frame gets turned into sp+offset like this:
28589 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28590 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28591 (const_int 392 [0x188]))))
28593 That later gets turned into:
28595 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28596 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28597 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))
28599 We'll have the following reload recorded:
28601 Reload 0: reload_in (DI) =
28602 (plus:DI (reg/f:DI 7 sp)
28603 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
28604 reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28605 SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
28606 reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
28607 reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28608 reload_reg_rtx: (reg:V2DI 22 xmm1)
28610 Which isn't going to work since SSE instructions can't handle scalar
28611 additions. Returning GENERAL_REGS forces the addition into integer
28612 register and reload can handle subsequent reloads without problems. */
28614 if (in_p && GET_CODE (x) == PLUS
28615 && SSE_CLASS_P (rclass)
28616 && SCALAR_INT_MODE_P (mode))
    return GENERAL_REGS;

  return NO_REGS;
}
28622 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
static bool
ix86_class_likely_spilled_p (reg_class_t rclass)
{
  switch (rclass)
    {
      case AREG:
      case DREG:
      case CREG:
      case BREG:
      case AD_REGS:
      case CLOBBERED_REGS:
      case Q_REGS:
      case SSE_FIRST_REG:
      case FP_TOP_REG:
      case FP_SECOND_REG:
	return true;

      default:
	break;
    }

  return false;
}
28648 /* If we are copying between general and FP registers, we need a memory
28649 location. The same is true for SSE and MMX registers.
28651 To optimize register_move_cost performance, allow inline variant.
28653 The macro can't work reliably when one of the CLASSES is class containing
28654 registers from multiple units (SSE, MMX, integer). We avoid this by never
28655 combining those units in single alternative in the machine description.
28656 Ensure that this constraint holds to avoid unexpected surprises.
28658 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
28659 enforce these sanity checks. */
static inline bool
inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
				enum machine_mode mode, int strict)
{
28665 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
28666 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
28667 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
28668 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
28669 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
28670 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
    {
      gcc_assert (!strict);
      return true;
    }
  if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
    return true;
28679 /* ??? This is a lie. We do have moves between mmx/general, and for
28680 mmx/sse2. But by saying we need secondary memory we discourage the
28681 register allocator from using the mmx registers unless needed. */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
    return true;
  if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
    {
      /* SSE1 doesn't have any direct moves from other classes.  */
      if (!TARGET_SSE2)
	return true;

      /* If the target says that inter-unit moves are more expensive
	 than moving through memory, then don't generate them.  */
      if (!TARGET_INTER_UNIT_MOVES)
	return true;

      /* Between SSE and general, we have moves no larger than word size.  */
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	return true;
    }

  return false;
}
bool
ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
			      enum machine_mode mode, int strict)
{
  return inline_secondary_memory_needed (class1, class2, mode, strict);
}
28711 /* Return true if the registers in CLASS cannot represent the change from
28712 modes FROM to TO. */
static bool
ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
			       enum reg_class regclass)
{
  if (from == to)
    return false;

  /* x87 registers can't do subreg at all, as all values are reformatted
     to extended precision.  */
  if (MAYBE_FLOAT_CLASS_P (regclass))
    return true;

  if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
    {
28726 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
28728 /* Vector registers do not support QI or HImode loads. If we don't
28729 disallow a change to these modes, reload will assume it's ok to
28730 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
28731 the vec_dupv4hi pattern. */
      if (GET_MODE_SIZE (from) < 4)
	return true;
28735 /* Vector registers do not support subreg with nonzero offsets, which
28736 are otherwise valid for integer registers. Since we can't see
28737 whether we have a nonzero offset from here, prohibit all
28738 nonparadoxical subregs changing size. */
      if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
	return true;
    }

  return false;
}
28746 /* Return the cost of moving data of mode M between a
28747 register and memory. A value of 2 is the default; this cost is
28748 relative to those in `REGISTER_MOVE_COST'.
28750 This function is used extensively by register_move_cost that is used to
28751 build tables at startup. Make it inline in this case.
28752 When IN is 2, return maximum of in and out move cost.
28754 If moving between registers and memory is more expensive than
28755 between two registers, you should define this macro to express the
   Model also increased moving costs of QImode registers in non
   Q_REGS classes.  */

static inline int
inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
			 int in)
{
  int cost;
28766 if (FLOAT_CLASS_P (regclass))
28784 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
28785 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
28787 if (SSE_CLASS_P (regclass))
28790 switch (GET_MODE_SIZE (mode))
28805 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
28806 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
28808 if (MMX_CLASS_P (regclass))
28811 switch (GET_MODE_SIZE (mode))
28823 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
28824 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
28826 switch (GET_MODE_SIZE (mode))
28829 if (Q_CLASS_P (regclass) || TARGET_64BIT)
28832 return ix86_cost->int_store[0];
28833 if (TARGET_PARTIAL_REG_DEPENDENCY
28834 && optimize_function_for_speed_p (cfun))
28835 cost = ix86_cost->movzbl_load;
28837 cost = ix86_cost->int_load[0];
28839 return MAX (cost, ix86_cost->int_store[0]);
28845 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
28847 return ix86_cost->movzbl_load;
28849 return ix86_cost->int_store[0] + 4;
28854 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
28855 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
28857 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
28858 if (mode == TFmode)
28861 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
28863 cost = ix86_cost->int_load[2];
28865 cost = ix86_cost->int_store[2];
28866 return (cost * (((int) GET_MODE_SIZE (mode)
28867 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
}

static int
ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
		       bool in)
{
  return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
}
28879 /* Return the cost of moving data from a register in class CLASS1 to
28880 one in class CLASS2.
28882 It is not required that the cost always equal 2 when FROM is the same as TO;
28883 on some machines it is expensive to move between registers if they are not
28884 general registers. */
static int
ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
			 reg_class_t class2_i)
{
28890 enum reg_class class1 = (enum reg_class) class1_i;
28891 enum reg_class class2 = (enum reg_class) class2_i;
28893 /* In case we require secondary memory, compute cost of the store followed
28894 by load. In order to avoid bad register allocation choices, we need
28895 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
  if (inline_secondary_memory_needed (class1, class2, mode, 0))
    {
      int cost = 1;

      cost += inline_memory_move_cost (mode, class1, 2);
      cost += inline_memory_move_cost (mode, class2, 2);
28904 /* In case of copying from general_purpose_register we may emit multiple
28905 stores followed by single load causing memory size mismatch stall.
28906 Count this as arbitrarily high cost of 20. */
      if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
	cost += 20;

      return cost;
    }
28910 /* In the case of FP/MMX moves, the registers actually overlap, and we
28911 have to switch modes in order to treat them differently. */
28912 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
      || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
    return 20;
28919 /* Moves between SSE/MMX and integer unit are expensive. */
28920 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
28921 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28923 /* ??? By keeping returned value relatively high, we limit the number
28924 of moves between integer and MMX/SSE registers for all targets.
28925 Additionally, high value prevents problem with x86_modes_tieable_p(),
28926 where integer modes in MMX/SSE registers are not tieable
28927 because of missing QImode and HImode moves to, from or between
28928 MMX/SSE registers. */
28929 return MAX (8, ix86_cost->mmxsse_to_integer);
28931 if (MAYBE_FLOAT_CLASS_P (class1))
28932 return ix86_cost->fp_move;
28933 if (MAYBE_SSE_CLASS_P (class1))
28934 return ix86_cost->sse_move;
28935 if (MAYBE_MMX_CLASS_P (class1))
    return ix86_cost->mmx_move;
  return 2;
}
28940 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
int
ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
28945 /* Flags and only flags can only hold CCmode values. */
28946 if (CC_REGNO_P (regno))
28947 return GET_MODE_CLASS (mode) == MODE_CC;
28948 if (GET_MODE_CLASS (mode) == MODE_CC
28949 || GET_MODE_CLASS (mode) == MODE_RANDOM
      || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
    return 0;
28952 if (FP_REGNO_P (regno))
28953 return VALID_FP_MODE_P (mode);
  if (SSE_REGNO_P (regno))
    {
      /* We implement the move patterns for all vector modes into and
	 out of SSE registers, even when no operation instructions
	 are available.  OImode move is available only when AVX is
	 enabled.  */
28960 return ((TARGET_AVX && mode == OImode)
28961 || VALID_AVX256_REG_MODE (mode)
28962 || VALID_SSE_REG_MODE (mode)
28963 || VALID_SSE2_REG_MODE (mode)
28964 || VALID_MMX_REG_MODE (mode)
	      || VALID_MMX_REG_MODE_3DNOW (mode));
    }
  if (MMX_REGNO_P (regno))
    {
28969 /* We implement the move patterns for 3DNOW modes even in MMX mode,
28970 so if the register is available at all, then we can move data of
28971 the given mode into or out of it. */
28972 return (VALID_MMX_REG_MODE (mode)
	      || VALID_MMX_REG_MODE_3DNOW (mode));
    }
  if (mode == QImode)
    {
      /* Take care for QImode values - they can be in non-QI regs,
	 but then they do cause partial register stalls.  */
      if (regno <= BX_REG || TARGET_64BIT)
	return 1;
      if (!TARGET_PARTIAL_REG_STALL)
	return 1;
      return reload_in_progress || reload_completed;
    }
28986 /* We handle both integer and floats in the general purpose registers. */
  else if (VALID_INT_MODE_P (mode))
    return 1;
  else if (VALID_FP_MODE_P (mode))
    return 1;
  else if (VALID_DFP_MODE_P (mode))
    return 1;
28993 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
28994 on to use that value in smaller contexts, this can easily force a
28995 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
28996 supporting DImode, allow it. */
  else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
    return 1;

  return 0;
}
29003 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
29004 tieable integer mode. */
static bool
ix86_tieable_integer_mode_p (enum machine_mode mode)
{
  switch (mode)
    {
    case HImode:
    case SImode:
      return true;

    case QImode:
      return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;

    case DImode:
      return TARGET_64BIT;

    default:
      return false;
    }
}
29026 /* Return true if MODE1 is accessible in a register that can hold MODE2
29027 without copying. That is, all register classes that can hold MODE2
29028 can also hold MODE1. */
static bool
ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  if (mode1 == mode2)
    return true;

  if (ix86_tieable_integer_mode_p (mode1)
      && ix86_tieable_integer_mode_p (mode2))
    return true;
29040 /* MODE2 being XFmode implies fp stack or general regs, which means we
29041 can tie any smaller floating point modes to it. Note that we do not
29042 tie this with TFmode. */
29043 if (mode2 == XFmode)
29044 return mode1 == SFmode || mode1 == DFmode;
29046 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
29047 that we can tie it with SFmode. */
29048 if (mode2 == DFmode)
29049 return mode1 == SFmode;
29051 /* If MODE2 is only appropriate for an SSE register, then tie with
29052 any other mode acceptable to SSE registers. */
29053 if (GET_MODE_SIZE (mode2) == 16
29054 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
29055 return (GET_MODE_SIZE (mode1) == 16
29056 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
29058 /* If MODE2 is appropriate for an MMX register, then tie
29059 with any other mode acceptable to MMX registers. */
29060 if (GET_MODE_SIZE (mode2) == 8
29061 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
29062 return (GET_MODE_SIZE (mode1) == 8
	    && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));

  return false;
}
29068 /* Compute a (partial) cost for rtx X. Return true if the complete
29069 cost has been computed, and false if subexpressions should be
29070 scanned. In either case, *TOTAL contains the cost result. */
static bool
ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
{
29075 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
29076 enum machine_mode mode = GET_MODE (x);
29077 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
  switch (code)
    {
    case CONST_INT:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
	*total = 3;
      else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
	*total = 2;
      else if (flag_pic && SYMBOLIC_CONST (x)
	       && (!TARGET_64BIT
		   || (GET_CODE (x) != LABEL_REF
		       && (GET_CODE (x) != SYMBOL_REF
			   || !SYMBOL_REF_LOCAL_P (x)))))
	*total = 1;
      else
	*total = 0;
      return true;
29100 if (mode == VOIDmode)
29103 switch (standard_80387_constant_p (x))
29108 default: /* Other constants */
29113 /* Start with (MEM (SYMBOL_REF)), since that's where
29114 it'll probably end up. Add a penalty for size. */
29115 *total = (COSTS_N_INSNS (1)
29116 + (flag_pic != 0 && !TARGET_64BIT)
29117 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
      return true;

    case ZERO_EXTEND:
      /* The zero extension is often completely free on x86_64, so make
	 it as cheap as possible.  */
      if (TARGET_64BIT && mode == DImode
	  && GET_MODE (XEXP (x, 0)) == SImode)
	*total = 1;
29128 else if (TARGET_ZERO_EXTEND_WITH_AND)
29129 *total = cost->add;
	*total = cost->movzx;
      break;

    case SIGN_EXTEND:
      *total = cost->movsx;
      break;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
29140 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
	  HOST_WIDE_INT value = INTVAL (XEXP (x, 1));

	  if (value == 1)
	    {
	      *total = cost->add;
	      return false;
	    }
	  if ((value == 2 || value == 3)
	      && cost->lea <= cost->shift_const)
	    {
	      *total = cost->lea;
	      return false;
	    }
	}
      /* FALLTHRU */

    case ROTATE:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATERT:
      if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
	{
	  if (CONST_INT_P (XEXP (x, 1)))
	    {
	      if (INTVAL (XEXP (x, 1)) > 32)
		*total = cost->shift_const + COSTS_N_INSNS (2);
	      else
		*total = cost->shift_const * 2;
	    }
	  else
	    {
	      if (GET_CODE (XEXP (x, 1)) == AND)
		*total = cost->shift_var * 2;
	      else
		*total = cost->shift_var * 6 + COSTS_N_INSNS (2);
	    }
	}
      else
	{
	  if (CONST_INT_P (XEXP (x, 1)))
	    *total = cost->shift_const;
	  else if (GET_CODE (XEXP (x, 1)) == SUBREG
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == AND)
	    {
	      /* Return the cost after shift-and truncation.  */
	      *total = cost->shift_var;
	      return true;
	    }
	  else
	    *total = cost->shift_var;
	}
      break;

    case FMA:
      {
	rtx sub;

	gcc_assert (FLOAT_MODE_P (mode));
	gcc_assert (TARGET_FMA || TARGET_FMA4);

	/* ??? SSE scalar/vector cost should be used here.  */
	/* ??? Bald assumption that fma has the same cost as fmul.  */
	*total = cost->fmul;
	*total += rtx_cost (XEXP (x, 1), FMA, speed);

	/* Negate in op0 or op2 is free: FMS, FNMA, FNMS.  */
	sub = XEXP (x, 0);
	if (GET_CODE (sub) == NEG)
	  sub = XEXP (sub, 0);
	*total += rtx_cost (sub, FMA, speed);

	sub = XEXP (x, 2);
	if (GET_CODE (sub) == NEG)
	  sub = XEXP (sub, 0);
	*total += rtx_cost (sub, FMA, speed);
	return true;
      }

    case MULT:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	{
	  /* ??? SSE scalar cost should be used here.  */
	  *total = cost->fmul;
	  return true;
	}
      else if (X87_FLOAT_MODE_P (mode))
	{
	  *total = cost->fmul;
	  return true;
	}
      else if (FLOAT_MODE_P (mode))
	{
	  /* ??? SSE vector cost should be used here.  */
	  *total = cost->fmul;
	  return true;
	}
      else
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  int nbits;

	  if (CONST_INT_P (XEXP (x, 1)))
	    {
	      unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
	      for (nbits = 0; value != 0; value &= value - 1)
		nbits++;
	    }
	  else
	    /* This is arbitrary.  */
	    nbits = 7;
	  /* Compute costs correctly for widening multiplication.  */
	  if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	      && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
		 == GET_MODE_SIZE (mode))
	    {
	      int is_mulwiden = 0;
	      enum machine_mode inner_mode = GET_MODE (op0);

	      if (GET_CODE (op0) == GET_CODE (op1))
		is_mulwiden = 1, op1 = XEXP (op1, 0);
	      else if (CONST_INT_P (op1))
		{
		  if (GET_CODE (op0) == SIGN_EXTEND)
		    is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
				  == INTVAL (op1);
		  else
		    is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
		}

	      if (is_mulwiden)
		op0 = XEXP (op0, 0), mode = GET_MODE (op0);
	    }
29275 *total = (cost->mult_init[MODE_INDEX (mode)]
29276 + nbits * cost->mult_bit
		    + rtx_cost (op0, outer_code, speed)
		    + rtx_cost (op1, outer_code, speed));

	  return true;
	}

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29287 /* ??? SSE cost should be used here. */
29288 *total = cost->fdiv;
29289 else if (X87_FLOAT_MODE_P (mode))
29290 *total = cost->fdiv;
29291 else if (FLOAT_MODE_P (mode))
29292 /* ??? SSE vector cost should be used here. */
29293 *total = cost->fdiv;
	*total = cost->divide[MODE_INDEX (mode)];
      break;

    case PLUS:
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
	{
	  if (GET_CODE (XEXP (x, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	      && CONSTANT_P (XEXP (x, 1)))
	    {
	      HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
	      if (val == 2 || val == 4 || val == 8)
		{
		  *total = cost->lea;
		  *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
		  *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
				      outer_code, speed);
		  *total += rtx_cost (XEXP (x, 1), outer_code, speed);
		  return true;
		}
	    }
29318 else if (GET_CODE (XEXP (x, 0)) == MULT
		   && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
	    {
	      HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
	      if (val == 2 || val == 4 || val == 8)
		{
		  *total = cost->lea;
		  *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
		  *total += rtx_cost (XEXP (x, 1), outer_code, speed);
		  return true;
		}
	    }
	  else if (GET_CODE (XEXP (x, 0)) == PLUS)
	    {
	      *total = cost->lea;
	      *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
	      *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
	      *total += rtx_cost (XEXP (x, 1), outer_code, speed);
	      return true;
	    }
	}
      /* FALLTHRU */

    case MINUS:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	{
	  /* ??? SSE cost should be used here.  */
	  *total = cost->fadd;
	  return true;
	}
      else if (X87_FLOAT_MODE_P (mode))
	{
	  *total = cost->fadd;
	  return true;
	}
      else if (FLOAT_MODE_P (mode))
	{
	  /* ??? SSE vector cost should be used here.  */
	  *total = cost->fadd;
	  return true;
	}
      /* FALLTHRU */

    case AND:
    case IOR:
    case XOR:
      if (!TARGET_64BIT && mode == DImode)
29366 *total = (cost->add * 2
29367 + (rtx_cost (XEXP (x, 0), outer_code, speed)
29368 << (GET_MODE (XEXP (x, 0)) != DImode))
29369 + (rtx_cost (XEXP (x, 1), outer_code, speed)
		     << (GET_MODE (XEXP (x, 1)) != DImode)));
	}
      else
	*total = cost->add;
      break;

    case NEG:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
	{
	  /* ??? SSE cost should be used here.  */
	  *total = cost->fchs;
	  return true;
	}
      else if (X87_FLOAT_MODE_P (mode))
	{
	  *total = cost->fchs;
	  return true;
	}
      else if (FLOAT_MODE_P (mode))
	{
	  /* ??? SSE vector cost should be used here.  */
	  *total = cost->fchs;
	  return true;
	}
      /* FALLTHRU */

    case NOT:
      if (!TARGET_64BIT && mode == DImode)
	*total = cost->add * 2;
      else
	*total = cost->add;
      break;

    case COMPARE:
      if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
29404 && XEXP (XEXP (x, 0), 1) == const1_rtx
29405 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
29406 && XEXP (x, 1) == const0_rtx)
29408 /* This kind of construct is implemented using test[bwl].
29409 Treat it as if we had an AND. */
29410 *total = (cost->add
29411 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
		    + rtx_cost (const1_rtx, outer_code, speed));
	  return true;
	}
      return false;

    case FLOAT_EXTEND:
      if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
	*total = 0;
      return false;

    case ABS:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29424 /* ??? SSE cost should be used here. */
29425 *total = cost->fabs;
29426 else if (X87_FLOAT_MODE_P (mode))
29427 *total = cost->fabs;
29428 else if (FLOAT_MODE_P (mode))
29429 /* ??? SSE vector cost should be used here. */
	*total = cost->fabs;
      break;

    case SQRT:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29435 /* ??? SSE cost should be used here. */
29436 *total = cost->fsqrt;
29437 else if (X87_FLOAT_MODE_P (mode))
29438 *total = cost->fsqrt;
29439 else if (FLOAT_MODE_P (mode))
	/* ??? SSE vector cost should be used here.  */
	*total = cost->fsqrt;
      break;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_TP)
	*total = 0;
      return false;

    case VEC_SELECT:
    case VEC_CONCAT:
    case VEC_MERGE:
    case VEC_DUPLICATE:
      /* ??? Assume all of these vector manipulation patterns are
	 recognizable.  In which case they all pretty much have the
	 same cost.  */
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
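/* Illustration only: for (plus (mult (reg) (const_int 4)) (reg)) the
   PLUS case above prices the whole expression as a single LEA
   (cost->lea plus the cost of its sub-operands) rather than a shift
   followed by an add, which is why such address-like arithmetic is
   usually folded into one instruction on x86.  */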
#if TARGET_MACHO

static int current_machopic_label_num;
29468 /* Given a symbol name and its associated stub, write out the
29469 definition of the stub. */
void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
29474 unsigned int length;
29475 char *binder_name, *symbol_name, lazy_ptr_name[32];
29476 int label = ++current_machopic_label_num;
29478 /* For 64-bit we shouldn't get here. */
29479 gcc_assert (!TARGET_64BIT);
29481 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
29482 symb = targetm.strip_name_encoding (symb);
29484 length = strlen (stub);
29485 binder_name = XALLOCAVEC (char, length + 32);
29486 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
29488 length = strlen (symb);
29489 symbol_name = XALLOCAVEC (char, length + 32);
29490 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
29492 sprintf (lazy_ptr_name, "L%d$lz", label);
29494 if (MACHOPIC_ATT_STUB)
29495 switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
  else if (MACHOPIC_PURE)
    {
      if (TARGET_DEEP_BRANCH_PREDICTION)
	switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
      else
	switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
    }
  else
    switch_to_section (darwin_sections[machopic_symbol_stub_section]);
29506 fprintf (file, "%s:\n", stub);
29507 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  if (MACHOPIC_ATT_STUB)
    {
      fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
    }
  else if (MACHOPIC_PURE)
    {
      /* PIC stub.  */
      if (TARGET_DEEP_BRANCH_PREDICTION)
	{
	  /* 25-byte PIC stub using "CALL get_pc_thunk".  */
	  rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
	  output_set_got (tmp, NULL_RTX);  /* "CALL ___<cpu>.get_pc_thunk.cx".  */
	  fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n",
		   label, lazy_ptr_name, label);
	}
      else
	{
	  /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %eax".  */
	  fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%ecx\n", label, label);
	  fprintf (file, "\tmovl %s-LPC$%d(%%ecx),%%ecx\n", lazy_ptr_name, label);
	}
      fprintf (file, "\tjmp\t*%%ecx\n");
    }
  else
    fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
29534 /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
29535 it needs no stub-binding-helper. */
  if (MACHOPIC_ATT_STUB)
    return;

  fprintf (file, "%s:\n", binder_name);

  if (MACHOPIC_PURE)
    {
      fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
      fprintf (file, "\tpushl\t%%ecx\n");
    }
  else
    fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);

  fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
29551 /* N.B. Keep the correspondence of these
29552 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
29553 old-pic/new-pic/non-pic stubs; altering this will break
29554 compatibility with existing dylibs. */
  if (MACHOPIC_PURE)
    {
      if (TARGET_DEEP_BRANCH_PREDICTION)
	/* 25-byte PIC stub using "CALL get_pc_thunk".  */
	switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
      else
	/* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %ebx".  */
	switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
    }
  else
    /* 16-byte -mdynamic-no-pic stub.  */
    switch_to_section (darwin_sections[machopic_lazy_symbol_ptr3_section]);
29569 fprintf (file, "%s:\n", lazy_ptr_name);
29570 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, ASM_LONG "%s\n", binder_name);
}
29573 #endif /* TARGET_MACHO */
29575 /* Order the registers for register allocator. */
static void
x86_order_regs_for_local_alloc (void)
{
   int pos = 0;
   int i;
29583 /* First allocate the local general purpose registers. */
29584 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29585 if (GENERAL_REGNO_P (i) && call_used_regs[i])
29586 reg_alloc_order [pos++] = i;
29588 /* Global general purpose registers. */
29589 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29590 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
29591 reg_alloc_order [pos++] = i;
   /* x87 registers come first in case we are doing FP math
      using them.  */
   if (!TARGET_SSE_MATH)
29596 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29597 reg_alloc_order [pos++] = i;
29599 /* SSE registers. */
29600 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
29601 reg_alloc_order [pos++] = i;
29602 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
29603 reg_alloc_order [pos++] = i;
29605 /* x87 registers. */
29606 if (TARGET_SSE_MATH)
29607 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29608 reg_alloc_order [pos++] = i;
29610 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
29611 reg_alloc_order [pos++] = i;
   /* Initialize the rest of array as we do not allocate some registers
      at all.  */
   while (pos < FIRST_PSEUDO_REGISTER)
     reg_alloc_order [pos++] = 0;
}
29619 /* Handle a "callee_pop_aggregate_return" attribute; arguments as
29620 in struct attribute_spec handler. */
static tree
ix86_handle_callee_pop_aggregate_return (tree *node, tree name,
					 tree args,
29624 int flags ATTRIBUTE_UNUSED,
29625 bool *no_add_attrs)
29627 if (TREE_CODE (*node) != FUNCTION_TYPE
29628 && TREE_CODE (*node) != METHOD_TYPE
29629 && TREE_CODE (*node) != FIELD_DECL
29630 && TREE_CODE (*node) != TYPE_DECL)
29632 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29634 *no_add_attrs = true;
29639 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
29641 *no_add_attrs = true;
29644 if (is_attribute_p ("callee_pop_aggregate_return", name))
29648 cst = TREE_VALUE (args);
29649 if (TREE_CODE (cst) != INTEGER_CST)
29651 warning (OPT_Wattributes,
29652 "%qE attribute requires an integer constant argument",
29654 *no_add_attrs = true;
29656 else if (compare_tree_int (cst, 0) != 0
29657 && compare_tree_int (cst, 1) != 0)
29659 warning (OPT_Wattributes,
29660 "argument to %qE attribute is neither zero, nor one",
29662 *no_add_attrs = true;
29671 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
29672 struct attribute_spec.handler. */
29674 ix86_handle_abi_attribute (tree *node, tree name,
29675 tree args ATTRIBUTE_UNUSED,
29676 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29678 if (TREE_CODE (*node) != FUNCTION_TYPE
29679 && TREE_CODE (*node) != METHOD_TYPE
29680 && TREE_CODE (*node) != FIELD_DECL
29681 && TREE_CODE (*node) != TYPE_DECL)
29683 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29685 *no_add_attrs = true;
29690 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
29692 *no_add_attrs = true;
29696 /* ms_abi and sysv_abi are mutually exclusive; reject attempts to combine them. */
29697 if (is_attribute_p ("ms_abi", name))
29699 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
29701 error ("ms_abi and sysv_abi attributes are not compatible");
29706 else if (is_attribute_p ("sysv_abi", name))
29708 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
29710 error ("ms_abi and sysv_abi attributes are not compatible");
29719 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
29720 struct attribute_spec.handler. */
29722 ix86_handle_struct_attribute (tree *node, tree name,
29723 tree args ATTRIBUTE_UNUSED,
29724 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29727 if (DECL_P (*node))
29729 if (TREE_CODE (*node) == TYPE_DECL)
29730 type = &TREE_TYPE (*node);
29735 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
29736 || TREE_CODE (*type) == UNION_TYPE)))
29738 warning (OPT_Wattributes, "%qE attribute ignored",
29740 *no_add_attrs = true;
29743 else if ((is_attribute_p ("ms_struct", name)
29744 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
29745 || ((is_attribute_p ("gcc_struct", name)
29746 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
29748 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
29750 *no_add_attrs = true;
29757 ix86_handle_fndecl_attribute (tree *node, tree name,
29758 tree args ATTRIBUTE_UNUSED,
29759 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29761 if (TREE_CODE (*node) != FUNCTION_DECL)
29763 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29765 *no_add_attrs = true;
29771 ix86_ms_bitfield_layout_p (const_tree record_type)
29773 return ((TARGET_MS_BITFIELD_LAYOUT
29774 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
29775 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
29778 /* Returns an expression indicating where the this parameter is
29779 located on entry to the FUNCTION. */
29782 x86_this_parameter (tree function)
29784 tree type = TREE_TYPE (function);
29785 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
29790 const int *parm_regs;
29792 if (ix86_function_type_abi (type) == MS_ABI)
29793 parm_regs = x86_64_ms_abi_int_parameter_registers;
29795 parm_regs = x86_64_int_parameter_registers;
29796 return gen_rtx_REG (DImode, parm_regs[aggr]);
29799 nregs = ix86_function_regparm (type, function);
29801 if (nregs > 0 && !stdarg_p (type))
29805 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
29806 regno = aggr ? DX_REG : CX_REG;
29807 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
29811 return gen_rtx_MEM (SImode,
29812 plus_constant (stack_pointer_rtx, 4));
29821 return gen_rtx_MEM (SImode,
29822 plus_constant (stack_pointer_rtx, 4));
29825 return gen_rtx_REG (SImode, regno);
29828 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
29831 /* Determine whether x86_output_mi_thunk can succeed. */
29834 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
29835 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
29836 HOST_WIDE_INT vcall_offset, const_tree function)
29838 /* 64-bit can handle anything. */
29842 /* For 32-bit, everything's fine if we have one free register. */
29843 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
29846 /* Need a free register for vcall_offset. */
29850 /* Need a free register for GOT references. */
29851 if (flag_pic && !targetm.binds_local_p (function))
29854 /* Otherwise ok. */
29858 /* Output the assembler code for a thunk function. THUNK_DECL is the
29859 declaration for the thunk function itself, FUNCTION is the decl for
29860 the target function. DELTA is an immediate constant offset to be
29861 added to THIS. If VCALL_OFFSET is nonzero, the word at
29862 *(*this + vcall_offset) should be added to THIS. */
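/* Illustrative C-level sketch of what the emitted thunk does (an
   editorial note, not part of the sources):

     result thunk (void *this, ...)
     {
       this = (char *) this + DELTA;
       if (VCALL_OFFSET)
         this = (char *) this
                + *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
       return FUNCTION (this, ...);
     }

   The assembly below open-codes these steps using only registers
   that are free at function entry.  */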
29865 x86_output_mi_thunk (FILE *file,
29866 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
29867 HOST_WIDE_INT vcall_offset, tree function)
29870 rtx this_param = x86_this_parameter (function);
29873 /* Make sure unwind info is emitted for the thunk if needed. */
29874 final_start_function (emit_barrier (), file, 1);
29876 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
29877 pull it in now and let DELTA benefit. */
29878 if (REG_P (this_param))
29879 this_reg = this_param;
29880 else if (vcall_offset)
29882 /* Put the this parameter into %eax. */
29883 xops[0] = this_param;
29884 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
29885 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29888 this_reg = NULL_RTX;
29890 /* Adjust the this parameter by a fixed constant. */
29893 xops[0] = GEN_INT (delta);
29894 xops[1] = this_reg ? this_reg : this_param;
29897 if (!x86_64_general_operand (xops[0], DImode))
29899 tmp = gen_rtx_REG (DImode, R10_REG);
29901 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
29903 xops[1] = this_param;
29905 if (x86_maybe_negate_const_int (&xops[0], DImode))
29906 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
29908 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
29910 else if (x86_maybe_negate_const_int (&xops[0], SImode))
29911 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
29913 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
29916 /* Adjust the this parameter by a value stored in the vtable. */
29920 tmp = gen_rtx_REG (DImode, R10_REG);
29923 int tmp_regno = CX_REG;
29924 if (lookup_attribute ("fastcall",
29925 TYPE_ATTRIBUTES (TREE_TYPE (function)))
29926 || lookup_attribute ("thiscall",
29927 TYPE_ATTRIBUTES (TREE_TYPE (function))))
29928 tmp_regno = AX_REG;
29929 tmp = gen_rtx_REG (SImode, tmp_regno);
29932 xops[0] = gen_rtx_MEM (Pmode, this_reg);
29934 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29936 /* Adjust the this parameter. */
29937 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
29938 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
29940 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
29941 xops[0] = GEN_INT (vcall_offset);
29943 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
29944 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
29946 xops[1] = this_reg;
29947 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
29950 /* If necessary, drop THIS back to its stack slot. */
29951 if (this_reg && this_reg != this_param)
29953 xops[0] = this_reg;
29954 xops[1] = this_param;
29955 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29958 xops[0] = XEXP (DECL_RTL (function), 0);
29961 if (!flag_pic || targetm.binds_local_p (function)
29962 || DEFAULT_ABI == MS_ABI)
29963 output_asm_insn ("jmp\t%P0", xops);
29964 /* All thunks should be in the same object as their target,
29965 and thus binds_local_p should be true. */
29966 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
29967 gcc_unreachable ();
29970 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
29971 tmp = gen_rtx_CONST (Pmode, tmp);
29972 tmp = gen_rtx_MEM (QImode, tmp);
29974 output_asm_insn ("jmp\t%A0", xops);
29979 if (!flag_pic || targetm.binds_local_p (function))
29980 output_asm_insn ("jmp\t%P0", xops);
29985 rtx sym_ref = XEXP (DECL_RTL (function), 0);
29986 if (TARGET_MACHO_BRANCH_ISLANDS)
29987 sym_ref = (gen_rtx_SYMBOL_REF
29989 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
29990 tmp = gen_rtx_MEM (QImode, sym_ref);
29992 output_asm_insn ("jmp\t%0", xops);
29995 #endif /* TARGET_MACHO */
29997 tmp = gen_rtx_REG (SImode, CX_REG);
29998 output_set_got (tmp, NULL_RTX);
30001 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
30002 output_asm_insn ("jmp\t{*}%1", xops);
30005 final_end_function ();
30009 x86_file_start (void)
30011 default_file_start ();
30013 darwin_file_start ();
30015 if (X86_FILE_START_VERSION_DIRECTIVE)
30016 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
30017 if (X86_FILE_START_FLTUSED)
30018 fputs ("\t.global\t__fltused\n", asm_out_file);
30019 if (ix86_asm_dialect == ASM_INTEL)
30020 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
30024 x86_field_alignment (tree field, int computed)
30026 enum machine_mode mode;
30027 tree type = TREE_TYPE (field);
30029 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
30031 mode = TYPE_MODE (strip_array_types (type));
30032 if (mode == DFmode || mode == DCmode
30033 || GET_MODE_CLASS (mode) == MODE_INT
30034 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
30035 return MIN (32, computed);
30039 /* Output assembler code to FILE to increment profiler label # LABELNO
30040 for profiling a function entry. */
30042 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
30044 const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
30049 #ifndef NO_PROFILE_COUNTERS
30050 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
30053 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
30054 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
30056 fprintf (file, "\tcall\t%s\n", mcount_name);
30060 #ifndef NO_PROFILE_COUNTERS
30061 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
30064 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
30068 #ifndef NO_PROFILE_COUNTERS
30069 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
30072 fprintf (file, "\tcall\t%s\n", mcount_name);
30076 /* We don't have exact information about the insn sizes, but we may assume
30077 quite safely that we are informed about all 1 byte insns and memory
30078 address sizes. This is enough to eliminate unnecessary padding in
30079 the vast majority of cases. */
30082 min_insn_size (rtx insn)
30086 if (!INSN_P (insn) || !active_insn_p (insn))
30089 /* Discard alignments we've emitted, and jump table data. */
30090 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
30091 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
30093 if (JUMP_TABLE_DATA_P (insn))
30096 /* Important case - calls are always 5 bytes.
30097 It is common to have many calls in a row. */
30099 && symbolic_reference_mentioned_p (PATTERN (insn))
30100 && !SIBLING_CALL_P (insn))
30102 len = get_attr_length (insn);
30106 /* For normal instructions we rely on get_attr_length being exact,
30107 with a few exceptions. */
30108 if (!JUMP_P (insn))
30110 enum attr_type type = get_attr_type (insn);
30115 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
30116 || asm_noperands (PATTERN (insn)) >= 0)
30123 /* Otherwise trust get_attr_length. */
30127 l = get_attr_length_address (insn);
30128 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
30137 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30139 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
30140 16 byte window. */
30143 ix86_avoid_jump_mispredicts (void)
30145 rtx insn, start = get_insns ();
30146 int nbytes = 0, njumps = 0;
30149 /* Look for all minimal intervals of instructions containing 4 jumps.
30150 The intervals are bounded by START and INSN. NBYTES is the total
30151 size of instructions in the interval including INSN and not including
30152 START. When NBYTES is smaller than 16 bytes, it is possible
30153 that the ends of START and INSN land in the same 16-byte page.
30155 The smallest offset in the page at which INSN can start is the case where
30156 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
30157 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN).
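/* Worked example (illustrative): suppose the interval holds four
   jumps, NBYTES is 13 and INSN is 2 bytes long.  INSN can then start
   as early as offset 11 in a 16-byte window, so we emit a pad of
   15 - 13 + 2 = 4 bytes before INSN; after padding INSN can start no
   earlier than offset 15, so one window can no longer hold all four
   jumps.  */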
30159 for (insn = start; insn; insn = NEXT_INSN (insn))
30163 if (LABEL_P (insn))
30165 int align = label_to_alignment (insn);
30166 int max_skip = label_to_max_skip (insn);
30170 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
30171 already in the current 16 byte page, because otherwise
30172 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
30173 bytes to reach 16 byte boundary. */
30175 || (align <= 3 && max_skip != (1 << align) - 1))
30178 fprintf (dump_file, "Label %i with max_skip %i\n",
30179 INSN_UID (insn), max_skip);
30182 while (nbytes + max_skip >= 16)
30184 start = NEXT_INSN (start);
30185 if ((JUMP_P (start)
30186 && GET_CODE (PATTERN (start)) != ADDR_VEC
30187 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
30189 njumps--, isjump = 1;
30192 nbytes -= min_insn_size (start);
30198 min_size = min_insn_size (insn);
30199 nbytes += min_size;
30201 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
30202 INSN_UID (insn), min_size);
30204 && GET_CODE (PATTERN (insn)) != ADDR_VEC
30205 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
30213 start = NEXT_INSN (start);
30214 if ((JUMP_P (start)
30215 && GET_CODE (PATTERN (start)) != ADDR_VEC
30216 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
30218 njumps--, isjump = 1;
30221 nbytes -= min_insn_size (start);
30223 gcc_assert (njumps >= 0);
30225 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
30226 INSN_UID (start), INSN_UID (insn), nbytes);
30228 if (njumps == 3 && isjump && nbytes < 16)
30230 int padsize = 15 - nbytes + min_insn_size (insn);
30233 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
30234 INSN_UID (insn), padsize);
30235 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
30241 /* AMD Athlon works faster
30242 when RET is not the destination of a conditional jump or directly preceded
30243 by another jump instruction. We avoid the penalty by inserting a NOP just
30244 before the RET instructions in such cases. */
30246 ix86_pad_returns (void)
30251 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30253 basic_block bb = e->src;
30254 rtx ret = BB_END (bb);
30256 bool replace = false;
30258 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
30259 || optimize_bb_for_size_p (bb))
30261 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
30262 if (active_insn_p (prev) || LABEL_P (prev))
30264 if (prev && LABEL_P (prev))
30269 FOR_EACH_EDGE (e, ei, bb->preds)
30270 if (EDGE_FREQUENCY (e) && e->src->index >= 0
30271 && !(e->flags & EDGE_FALLTHRU))
30276 prev = prev_active_insn (ret);
30278 && ((JUMP_P (prev) && any_condjump_p (prev))
30281 /* Empty functions get a branch mispredict even when
30282 the jump destination is not visible to us. */
30283 if (!prev && !optimize_function_for_size_p (cfun))
30288 emit_jump_insn_before (gen_return_internal_long (), ret);
30294 /* Count the minimum number of instructions in BB. Return 4 if the
30295 number of instructions >= 4. */
30298 ix86_count_insn_bb (basic_block bb)
30301 int insn_count = 0;
30303 /* Count number of instructions in this block. Return 4 if the number
30304 of instructions >= 4. */
30305 FOR_BB_INSNS (bb, insn)
30307 /* This only happens in exit blocks. */
30309 && GET_CODE (PATTERN (insn)) == RETURN)
30312 if (NONDEBUG_INSN_P (insn)
30313 && GET_CODE (PATTERN (insn)) != USE
30314 && GET_CODE (PATTERN (insn)) != CLOBBER)
30317 if (insn_count >= 4)
30326 /* Count the minimum number of instructions in code path in BB.
30327 Return 4 if the number of instructions >= 4. */
30330 ix86_count_insn (basic_block bb)
30334 int min_prev_count;
30336 /* Only bother counting instructions along paths with no
30337 more than 2 basic blocks between entry and exit. Given
30338 that BB has an edge to exit, determine if a predecessor
30339 of BB has an edge from entry. If so, compute the number
30340 of instructions in the predecessor block. If there
30341 happen to be multiple such blocks, compute the minimum. */
30342 min_prev_count = 4;
30343 FOR_EACH_EDGE (e, ei, bb->preds)
30346 edge_iterator prev_ei;
30348 if (e->src == ENTRY_BLOCK_PTR)
30350 min_prev_count = 0;
30353 FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
30355 if (prev_e->src == ENTRY_BLOCK_PTR)
30357 int count = ix86_count_insn_bb (e->src);
30358 if (count < min_prev_count)
30359 min_prev_count = count;
30365 if (min_prev_count < 4)
30366 min_prev_count += ix86_count_insn_bb (bb);
30368 return min_prev_count;
30371 /* Pad short function to 4 instructions. */
30374 ix86_pad_short_function (void)
30379 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30381 rtx ret = BB_END (e->src);
30382 if (JUMP_P (ret) && GET_CODE (PATTERN (ret)) == RETURN)
30384 int insn_count = ix86_count_insn (e->src);
30386 /* Pad short function. */
30387 if (insn_count < 4)
30391 /* Find epilogue. */
30394 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
30395 insn = PREV_INSN (insn);
30400 /* Two NOPs count as one instruction. */
30401 insn_count = 2 * (4 - insn_count);
30402 emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
30408 /* Implement machine specific optimizations. We implement padding of returns
30409 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
30413 /* We are freeing block_for_insn in the toplev to keep compatibility
30414 with old MDEP_REORGS that are not CFG based. Recompute it now. */
30415 compute_bb_for_insn ();
30417 if (optimize && optimize_function_for_speed_p (cfun))
30419 if (TARGET_PAD_SHORT_FUNCTION)
30420 ix86_pad_short_function ();
30421 else if (TARGET_PAD_RETURNS)
30422 ix86_pad_returns ();
30423 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30424 if (TARGET_FOUR_JUMP_LIMIT)
30425 ix86_avoid_jump_mispredicts ();
30429 /* Run the vzeroupper optimization if needed. */
30430 if (TARGET_VZEROUPPER)
30431 move_or_delete_vzeroupper ();
30434 /* Return nonzero when a QImode register that must be represented via a REX
30435 prefix is used. */
30437 x86_extended_QIreg_mentioned_p (rtx insn)
30440 extract_insn_cached (insn);
30441 for (i = 0; i < recog_data.n_operands; i++)
30442 if (REG_P (recog_data.operand[i])
30443 && REGNO (recog_data.operand[i]) > BX_REG)
30448 /* Return nonzero when P points to a register encoded via a REX prefix.
30449 Called via for_each_rtx. */
30451 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
30453 unsigned int regno;
30456 regno = REGNO (*p);
30457 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
30460 /* Return true when INSN mentions a register that must be encoded using a REX
30461 prefix. */
30463 x86_extended_reg_mentioned_p (rtx insn)
30465 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
30466 extended_reg_mentioned_1, NULL);
30469 /* If profitable, negate (without causing overflow) integer constant
30470 of mode MODE at location LOC. Return true in this case. */
30472 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
30476 if (!CONST_INT_P (*loc))
30482 /* DImode x86_64 constants must fit in 32 bits. */
30483 gcc_assert (x86_64_immediate_operand (*loc, mode));
30494 gcc_unreachable ();
30497 /* Avoid overflows. */
30498 if (mode_signbit_p (mode, *loc))
30501 val = INTVAL (*loc);
30503 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
30504 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
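/* For example (illustrative): "addl $-4, %eax" is rewritten as
   "subl $4, %eax", and "addl $128, %eax" as "subl $-128, %eax" so
   that the immediate fits in a sign-extended byte; "addl $-128, %eax"
   is left alone for the same encoding reason.  */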
30505 if ((val < 0 && val != -128)
30508 *loc = GEN_INT (-val);
30515 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
30516 optabs would emit if we didn't have TFmode patterns. */
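/* Illustrative C equivalent of the expansion below (an editorial
   sketch, not from the sources):

     if ((signed) in >= 0)
       out = (fptype) in;
     else
       out = 2.0 * (fptype) ((in >> 1) | (in & 1));

   ORing the low bit back into the halved value makes the final
   doubling round the same way a direct conversion would.  */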
30519 x86_emit_floatuns (rtx operands[2])
30521 rtx neglab, donelab, i0, i1, f0, in, out;
30522 enum machine_mode mode, inmode;
30524 inmode = GET_MODE (operands[1]);
30525 gcc_assert (inmode == SImode || inmode == DImode);
30528 in = force_reg (inmode, operands[1]);
30529 mode = GET_MODE (out);
30530 neglab = gen_label_rtx ();
30531 donelab = gen_label_rtx ();
30532 f0 = gen_reg_rtx (mode);
30534 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
30536 expand_float (out, in, 0);
30538 emit_jump_insn (gen_jump (donelab));
30541 emit_label (neglab);
30543 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
30545 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
30547 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
30549 expand_float (f0, i0, 0);
30551 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
30553 emit_label (donelab);
30556 /* AVX does not support 32-byte integer vector operations,
30557 thus the longest vector we are faced with is V16QImode. */
30558 #define MAX_VECT_LEN 16
30560 struct expand_vec_perm_d
30562 rtx target, op0, op1;
30563 unsigned char perm[MAX_VECT_LEN];
30564 enum machine_mode vmode;
30565 unsigned char nelt;
30569 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
30570 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
30572 /* Get a vector mode of the same size as the original but with elements
30573 twice as wide. This is only guaranteed to apply to integral vectors. */
30575 static inline enum machine_mode
30576 get_mode_wider_vector (enum machine_mode o)
30578 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
30579 enum machine_mode n = GET_MODE_WIDER_MODE (o);
30580 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
30581 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
30585 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30586 with all elements equal to VAR. Return true if successful. */
30589 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
30590 rtx target, rtx val)
30613 /* First attempt to recognize VAL as-is. */
30614 dup = gen_rtx_VEC_DUPLICATE (mode, val);
30615 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
30616 if (recog_memoized (insn) < 0)
30619 /* If that fails, force VAL into a register. */
30622 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
30623 seq = get_insns ();
30626 emit_insn_before (seq, insn);
30628 ok = recog_memoized (insn) >= 0;
30637 if (TARGET_SSE || TARGET_3DNOW_A)
30641 val = gen_lowpart (SImode, val);
30642 x = gen_rtx_TRUNCATE (HImode, val);
30643 x = gen_rtx_VEC_DUPLICATE (mode, x);
30644 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30657 struct expand_vec_perm_d dperm;
30661 memset (&dperm, 0, sizeof (dperm));
30662 dperm.target = target;
30663 dperm.vmode = mode;
30664 dperm.nelt = GET_MODE_NUNITS (mode);
30665 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
30667 /* Extend to SImode using a paradoxical SUBREG. */
30668 tmp1 = gen_reg_rtx (SImode);
30669 emit_move_insn (tmp1, gen_lowpart (SImode, val));
30671 /* Insert the SImode value as low element of a V4SImode vector. */
30672 tmp2 = gen_lowpart (V4SImode, dperm.op0);
30673 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
30675 ok = (expand_vec_perm_1 (&dperm)
30676 || expand_vec_perm_broadcast_1 (&dperm));
30688 /* Replicate the value once into the next wider mode and recurse. */
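/* E.g. (illustrative) for V8HImode: an HImode VAL of 0x1234 becomes
   the SImode value (0x1234 << 16) | 0x1234 = 0x12341234, and the
   V4SImode broadcast of that value is bit-for-bit the desired
   V8HImode broadcast.  */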
30690 enum machine_mode smode, wsmode, wvmode;
30693 smode = GET_MODE_INNER (mode);
30694 wvmode = get_mode_wider_vector (mode);
30695 wsmode = GET_MODE_INNER (wvmode);
30697 val = convert_modes (wsmode, smode, val, true);
30698 x = expand_simple_binop (wsmode, ASHIFT, val,
30699 GEN_INT (GET_MODE_BITSIZE (smode)),
30700 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30701 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
30703 x = gen_lowpart (wvmode, target);
30704 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
30712 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
30713 rtx x = gen_reg_rtx (hvmode);
30715 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
30718 x = gen_rtx_VEC_CONCAT (mode, x, x);
30719 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30728 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30729 whose ONE_VAR element is VAR, and other elements are zero. Return true
30730 if successful. */
30733 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
30734 rtx target, rtx var, int one_var)
30736 enum machine_mode vsimode;
30739 bool use_vector_set = false;
30744 /* For SSE4.1, we normally use vector set. But if the second
30745 element is zero and inter-unit moves are OK, we use movq
30746 instead. */
30747 use_vector_set = (TARGET_64BIT
30749 && !(TARGET_INTER_UNIT_MOVES
30755 use_vector_set = TARGET_SSE4_1;
30758 use_vector_set = TARGET_SSE2;
30761 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
30768 use_vector_set = TARGET_AVX;
30771 /* Use ix86_expand_vector_set in 64bit mode only. */
30772 use_vector_set = TARGET_AVX && TARGET_64BIT;
30778 if (use_vector_set)
30780 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
30781 var = force_reg (GET_MODE_INNER (mode), var);
30782 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30798 var = force_reg (GET_MODE_INNER (mode), var);
30799 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
30800 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30805 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
30806 new_target = gen_reg_rtx (mode);
30808 new_target = target;
30809 var = force_reg (GET_MODE_INNER (mode), var);
30810 x = gen_rtx_VEC_DUPLICATE (mode, var);
30811 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
30812 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
30815 /* We need to shuffle the value to the correct position, so
30816 create a new pseudo to store the intermediate result. */
30818 /* With SSE2, we can use the integer shuffle insns. */
30819 if (mode != V4SFmode && TARGET_SSE2)
30821 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
30823 GEN_INT (one_var == 1 ? 0 : 1),
30824 GEN_INT (one_var == 2 ? 0 : 1),
30825 GEN_INT (one_var == 3 ? 0 : 1)));
30826 if (target != new_target)
30827 emit_move_insn (target, new_target);
30831 /* Otherwise convert the intermediate result to V4SFmode and
30832 use the SSE1 shuffle instructions. */
30833 if (mode != V4SFmode)
30835 tmp = gen_reg_rtx (V4SFmode);
30836 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
30841 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
30843 GEN_INT (one_var == 1 ? 0 : 1),
30844 GEN_INT (one_var == 2 ? 0+4 : 1+4),
30845 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
30847 if (mode != V4SFmode)
30848 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
30849 else if (tmp != target)
30850 emit_move_insn (target, tmp);
30852 else if (target != new_target)
30853 emit_move_insn (target, new_target);
30858 vsimode = V4SImode;
30864 vsimode = V2SImode;
30870 /* Zero extend the variable element to SImode and recurse. */
30871 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
30873 x = gen_reg_rtx (vsimode);
30874 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
30876 gcc_unreachable ();
30878 emit_move_insn (target, gen_lowpart (mode, x));
30886 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30887 consisting of the values in VALS. It is known that all elements
30888 except ONE_VAR are constants. Return true if successful. */
30891 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
30892 rtx target, rtx vals, int one_var)
30894 rtx var = XVECEXP (vals, 0, one_var);
30895 enum machine_mode wmode;
30898 const_vec = copy_rtx (vals);
30899 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
30900 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
30908 /* For the two element vectors, it's just as easy to use
30909 the general case. */
30913 /* Use ix86_expand_vector_set in 64bit mode only. */
30936 /* There's no way to set one QImode entry easily. Combine
30937 the variable value with its adjacent constant value, and
30938 promote to an HImode set. */
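/* E.g. (illustrative): to set byte 5 of a V16QImode vector, the
   constant byte 4 is fetched (one_var ^ 1), the two are combined as
   (VAR << 8) | byte4, and the pair is stored as HImode element
   one_var >> 1 == 2 of the V8HImode view of the vector.  */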
30939 x = XVECEXP (vals, 0, one_var ^ 1);
30942 var = convert_modes (HImode, QImode, var, true);
30943 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
30944 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30945 x = GEN_INT (INTVAL (x) & 0xff);
30949 var = convert_modes (HImode, QImode, var, true);
30950 x = gen_int_mode (INTVAL (x) << 8, HImode);
30952 if (x != const0_rtx)
30953 var = expand_simple_binop (HImode, IOR, var, x, var,
30954 1, OPTAB_LIB_WIDEN);
30956 x = gen_reg_rtx (wmode);
30957 emit_move_insn (x, gen_lowpart (wmode, const_vec));
30958 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
30960 emit_move_insn (target, gen_lowpart (mode, x));
30967 emit_move_insn (target, const_vec);
30968 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30972 /* A subroutine of ix86_expand_vector_init_general. Use vector
30973 concatenate to handle the most general case: all values variable,
30974 and none identical. */
30977 ix86_expand_vector_init_concat (enum machine_mode mode,
30978 rtx target, rtx *ops, int n)
30980 enum machine_mode cmode, hmode = VOIDmode;
30981 rtx first[8], second[4];
31021 gcc_unreachable ();
31024 if (!register_operand (ops[1], cmode))
31025 ops[1] = force_reg (cmode, ops[1]);
31026 if (!register_operand (ops[0], cmode))
31027 ops[0] = force_reg (cmode, ops[0]);
31028 emit_insn (gen_rtx_SET (VOIDmode, target,
31029 gen_rtx_VEC_CONCAT (mode, ops[0],
31049 gcc_unreachable ();
31065 gcc_unreachable ();
31070 /* FIXME: We process inputs backward to help RA. PR 36222. */
31073 for (; i > 0; i -= 2, j--)
31075 first[j] = gen_reg_rtx (cmode);
31076 v = gen_rtvec (2, ops[i - 1], ops[i]);
31077 ix86_expand_vector_init (false, first[j],
31078 gen_rtx_PARALLEL (cmode, v));
31084 gcc_assert (hmode != VOIDmode);
31085 for (i = j = 0; i < n; i += 2, j++)
31087 second[j] = gen_reg_rtx (hmode);
31088 ix86_expand_vector_init_concat (hmode, second [j],
31092 ix86_expand_vector_init_concat (mode, target, second, n);
31095 ix86_expand_vector_init_concat (mode, target, first, n);
31099 gcc_unreachable ();
31103 /* A subroutine of ix86_expand_vector_init_general. Use vector
31104 interleave to handle the most general case: all values variable,
31105 and none identical. */
31108 ix86_expand_vector_init_interleave (enum machine_mode mode,
31109 rtx target, rtx *ops, int n)
31111 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
31114 rtx (*gen_load_even) (rtx, rtx, rtx);
31115 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
31116 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
31121 gen_load_even = gen_vec_setv8hi;
31122 gen_interleave_first_low = gen_vec_interleave_lowv4si;
31123 gen_interleave_second_low = gen_vec_interleave_lowv2di;
31124 inner_mode = HImode;
31125 first_imode = V4SImode;
31126 second_imode = V2DImode;
31127 third_imode = VOIDmode;
31130 gen_load_even = gen_vec_setv16qi;
31131 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
31132 gen_interleave_second_low = gen_vec_interleave_lowv4si;
31133 inner_mode = QImode;
31134 first_imode = V8HImode;
31135 second_imode = V4SImode;
31136 third_imode = V2DImode;
31139 gcc_unreachable ();
31142 for (i = 0; i < n; i++)
31144 /* Extend the odd element to SImode using a paradoxical SUBREG. */
31145 op0 = gen_reg_rtx (SImode);
31146 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
31148 /* Insert the SImode value as low element of V4SImode vector. */
31149 op1 = gen_reg_rtx (V4SImode);
31150 op0 = gen_rtx_VEC_MERGE (V4SImode,
31151 gen_rtx_VEC_DUPLICATE (V4SImode,
31153 CONST0_RTX (V4SImode),
31155 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
31157 /* Cast the V4SImode vector back to a vector in the original mode. */
31158 op0 = gen_reg_rtx (mode);
31159 emit_move_insn (op0, gen_lowpart (mode, op1));
31161 /* Load even elements into the second position. */
31162 emit_insn (gen_load_even (op0,
31163 force_reg (inner_mode,
31167 /* Cast vector to FIRST_IMODE vector. */
31168 ops[i] = gen_reg_rtx (first_imode);
31169 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
31172 /* Interleave low FIRST_IMODE vectors. */
31173 for (i = j = 0; i < n; i += 2, j++)
31175 op0 = gen_reg_rtx (first_imode);
31176 emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));
31178 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
31179 ops[j] = gen_reg_rtx (second_imode);
31180 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
31183 /* Interleave low SECOND_IMODE vectors. */
31184 switch (second_imode)
31187 for (i = j = 0; i < n / 2; i += 2, j++)
31189 op0 = gen_reg_rtx (second_imode);
31190 emit_insn (gen_interleave_second_low (op0, ops[i],
31193 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
31194 vector. */
31195 ops[j] = gen_reg_rtx (third_imode);
31196 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
31198 second_imode = V2DImode;
31199 gen_interleave_second_low = gen_vec_interleave_lowv2di;
31203 op0 = gen_reg_rtx (second_imode);
31204 emit_insn (gen_interleave_second_low (op0, ops[0],
31207 /* Cast the SECOND_IMODE vector back to a vector in the original
31208 mode. */
31209 emit_insn (gen_rtx_SET (VOIDmode, target,
31210 gen_lowpart (mode, op0)));
31214 gcc_unreachable ();
31218 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
31219 all values variable, and none identical. */
31222 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
31223 rtx target, rtx vals)
31225 rtx ops[32], op0, op1;
31226 enum machine_mode half_mode = VOIDmode;
31233 if (!mmx_ok && !TARGET_SSE)
31245 n = GET_MODE_NUNITS (mode);
31246 for (i = 0; i < n; i++)
31247 ops[i] = XVECEXP (vals, 0, i);
31248 ix86_expand_vector_init_concat (mode, target, ops, n);
31252 half_mode = V16QImode;
31256 half_mode = V8HImode;
31260 n = GET_MODE_NUNITS (mode);
31261 for (i = 0; i < n; i++)
31262 ops[i] = XVECEXP (vals, 0, i);
31263 op0 = gen_reg_rtx (half_mode);
31264 op1 = gen_reg_rtx (half_mode);
31265 ix86_expand_vector_init_interleave (half_mode, op0, ops,
31267 ix86_expand_vector_init_interleave (half_mode, op1,
31268 &ops [n >> 1], n >> 2);
31269 emit_insn (gen_rtx_SET (VOIDmode, target,
31270 gen_rtx_VEC_CONCAT (mode, op0, op1)));
31274 if (!TARGET_SSE4_1)
31282 /* Don't use ix86_expand_vector_init_interleave if we can't
31283 move from GPR to SSE register directly. */
31284 if (!TARGET_INTER_UNIT_MOVES)
31287 n = GET_MODE_NUNITS (mode);
31288 for (i = 0; i < n; i++)
31289 ops[i] = XVECEXP (vals, 0, i);
31290 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
31298 gcc_unreachable ();
31302 int i, j, n_elts, n_words, n_elt_per_word;
31303 enum machine_mode inner_mode;
31304 rtx words[4], shift;
31306 inner_mode = GET_MODE_INNER (mode);
31307 n_elts = GET_MODE_NUNITS (mode);
31308 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
31309 n_elt_per_word = n_elts / n_words;
31310 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
31312 for (i = 0; i < n_words; ++i)
31314 rtx word = NULL_RTX;
31316 for (j = 0; j < n_elt_per_word; ++j)
31318 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
31319 elt = convert_modes (word_mode, inner_mode, elt, true);
31325 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
31326 word, 1, OPTAB_LIB_WIDEN);
31327 word = expand_simple_binop (word_mode, IOR, word, elt,
31328 word, 1, OPTAB_LIB_WIDEN);
31336 emit_move_insn (target, gen_lowpart (mode, words[0]));
31337 else if (n_words == 2)
31339 rtx tmp = gen_reg_rtx (mode);
31340 emit_clobber (tmp);
31341 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
31342 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
31343 emit_move_insn (target, tmp);
31345 else if (n_words == 4)
31347 rtx tmp = gen_reg_rtx (V4SImode);
31348 gcc_assert (word_mode == SImode);
31349 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
31350 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
31351 emit_move_insn (target, gen_lowpart (mode, tmp));
31354 gcc_unreachable ();
31358 /* Initialize vector TARGET via VALS. Suppress the use of MMX
31359 instructions unless MMX_OK is true. */
31362 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
31364 enum machine_mode mode = GET_MODE (target);
31365 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31366 int n_elts = GET_MODE_NUNITS (mode);
31367 int n_var = 0, one_var = -1;
31368 bool all_same = true, all_const_zero = true;
31372 for (i = 0; i < n_elts; ++i)
31374 x = XVECEXP (vals, 0, i);
31375 if (!(CONST_INT_P (x)
31376 || GET_CODE (x) == CONST_DOUBLE
31377 || GET_CODE (x) == CONST_FIXED))
31378 n_var++, one_var = i;
31379 else if (x != CONST0_RTX (inner_mode))
31380 all_const_zero = false;
31381 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
31385 /* Constants are best loaded from the constant pool. */
31388 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
31392 /* If all values are identical, broadcast the value. */
31394 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
31395 XVECEXP (vals, 0, 0)))
31398 /* Values where only one field is non-constant are best loaded from
31399 the pool and overwritten via move later. */
31403 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
31404 XVECEXP (vals, 0, one_var),
31408 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
31412 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
31416 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
31418 enum machine_mode mode = GET_MODE (target);
31419 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31420 enum machine_mode half_mode;
31421 bool use_vec_merge = false;
31423 static rtx (*gen_extract[6][2]) (rtx, rtx)
31425 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
31426 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
31427 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
31428 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
31429 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
31430 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
31432 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
31434 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
31435 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
31436 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
31437 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
31438 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
31439 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
31449 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
31450 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
31452 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
31454 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
31455 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31461 use_vec_merge = TARGET_SSE4_1 && TARGET_64BIT;
31465 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
31466 ix86_expand_vector_extract (false, tmp, target, 1 - elt);
31468 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
31470 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
31471 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31478 /* For the two element vectors, we implement a VEC_CONCAT with
31479 the extraction of the other element. */
31481 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
31482 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
31485 op0 = val, op1 = tmp;
31487 op0 = tmp, op1 = val;
31489 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
31490 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31495 use_vec_merge = TARGET_SSE4_1;
31502 use_vec_merge = true;
31506 /* tmp = target = A B C D */
31507 tmp = copy_to_reg (target);
31508 /* target = A A B B */
31509 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
31510 /* target = X A B B */
31511 ix86_expand_vector_set (false, target, val, 0);
31512 /* target = A X C D */
31513 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31514 const1_rtx, const0_rtx,
31515 GEN_INT (2+4), GEN_INT (3+4)));
31519 /* tmp = target = A B C D */
31520 tmp = copy_to_reg (target);
31521 /* tmp = X B C D */
31522 ix86_expand_vector_set (false, tmp, val, 0);
31523 /* target = A B X D */
31524 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31525 const0_rtx, const1_rtx,
31526 GEN_INT (0+4), GEN_INT (3+4)));
31530 /* tmp = target = A B C D */
31531 tmp = copy_to_reg (target);
31532 /* tmp = X B C D */
31533 ix86_expand_vector_set (false, tmp, val, 0);
31534 /* target = A B X D */
31535 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31536 const0_rtx, const1_rtx,
31537 GEN_INT (2+4), GEN_INT (0+4)));
31541 gcc_unreachable ();
31546 use_vec_merge = TARGET_SSE4_1;
31550 /* Element 0 handled by vec_merge below. */
31553 use_vec_merge = true;
31559 /* With SSE2, use integer shuffles to swap element 0 and ELT,
31560 store into element 0, then shuffle them back. */
31564 order[0] = GEN_INT (elt);
31565 order[1] = const1_rtx;
31566 order[2] = const2_rtx;
31567 order[3] = GEN_INT (3);
31568 order[elt] = const0_rtx;
31570 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31571 order[1], order[2], order[3]));
31573 ix86_expand_vector_set (false, target, val, 0);
31575 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31576 order[1], order[2], order[3]));
31580 /* For SSE1, we have to reuse the V4SF code. */
31581 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
31582 gen_lowpart (SFmode, val), elt);
31587 use_vec_merge = TARGET_SSE2;
31590 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31594 use_vec_merge = TARGET_SSE4_1;
31601 half_mode = V16QImode;
31607 half_mode = V8HImode;
31613 half_mode = V4SImode;
31619 half_mode = V2DImode;
31625 half_mode = V4SFmode;
31631 half_mode = V2DFmode;
31637 /* Compute offset. */
31641 gcc_assert (i <= 1);
31643 /* Extract the half. */
31644 tmp = gen_reg_rtx (half_mode);
31645 emit_insn (gen_extract[j][i] (tmp, target));
31647 /* Put val in tmp at elt. */
31648 ix86_expand_vector_set (false, tmp, val, elt);
31651 emit_insn (gen_insert[j][i] (target, target, tmp));
31660 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
31661 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
31662 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31666 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31668 emit_move_insn (mem, target);
31670 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31671 emit_move_insn (tmp, val);
31673 emit_move_insn (target, mem);
31678 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
31680 enum machine_mode mode = GET_MODE (vec);
31681 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31682 bool use_vec_extr = false;
31695 use_vec_extr = true;
31699 use_vec_extr = TARGET_SSE4_1;
31711 tmp = gen_reg_rtx (mode);
31712 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
31713 GEN_INT (elt), GEN_INT (elt),
31714 GEN_INT (elt+4), GEN_INT (elt+4)));
31718 tmp = gen_reg_rtx (mode);
31719 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
31723 gcc_unreachable ();
31726 use_vec_extr = true;
31731 use_vec_extr = TARGET_SSE4_1;
31745 tmp = gen_reg_rtx (mode);
31746 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
31747 GEN_INT (elt), GEN_INT (elt),
31748 GEN_INT (elt), GEN_INT (elt)));
31752 tmp = gen_reg_rtx (mode);
31753 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
31757 gcc_unreachable ();
31760 use_vec_extr = true;
31765 /* For SSE1, we have to reuse the V4SF code. */
31766 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
31767 gen_lowpart (V4SFmode, vec), elt);
31773 use_vec_extr = TARGET_SSE2;
31776 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31780 use_vec_extr = TARGET_SSE4_1;
31784 /* ??? Could extract the appropriate HImode element and shift. */
31791 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
31792 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
31794 /* Let the rtl optimizers know about the zero extension performed. */
31795 if (inner_mode == QImode || inner_mode == HImode)
31797 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
31798 target = gen_lowpart (SImode, target);
31801 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31805 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31807 emit_move_insn (mem, vec);
31809 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31810 emit_move_insn (target, tmp);
31814 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
31815 pattern to reduce; DEST is the destination; IN is the input vector. */
31818 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
31820 rtx tmp1, tmp2, tmp3;
31822 tmp1 = gen_reg_rtx (V4SFmode);
31823 tmp2 = gen_reg_rtx (V4SFmode);
31824 tmp3 = gen_reg_rtx (V4SFmode);
31826 emit_insn (gen_sse_movhlps (tmp1, in, in));
31827 emit_insn (fn (tmp2, tmp1, in));
31829 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
31830 const1_rtx, const1_rtx,
31831 GEN_INT (1+4), GEN_INT (1+4)));
31832 emit_insn (fn (dest, tmp2, tmp3));
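/* Illustrative trace for IN = { a, b, c, d }, writing @ for the
   reduction operation FN: movhlps makes tmp1 = { c, d, c, d }, so
   tmp2 = tmp1 @ in = { a@c, b@d, ... }; the shufps replicates
   element 1 of tmp2 into every lane of tmp3, and the final step
   leaves (a@c) @ (b@d) in element 0 of DEST.  */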
31835 /* Target hook for scalar_mode_supported_p. */
31837 ix86_scalar_mode_supported_p (enum machine_mode mode)
31839 if (DECIMAL_FLOAT_MODE_P (mode))
31840 return default_decimal_float_supported_p ();
31841 else if (mode == TFmode)
31844 return default_scalar_mode_supported_p (mode);
31847 /* Implements target hook vector_mode_supported_p. */
31849 ix86_vector_mode_supported_p (enum machine_mode mode)
31851 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
31853 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
31855 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
31857 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
31859 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
31864 /* Target hook for c_mode_for_suffix. */
31865 static enum machine_mode
31866 ix86_c_mode_for_suffix (char suffix)
31876 /* Worker function for TARGET_MD_ASM_CLOBBERS.
31878 We do this in the new i386 backend to maintain source compatibility
31879 with the old cc0-based compiler. */
31882 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
31883 tree inputs ATTRIBUTE_UNUSED,
31886 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
31888 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
31893 /* Implements target vector targetm.asm.encode_section_info. This
31894 is not used by NetWare. */
31896 static void ATTRIBUTE_UNUSED
31897 ix86_encode_section_info (tree decl, rtx rtl, int first)
31899 default_encode_section_info (decl, rtl, first);
31901 if (TREE_CODE (decl) == VAR_DECL
31902 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
31903 && ix86_in_large_data_p (decl))
31904 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
31907 /* Worker function for REVERSE_CONDITION. */
31910 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
31912 return (mode != CCFPmode && mode != CCFPUmode
31913 ? reverse_condition (code)
31914 : reverse_condition_maybe_unordered (code));
31917 /* Output code to perform an x87 FP register move, from OPERANDS[1]
31918 to OPERANDS[0]. */
31921 output_387_reg_move (rtx insn, rtx *operands)
31923 if (REG_P (operands[0]))
31925 if (REG_P (operands[1])
31926 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31928 if (REGNO (operands[0]) == FIRST_STACK_REG)
31929 return output_387_ffreep (operands, 0);
31930 return "fstp\t%y0";
31932 if (STACK_TOP_P (operands[0]))
31933 return "fld%Z1\t%y1";
31936 else if (MEM_P (operands[0]))
31938 gcc_assert (REG_P (operands[1]));
31939 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31940 return "fstp%Z0\t%y0";
31943 /* There is no non-popping store to memory for XFmode.
31944 So if we need one, follow the store with a load. */
31945 if (GET_MODE (operands[0]) == XFmode)
31946 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
31948 return "fst%Z0\t%y0";
31955 /* Output code to perform a conditional jump to LABEL, if C2 flag in
31956 FP status register is set. */
31959 ix86_emit_fp_unordered_jump (rtx label)
31961 rtx reg = gen_reg_rtx (HImode);
31964 emit_insn (gen_x86_fnstsw_1 (reg));
31966 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
31968 emit_insn (gen_x86_sahf_1 (reg));
31970 temp = gen_rtx_REG (CCmode, FLAGS_REG);
31971 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
31975 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
31977 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
31978 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
31981 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
31982 gen_rtx_LABEL_REF (VOIDmode, label),
31984 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
31986 emit_jump_insn (temp);
31987 predict_jump (REG_BR_PROB_BASE * 10 / 100);
31990 /* Output code to perform a log1p XFmode calculation. */
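/* Illustrative outline (editorial, not from the sources): with the
   fldln2 constant ln(2) loaded as Y, fyl2xp1 computes Y * log2 (x + 1)
   and fyl2x computes Y * log2 (x), so the expansion is roughly

     if (fabs (x) < 0.29289321881345247561810596348408353)
       result = ln2 * log2 (x + 1);      // fyl2xp1, accurate near 0
     else
       result = ln2 * log2 (1.0 + x);    // fyl2x on the explicit sum

   where the threshold is 1 - sqrt(2)/2, the documented domain bound
   of fyl2xp1.  */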
31992 void ix86_emit_i387_log1p (rtx op0, rtx op1)
31994 rtx label1 = gen_label_rtx ();
31995 rtx label2 = gen_label_rtx ();
31997 rtx tmp = gen_reg_rtx (XFmode);
31998 rtx tmp2 = gen_reg_rtx (XFmode);
32001 emit_insn (gen_absxf2 (tmp, op1));
32002 test = gen_rtx_GE (VOIDmode, tmp,
32003 CONST_DOUBLE_FROM_REAL_VALUE (
32004 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
32006 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
32008 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
32009 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
32010 emit_jump (label2);
32012 emit_label (label1);
32013 emit_move_insn (tmp, CONST1_RTX (XFmode));
32014 emit_insn (gen_addxf3 (tmp, op1, tmp));
32015 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
32016 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
32018 emit_label (label2);
32021 /* Output code to perform a Newton-Raphson approximation of a single precision
32022 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
32024 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
32026 rtx x0, x1, e0, e1, two;
32028 x0 = gen_reg_rtx (mode);
32029 e0 = gen_reg_rtx (mode);
32030 e1 = gen_reg_rtx (mode);
32031 x1 = gen_reg_rtx (mode);
32033 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
32035 if (VECTOR_MODE_P (mode))
32036 two = ix86_build_const_vector (mode, true, two);
32038 two = force_reg (mode, two);
32040 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
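/* Derivation (illustrative): one Newton-Raphson step for
   f(x) = 1/x - b is x1 = x0 * (2 - b*x0); multiplying through by a
   gives exactly the products emitted below, e0 = a*x0, e1 = b*x0,
   x1 = 2 - e1, res = e0*x1, which roughly doubles the precision of
   the rcpps estimate.  */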
32042 /* x0 = rcp(b) estimate */
32043 emit_insn (gen_rtx_SET (VOIDmode, x0,
32044 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
32047 emit_insn (gen_rtx_SET (VOIDmode, e0,
32048 gen_rtx_MULT (mode, x0, a)));
32050 emit_insn (gen_rtx_SET (VOIDmode, e1,
32051 gen_rtx_MULT (mode, x0, b)));
32053 emit_insn (gen_rtx_SET (VOIDmode, x1,
32054 gen_rtx_MINUS (mode, two, e1)));
32055 /* res = e0 * x1 */
32056 emit_insn (gen_rtx_SET (VOIDmode, res,
32057 gen_rtx_MULT (mode, e0, x1)));
32060 /* Output code to perform a Newton-Raphson approximation of a
32061 single precision floating point [reciprocal] square root. */
32063 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
32066 rtx x0, e0, e1, e2, e3, mthree, mhalf;
32069 x0 = gen_reg_rtx (mode);
32070 e0 = gen_reg_rtx (mode);
32071 e1 = gen_reg_rtx (mode);
32072 e2 = gen_reg_rtx (mode);
32073 e3 = gen_reg_rtx (mode);
32075 real_from_integer (&r, VOIDmode, -3, -1, 0);
32076 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
32078 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
32079 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
32081 if (VECTOR_MODE_P (mode))
32083 mthree = ix86_build_const_vector (mode, true, mthree);
32084 mhalf = ix86_build_const_vector (mode, true, mhalf);
32087 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
32088 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
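/* Derivation (illustrative): the Newton-Raphson step for
   f(x) = 1/(x*x) - a is x1 = -0.5 * x0 * (a*x0*x0 - 3.0), matching
   e0 = a*x0, e1 = e0*x0, e2 = e1 - 3.0 and the final -0.5 product
   below; using e0 instead of x0 in the last multiply folds in the
   extra factor of a that turns the refined rsqrt into sqrt.  */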
32090 /* x0 = rsqrt(a) estimate */
32091 emit_insn (gen_rtx_SET (VOIDmode, x0,
32092 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
32095 /* If a == 0.0, filter out the infinity from rsqrt to prevent a NaN for sqrt (0.0). */
32100 zero = gen_reg_rtx (mode);
32101 mask = gen_reg_rtx (mode);
32103 zero = force_reg (mode, CONST0_RTX(mode));
32104 emit_insn (gen_rtx_SET (VOIDmode, mask,
32105 gen_rtx_NE (mode, zero, a)));
32107 emit_insn (gen_rtx_SET (VOIDmode, x0,
32108 gen_rtx_AND (mode, x0, mask)));
32112 emit_insn (gen_rtx_SET (VOIDmode, e0,
32113 gen_rtx_MULT (mode, x0, a)));
32115 emit_insn (gen_rtx_SET (VOIDmode, e1,
32116 gen_rtx_MULT (mode, e0, x0)));
32119 mthree = force_reg (mode, mthree);
32120 emit_insn (gen_rtx_SET (VOIDmode, e2,
32121 gen_rtx_PLUS (mode, e1, mthree)));
32123 mhalf = force_reg (mode, mhalf);
32125 /* e3 = -.5 * x0 */
32126 emit_insn (gen_rtx_SET (VOIDmode, e3,
32127 gen_rtx_MULT (mode, x0, mhalf)));
32129 /* e3 = -.5 * e0 */
32130 emit_insn (gen_rtx_SET (VOIDmode, e3,
32131 gen_rtx_MULT (mode, e0, mhalf)));
32132 /* ret = e2 * e3 */
32133 emit_insn (gen_rtx_SET (VOIDmode, res,
32134 gen_rtx_MULT (mode, e2, e3)));
32137 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
32139 static void ATTRIBUTE_UNUSED
32140 i386_solaris_elf_named_section (const char *name, unsigned int flags,
32143 /* With Binutils 2.15, the "@unwind" marker must be specified on
32144 every occurrence of the ".eh_frame" section, not just the first
32145 one. */
32147 && strcmp (name, ".eh_frame") == 0)
32149 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
32150 flags & SECTION_WRITE ? "aw" : "a");
32153 default_elf_asm_named_section (name, flags, decl);
32156 /* Return the mangling of TYPE if it is an extended fundamental type. */
32158 static const char *
32159 ix86_mangle_type (const_tree type)
32161 type = TYPE_MAIN_VARIANT (type);
32163 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32164 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32167 switch (TYPE_MODE (type))
32170 /* __float128 is "g". */
32173 /* "long double" or __float80 is "e". */
32180 /* For 32-bit code we can save PIC register setup by using
32181 __stack_chk_fail_local hidden function instead of calling
32182 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
32183 register, so it is better to call __stack_chk_fail directly. */
32186 ix86_stack_protect_fail (void)
32188 return TARGET_64BIT
32189 ? default_external_stack_protect_fail ()
32190 : default_hidden_stack_protect_fail ();
32193 /* Select a format to encode pointers in exception handling data. CODE
32194 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
32195 true if the symbol may be affected by dynamic relocations.
32197 ??? All x86 object file formats are capable of representing this.
32198 After all, the relocation needed is the same as for the call insn.
32199 Whether or not a particular assembler allows us to enter such, I
32200 guess we'll have to see. */
32202 asm_preferred_eh_data_format (int code, int global)
32206 int type = DW_EH_PE_sdata8;
32208 || ix86_cmodel == CM_SMALL_PIC
32209 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
32210 type = DW_EH_PE_sdata4;
32211 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
32213 if (ix86_cmodel == CM_SMALL
32214 || (ix86_cmodel == CM_MEDIUM && code))
32215 return DW_EH_PE_udata4;
32216 return DW_EH_PE_absptr;
32219 /* Expand copysign from SIGN to the positive value ABS_VALUE
32220 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
32221 the sign bit. */
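/* Bit-level view (illustrative): SGN = SIGN & SIGNBIT_MASK and
   RESULT = ABS_VALUE | SGN.  When a MASK from ix86_expand_sse_fabs is
   passed in, it covers the non-sign bits, hence the complement taken
   before the AND.  */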
32223 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
32225 enum machine_mode mode = GET_MODE (sign);
32226 rtx sgn = gen_reg_rtx (mode);
32227 if (mask == NULL_RTX)
32229 enum machine_mode vmode;
32231 if (mode == SFmode)
32233 else if (mode == DFmode)
32238 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
32239 if (!VECTOR_MODE_P (mode))
32241 /* We need to generate a scalar mode mask in this case. */
32242 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
32243 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
32244 mask = gen_reg_rtx (mode);
32245 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
32249 mask = gen_rtx_NOT (mode, mask);
32250 emit_insn (gen_rtx_SET (VOIDmode, sgn,
32251 gen_rtx_AND (mode, mask, sign)));
32252 emit_insn (gen_rtx_SET (VOIDmode, result,
32253 gen_rtx_IOR (mode, abs_value, sgn)));
32256 /* Expand fabs (OP0) and return a new rtx that holds the result. The
32257 mask for masking out the sign-bit is stored in *SMASK, if that is
32258 non-null. */
32260 ix86_expand_sse_fabs (rtx op0, rtx *smask)
32262 enum machine_mode vmode, mode = GET_MODE (op0);
32265 xa = gen_reg_rtx (mode);
32266 if (mode == SFmode)
32268 else if (mode == DFmode)
32272 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
32273 if (!VECTOR_MODE_P (mode))
32275 /* We need to generate a scalar mode mask in this case. */
32276 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
32277 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
32278 mask = gen_reg_rtx (mode);
32279 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
32281 emit_insn (gen_rtx_SET (VOIDmode, xa,
32282 gen_rtx_AND (mode, op0, mask)));
32290 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
32291 swapping the operands if SWAP_OPERANDS is true. The expanded
32292 code is a forward jump to a newly created label in case the
32293 comparison is true. The generated label rtx is returned. */
32295 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
32296 bool swap_operands)
32307 label = gen_label_rtx ();
32308 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
32309 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32310 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
32311 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
32312 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
32313 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
32314 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
32315 JUMP_LABEL (tmp) = label;
32320 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
32321 using comparison code CODE. Operands are swapped for the comparison if
32322 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
32324 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
32325 bool swap_operands)
32327 enum machine_mode mode = GET_MODE (op0);
32328 rtx mask = gen_reg_rtx (mode);
32337 if (mode == DFmode)
32338 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
32339 gen_rtx_fmt_ee (code, mode, op0, op1)));
32341 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
32342 gen_rtx_fmt_ee (code, mode, op0, op1)));
32347 /* Generate and return a rtx of mode MODE for 2**n where n is the number
32348 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
32350 ix86_gen_TWO52 (enum machine_mode mode)
32352 REAL_VALUE_TYPE TWO52r;
32355 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
32356 TWO52 = const_double_from_real_value (TWO52r, mode);
32357 TWO52 = force_reg (mode, TWO52);
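/* Illustrative sketch, not part of GCC: the rounding identity TWO52 enables.
   For 0 <= x < 2**52 the sum x + 2**52 has no fractional bits left in
   double precision, so the addition rounds x to an integer in the current
   rounding mode and the subtraction recovers it exactly.  The helper name
   is hypothetical; #if 0 keeps it from being compiled.  */
#if 0
static double
round_via_two52_sketch (double x)  /* assumes 0.0 <= x < 0x1p52 */
{
  volatile double t = x + 0x1p52;  /* volatile blocks reassociation */
  return t - 0x1p52;
}
#endif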
32362 /* Expand SSE sequence for computing lround from OP1 storing into OP0. */
32365 ix86_expand_lround (rtx op0, rtx op1)
32367 /* C code for the stuff we're doing below:
32368 tmp = op1 + copysign (nextafter (0.5, 0.0), op1); op0 = (long)tmp; */
32371 enum machine_mode mode = GET_MODE (op1);
32372 const struct real_format *fmt;
32373 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32376 /* load nextafter (0.5, 0.0) */
32377 fmt = REAL_MODE_FORMAT (mode);
32378 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32379 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
32381 /* adj = copysign (0.5, op1) */
32382 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
32383 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
32385 /* adj = op1 + adj */
32386 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
32388 /* op0 = (imode)adj */
32389 expand_fix (op0, adj, 0);
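/* Illustrative sketch, not part of GCC: scalar C equivalent of the lround
   sequence emitted above, assuming <math.h>.  The *_sketch name is ours.  */
#if 0
#include <math.h>
static long
lround_sketch (double x)
{
  /* nextafter (0.5, 0.0) keeps values just below 0.5 from rounding up
     to 1.0 when the adjustment is added.  */
  double adj = copysign (nextafter (0.5, 0.0), x);
  return (long) (x + adj);  /* truncation completes the rounding */
}
#endif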
32392 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing into OP0. */
32395 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
32397 /* C code for the stuff we're doing below (for do_floor):
32399 xi = (long)op1; xi -= (double)xi > op1 ? 1 : 0; return xi; */
32402 enum machine_mode fmode = GET_MODE (op1);
32403 enum machine_mode imode = GET_MODE (op0);
32404 rtx ireg, freg, label, tmp;
32406 /* reg = (long)op1 */
32407 ireg = gen_reg_rtx (imode);
32408 expand_fix (ireg, op1, 0);
32410 /* freg = (double)reg */
32411 freg = gen_reg_rtx (fmode);
32412 expand_float (freg, ireg, 0);
32414 /* ireg = (freg > op1) ? ireg - 1 : ireg */
32415 label = ix86_expand_sse_compare_and_jump (UNLE,
32416 freg, op1, !do_floor);
32417 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
32418 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
32419 emit_move_insn (ireg, tmp);
32421 emit_label (label);
32422 LABEL_NUSES (label) = 1;
32424 emit_move_insn (op0, ireg);
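/* Illustrative sketch, not part of GCC: scalar C equivalent of the
   do_floor case above; the ceil variant compares the other way and adds 1
   instead.  The *_sketch helper is hypothetical.  */
#if 0
static long
lfloor_sketch (double x)
{
  long xi = (long) x;    /* truncates toward zero */
  if ((double) xi > x)   /* negative non-integer: truncation overshot */
    xi -= 1;
  return xi;
}
#endif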
32427 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
32428 result in OPERAND0. */
32430 ix86_expand_rint (rtx operand0, rtx operand1)
32432 /* C code for the stuff we're doing below:
32433 xa = fabs (operand1);
32434 if (!isless (xa, 2**52)) return operand1;
32436 xa = xa + 2**52 - 2**52;
32437 return copysign (xa, operand1); */
32439 enum machine_mode mode = GET_MODE (operand0);
32440 rtx res, xa, label, TWO52, mask;
32442 res = gen_reg_rtx (mode);
32443 emit_move_insn (res, operand1);
32445 /* xa = abs (operand1) */
32446 xa = ix86_expand_sse_fabs (res, &mask);
32448 /* if (!isless (xa, TWO52)) goto label; */
32449 TWO52 = ix86_gen_TWO52 (mode);
32450 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32452 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32453 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32455 ix86_sse_copysign_to_positive (res, xa, res, mask);
32457 emit_label (label);
32458 LABEL_NUSES (label) = 1;
32460 emit_move_insn (operand0, res);
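/* Illustrative sketch, not part of GCC: scalar C equivalent of the rint
   expansion above, assuming <math.h>.  The *_sketch name is ours.  */
#if 0
#include <math.h>
static double
rint_sketch (double x)
{
  double xa = fabs (x);
  if (!(xa < 0x1p52))         /* huge or NaN: already integral */
    return x;
  xa = xa + 0x1p52 - 0x1p52;  /* round to integer, current rounding mode */
  return copysign (xa, x);    /* keeps -0.0 -> -0.0 correct */
}
#endif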
32463 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
into OPERAND0, without relying on DImode truncation via cvttsd2siq, which
is only available on 64-bit targets. */
32466 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
32468 /* C code for the stuff we expand below.
32469 double xa = fabs (x), x2;
32470 if (!isless (xa, TWO52)) return x;
32472 xa = xa + TWO52 - TWO52;
32473 x2 = copysign (xa, x); then compensate: floor does x2 -= (x2 > x), ceil does x2 += (x2 < x); return x2. */
32482 enum machine_mode mode = GET_MODE (operand0);
32483 rtx xa, TWO52, tmp, label, one, res, mask;
32485 TWO52 = ix86_gen_TWO52 (mode);
32487 /* Temporary for holding the result, initialized to the input
32488 operand to ease control flow. */
32489 res = gen_reg_rtx (mode);
32490 emit_move_insn (res, operand1);
32492 /* xa = abs (operand1) */
32493 xa = ix86_expand_sse_fabs (res, &mask);
32495 /* if (!isless (xa, TWO52)) goto label; */
32496 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32498 /* xa = xa + TWO52 - TWO52; */
32499 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32500 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32502 /* xa = copysign (xa, operand1) */
32503 ix86_sse_copysign_to_positive (xa, xa, res, mask);
32505 /* generate 1.0 or -1.0 */
32506 one = force_reg (mode,
32507 const_double_from_real_value (do_floor
32508 ? dconst1 : dconstm1, mode));
32510 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32511 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32512 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32513 gen_rtx_AND (mode, one, tmp)));
32514 /* We always need to subtract here to preserve signed zero. */
32515 tmp = expand_simple_binop (mode, MINUS,
32516 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32517 emit_move_insn (res, tmp);
32519 emit_label (label);
32520 LABEL_NUSES (label) = 1;
32522 emit_move_insn (operand0, res);
32525 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing into OPERAND0. */
32528 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
32530 /* C code for the stuff we expand below.
32531 double xa = fabs (x), x2;
32532 if (!isless (xa, TWO52)) return x;
32534 x2 = (double)(long)x; then compensate: floor does x2 -= (x2 > x), ceil does x2 += (x2 < x);
32541 if (HONOR_SIGNED_ZEROS (mode))
32542 return copysign (x2, x);
return x2; */
32545 enum machine_mode mode = GET_MODE (operand0);
32546 rtx xa, xi, TWO52, tmp, label, one, res, mask;
32548 TWO52 = ix86_gen_TWO52 (mode);
32550 /* Temporary for holding the result, initialized to the input
32551 operand to ease control flow. */
32552 res = gen_reg_rtx (mode);
32553 emit_move_insn (res, operand1);
32555 /* xa = abs (operand1) */
32556 xa = ix86_expand_sse_fabs (res, &mask);
32558 /* if (!isless (xa, TWO52)) goto label; */
32559 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32561 /* xa = (double)(long)x */
32562 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32563 expand_fix (xi, res, 0);
32564 expand_float (xa, xi, 0);
32567 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32569 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32570 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32571 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32572 gen_rtx_AND (mode, one, tmp)));
32573 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
32574 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32575 emit_move_insn (res, tmp);
32577 if (HONOR_SIGNED_ZEROS (mode))
32578 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32580 emit_label (label);
32581 LABEL_NUSES (label) = 1;
32583 emit_move_insn (operand0, res);
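/* Illustrative sketch, not part of GCC: scalar C equivalent of the
   do_floor case above, ignoring the HONOR_SIGNED_ZEROS copysign step.
   The *_sketch helper is hypothetical.  */
#if 0
static double
floor_sketch (double x)
{
  double xa = x < 0 ? -x : x;
  if (!(xa < 0x1p52))               /* huge or NaN: already integral */
    return x;
  double x2 = (double) (long) x;    /* truncate toward zero */
  x2 -= (x2 > x);                   /* compensate when truncation overshot */
  return x2;
}
#endif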
32586 /* Expand SSE sequence for computing round from OPERAND1 storing
32587 into OPERAND0. The sequence works without relying on DImode truncation
32588 via cvttsd2siq, which is only available on 64-bit targets. */
32590 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
32592 /* C code for the stuff we expand below.
32593 double xa = fabs (x), xa2, dxa, x2;
32594 if (!isless (xa, TWO52)) return x;
32596 Using the absolute value and copying back sign makes
32597 -0.0 -> -0.0 correct.
32598 xa2 = xa + TWO52 - TWO52;
32603 dxa = xa2 - xa; if (dxa <= -0.5) xa2 += 1; else if (dxa > 0.5) xa2 -= 1;
32605 x2 = copysign (xa2, x); return x2; */
32608 enum machine_mode mode = GET_MODE (operand0);
32609 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
32611 TWO52 = ix86_gen_TWO52 (mode);
32613 /* Temporary for holding the result, initialized to the input
32614 operand to ease control flow. */
32615 res = gen_reg_rtx (mode);
32616 emit_move_insn (res, operand1);
32618 /* xa = abs (operand1) */
32619 xa = ix86_expand_sse_fabs (res, &mask);
32621 /* if (!isless (xa, TWO52)) goto label; */
32622 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32624 /* xa2 = xa + TWO52 - TWO52; */
32625 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32626 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
32628 /* dxa = xa2 - xa; */
32629 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
32631 /* generate 0.5, 1.0 and -0.5 */
32632 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
32633 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
32634 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
32638 tmp = gen_reg_rtx (mode);
32639 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
32640 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
32641 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32642 gen_rtx_AND (mode, one, tmp)));
32643 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32644 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
32645 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
32646 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32647 gen_rtx_AND (mode, one, tmp)));
32648 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32650 /* res = copysign (xa2, operand1) */
32651 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
32653 emit_label (label);
32654 LABEL_NUSES (label) = 1;
32656 emit_move_insn (operand0, res);
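/* Illustrative sketch, not part of GCC: scalar C equivalent of the
   32-bit-safe round expansion above, assuming <math.h>.  The *_sketch
   name is ours.  */
#if 0
#include <math.h>
static double
round_df32_sketch (double x)
{
  double xa = fabs (x);
  if (!(xa < 0x1p52))
    return x;
  double xa2 = xa + 0x1p52 - 0x1p52;  /* rint (xa), current rounding mode */
  double dxa = xa2 - xa;
  xa2 -= (dxa > 0.5);                 /* rounded up by more than 1/2 */
  xa2 += (dxa <= -0.5);               /* rounded down by 1/2 or more */
  return copysign (xa2, x);
}
#endif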
32659 /* Expand SSE sequence for computing trunc from OPERAND1 storing into OPERAND0. */
32662 ix86_expand_trunc (rtx operand0, rtx operand1)
32664 /* C code for SSE variant we expand below.
32665 double xa = fabs (x), x2;
32666 if (!isless (xa, TWO52)) return x;
32668 x2 = (double)(long)x;
32669 if (HONOR_SIGNED_ZEROS (mode))
32670 return copysign (x2, x);
return x2; */
32673 enum machine_mode mode = GET_MODE (operand0);
32674 rtx xa, xi, TWO52, label, res, mask;
32676 TWO52 = ix86_gen_TWO52 (mode);
32678 /* Temporary for holding the result, initialized to the input
32679 operand to ease control flow. */
32680 res = gen_reg_rtx (mode);
32681 emit_move_insn (res, operand1);
32683 /* xa = abs (operand1) */
32684 xa = ix86_expand_sse_fabs (res, &mask);
32686 /* if (!isless (xa, TWO52)) goto label; */
32687 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32689 /* x = (double)(long)x */
32690 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32691 expand_fix (xi, res, 0);
32692 expand_float (res, xi, 0);
32694 if (HONOR_SIGNED_ZEROS (mode))
32695 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32697 emit_label (label);
32698 LABEL_NUSES (label) = 1;
32700 emit_move_insn (operand0, res);
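/* Illustrative sketch, not part of GCC: scalar C equivalent of the trunc
   expansion above, assuming <math.h>.  The *_sketch name is ours.  */
#if 0
#include <math.h>
static double
trunc_sketch (double x)
{
  double xa = fabs (x);
  if (!(xa < 0x1p52))
    return x;
  double x2 = (double) (long) x;  /* truncation toward zero */
  return copysign (x2, x);        /* restores -0.0 for inputs in (-1, 0) */
}
#endif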
32703 /* Expand SSE sequence for computing trunc from OPERAND1 storing into
OPERAND0; works without relying on DImode truncation via cvttsd2siq,
which is only available on 64-bit targets. */
32706 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
32708 enum machine_mode mode = GET_MODE (operand0);
32709 rtx xa, mask, TWO52, label, one, res, smask, tmp;
32711 /* C code for SSE variant we expand below.
32712 double xa = fabs (x), xa2, x2;
32713 if (!isless (xa, TWO52)) return x;
32715 xa2 = xa + TWO52 - TWO52; if (xa2 > xa) xa2 -= 1.0;
32719 x2 = copysign (xa2, x); return x2; */
32723 TWO52 = ix86_gen_TWO52 (mode);
32725 /* Temporary for holding the result, initialized to the input
32726 operand to ease control flow. */
32727 res = gen_reg_rtx (mode);
32728 emit_move_insn (res, operand1);
32730 /* xa = abs (operand1) */
32731 xa = ix86_expand_sse_fabs (res, &smask);
32733 /* if (!isless (xa, TWO52)) goto label; */
32734 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32736 /* res = xa + TWO52 - TWO52; */
32737 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32738 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
32739 emit_move_insn (res, tmp);
32742 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32744 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
32745 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
32746 emit_insn (gen_rtx_SET (VOIDmode, mask,
32747 gen_rtx_AND (mode, mask, one)));
32748 tmp = expand_simple_binop (mode, MINUS,
32749 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
32750 emit_move_insn (res, tmp);
32752 /* res = copysign (res, operand1) */
32753 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
32755 emit_label (label);
32756 LABEL_NUSES (label) = 1;
32758 emit_move_insn (operand0, res);
32761 /* Expand SSE sequence for computing round from OPERAND1 storing into OPERAND0. */
32764 ix86_expand_round (rtx operand0, rtx operand1)
32766 /* C code for the stuff we're doing below:
32767 double xa = fabs (x);
32768 if (!isless (xa, TWO52)) return x;
32770 xa = (double)(long)(xa + nextafter (0.5, 0.0));
32771 return copysign (xa, x); */
32773 enum machine_mode mode = GET_MODE (operand0);
32774 rtx res, TWO52, xa, label, xi, half, mask;
32775 const struct real_format *fmt;
32776 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32778 /* Temporary for holding the result, initialized to the input
32779 operand to ease control flow. */
32780 res = gen_reg_rtx (mode);
32781 emit_move_insn (res, operand1);
32783 TWO52 = ix86_gen_TWO52 (mode);
32784 xa = ix86_expand_sse_fabs (res, &mask);
32785 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32787 /* load nextafter (0.5, 0.0) */
32788 fmt = REAL_MODE_FORMAT (mode);
32789 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32790 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
32792 /* xa = xa + 0.5 */
32793 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
32794 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
32796 /* xa = (double)(int64_t)xa */
32797 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32798 expand_fix (xi, xa, 0);
32799 expand_float (xa, xi, 0);
32801 /* res = copysign (xa, operand1) */
32802 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
32804 emit_label (label);
32805 LABEL_NUSES (label) = 1;
32807 emit_move_insn (operand0, res);
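/* Illustrative sketch, not part of GCC: scalar C equivalent of the round
   expansion above, assuming <math.h> and a long wide enough for the
   magnitude.  The *_sketch name is ours.  */
#if 0
#include <math.h>
static double
round_sketch (double x)
{
  double xa = fabs (x);
  if (!(xa < 0x1p52))
    return x;
  /* Adding nextafter (0.5, 0.0) instead of 0.5 keeps values just below
     0.5 from rounding up to 1.0 in the addition.  */
  xa = (double) (long) (xa + nextafter (0.5, 0.0));
  return copysign (xa, x);
}
#endif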
32811 /* Table of valid machine attributes. */
32812 static const struct attribute_spec ix86_attribute_table[] =
32814 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
32815 /* Stdcall attribute says callee is responsible for popping arguments
32816 if they are not variable. */
32817 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32818 /* Fastcall attribute says callee is responsible for popping arguments
32819 if they are not variable. */
32820 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32821 /* Thiscall attribute says callee is responsible for popping arguments
32822 if they are not variable. */
32823 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32824 /* Cdecl attribute says the callee is a normal C declaration. */
32825 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32826 /* Regparm attribute specifies how many integer arguments are to be
32827 passed in registers. */
32828 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
32829 /* Sseregparm attribute says we are using x86_64 calling conventions
32830 for FP arguments. */
32831 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
32832 /* force_align_arg_pointer says this function realigns the stack at entry. */
32833 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
32834 false, true, true, ix86_handle_cconv_attribute },
32835 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
32836 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
32837 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
32838 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
32840 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
32841 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
32842 #ifdef SUBTARGET_ATTRIBUTE_TABLE
32843 SUBTARGET_ATTRIBUTE_TABLE,
32845 /* ms_abi and sysv_abi calling convention function attributes. */
32846 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
32847 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
32848 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
32849 { "callee_pop_aggregate_return", 1, 1, false, true, true,
32850 ix86_handle_callee_pop_aggregate_return },
32852 { NULL, 0, 0, false, false, false, NULL }
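/* Illustrative sketch, not part of GCC: how user code applies the
   calling-convention attributes registered in the table above.  The
   declaration names are hypothetical; #if 0 keeps them uncompiled.  */
#if 0
int __attribute__ ((stdcall)) callback (int a, int b);
int __attribute__ ((fastcall)) scale (int a, int b);
int __attribute__ ((regparm (3))) add3 (int a, int b, int c);
#endif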
32855 /* Implement targetm.vectorize.builtin_vectorization_cost. */
32857 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
32858 tree vectype ATTRIBUTE_UNUSED,
32859 int misalign ATTRIBUTE_UNUSED)
32861 switch (type_of_cost)
32864 return ix86_cost->scalar_stmt_cost;
32867 return ix86_cost->scalar_load_cost;
32870 return ix86_cost->scalar_store_cost;
32873 return ix86_cost->vec_stmt_cost;
32876 return ix86_cost->vec_align_load_cost;
32879 return ix86_cost->vec_store_cost;
32881 case vec_to_scalar:
32882 return ix86_cost->vec_to_scalar_cost;
32884 case scalar_to_vec:
32885 return ix86_cost->scalar_to_vec_cost;
32887 case unaligned_load:
32888 case unaligned_store:
32889 return ix86_cost->vec_unalign_load_cost;
32891 case cond_branch_taken:
32892 return ix86_cost->cond_taken_branch_cost;
32894 case cond_branch_not_taken:
32895 return ix86_cost->cond_not_taken_branch_cost;
32898 case vec_promote_demote:
32899 return ix86_cost->vec_stmt_cost;
32902 gcc_unreachable ();
32907 /* Implement targetm.vectorize.builtin_vec_perm. */
32910 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
32912 tree itype = TREE_TYPE (vec_type);
32913 bool u = TYPE_UNSIGNED (itype);
32914 enum machine_mode vmode = TYPE_MODE (vec_type);
32915 enum ix86_builtins fcode;
32916 bool ok = TARGET_SSE2;
32922 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
32925 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
32927 itype = ix86_get_builtin_type (IX86_BT_DI);
32932 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
32936 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
32938 itype = ix86_get_builtin_type (IX86_BT_SI);
32942 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
32945 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
32948 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
32951 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
32961 *mask_type = itype;
32962 return ix86_builtins[(int) fcode];
32965 /* Return a vector mode with twice as many elements as VMODE. */
32966 /* ??? Consider moving this to a table generated by genmodes.c. */
32968 static enum machine_mode
32969 doublesize_vector_mode (enum machine_mode vmode)
32973 case V2SFmode: return V4SFmode;
32974 case V1DImode: return V2DImode;
32975 case V2SImode: return V4SImode;
32976 case V4HImode: return V8HImode;
32977 case V8QImode: return V16QImode;
32979 case V2DFmode: return V4DFmode;
32980 case V4SFmode: return V8SFmode;
32981 case V2DImode: return V4DImode;
32982 case V4SImode: return V8SImode;
32983 case V8HImode: return V16HImode;
32984 case V16QImode: return V32QImode;
32986 case V4DFmode: return V8DFmode;
32987 case V8SFmode: return V16SFmode;
32988 case V4DImode: return V8DImode;
32989 case V8SImode: return V16SImode;
32990 case V16HImode: return V32HImode;
32991 case V32QImode: return V64QImode;
32994 gcc_unreachable ();
32998 /* Construct (set target (vec_select op0 (parallel perm))) and
32999 return true if that's a valid instruction in the active ISA. */
33002 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
33004 rtx rperm[MAX_VECT_LEN], x;
33007 for (i = 0; i < nelt; ++i)
33008 rperm[i] = GEN_INT (perm[i]);
33010 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
33011 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
33012 x = gen_rtx_SET (VOIDmode, target, x);
33015 if (recog_memoized (x) < 0)
33023 /* Similar, but generate a vec_concat from op0 and op1 as well. */
33026 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
33027 const unsigned char *perm, unsigned nelt)
33029 enum machine_mode v2mode;
33032 v2mode = doublesize_vector_mode (GET_MODE (op0));
33033 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
33034 return expand_vselect (target, x, perm, nelt);
33037 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33038 in terms of blendp[sd] / pblendw / pblendvb. */
33041 expand_vec_perm_blend (struct expand_vec_perm_d *d)
33043 enum machine_mode vmode = d->vmode;
33044 unsigned i, mask, nelt = d->nelt;
33045 rtx target, op0, op1, x;
33047 if (!TARGET_SSE4_1 || d->op0 == d->op1)
33049 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
33052 /* This is a blend, not a permute. Elements must stay in their
33053 respective lanes. */
33054 for (i = 0; i < nelt; ++i)
33056 unsigned e = d->perm[i];
33057 if (!(e == i || e == i + nelt))
33064 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
33065 decision should be extracted elsewhere, so that we only try that
33066 sequence once all budget==3 options have been tried. */
33068 /* For bytes, see if bytes move in pairs so we can use pblendw with
33069 an immediate argument, rather than pblendvb with a vector argument. */
33070 if (vmode == V16QImode)
33072 bool pblendw_ok = true;
33073 for (i = 0; i < 16 && pblendw_ok; i += 2)
33074 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
33078 rtx rperm[16], vperm;
33080 for (i = 0; i < nelt; ++i)
33081 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
33083 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
33084 vperm = force_reg (V16QImode, vperm);
33086 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
33091 target = d->target;
33103 for (i = 0; i < nelt; ++i)
33104 mask |= (d->perm[i] >= nelt) << i;
33108 for (i = 0; i < 2; ++i)
33109 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
33113 for (i = 0; i < 4; ++i)
33114 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
33118 for (i = 0; i < 8; ++i)
33119 mask |= (d->perm[i * 2] >= 16) << i;
33123 target = gen_lowpart (vmode, target);
33124 op0 = gen_lowpart (vmode, op0);
33125 op1 = gen_lowpart (vmode, op1);
33129 gcc_unreachable ();
33132 /* This matches five different patterns with the different modes. */
33133 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
33134 x = gen_rtx_SET (VOIDmode, target, x);
33140 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33141 in terms of the variable form of vpermilps.
33143 Note that we will have already failed the immediate input vpermilps,
33144 which requires that the high and low part shuffle be identical; the
33145 variable form doesn't require that. */
33148 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
33150 rtx rperm[8], vperm;
33153 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
33156 /* We can only permute within the 128-bit lane. */
33157 for (i = 0; i < 8; ++i)
33159 unsigned e = d->perm[i];
33160 if (i < 4 ? e >= 4 : e < 4)
33167 for (i = 0; i < 8; ++i)
33169 unsigned e = d->perm[i];
33171 /* Within each 128-bit lane, the elements of op0 are numbered
33172 from 0 and the elements of op1 are numbered from 4. */
33178 rperm[i] = GEN_INT (e);
33181 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
33182 vperm = force_reg (V8SImode, vperm);
33183 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
33188 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33189 in terms of pshufb or vpperm. */
33192 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
33194 unsigned i, nelt, eltsz;
33195 rtx rperm[16], vperm, target, op0, op1;
33197 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
33199 if (GET_MODE_SIZE (d->vmode) != 16)
33206 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33208 for (i = 0; i < nelt; ++i)
33210 unsigned j, e = d->perm[i];
33211 for (j = 0; j < eltsz; ++j)
33212 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
33215 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
33216 vperm = force_reg (V16QImode, vperm);
33218 target = gen_lowpart (V16QImode, d->target);
33219 op0 = gen_lowpart (V16QImode, d->op0);
33220 if (d->op0 == d->op1)
33221 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
33224 op1 = gen_lowpart (V16QImode, d->op1);
33225 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
33231 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
33232 in a single instruction. */
33235 expand_vec_perm_1 (struct expand_vec_perm_d *d)
33237 unsigned i, nelt = d->nelt;
33238 unsigned char perm2[MAX_VECT_LEN];
33240 /* Check plain VEC_SELECT first, because AVX has instructions that could
33241 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
33242 input where SEL+CONCAT may not. */
33243 if (d->op0 == d->op1)
33245 int mask = nelt - 1;
33247 for (i = 0; i < nelt; i++)
33248 perm2[i] = d->perm[i] & mask;
33250 if (expand_vselect (d->target, d->op0, perm2, nelt))
33253 /* There are plenty of patterns in sse.md that are written for
33254 SEL+CONCAT and are not replicated for a single op. Perhaps
33255 that should be changed, to avoid the nastiness here. */
33257 /* Recognize interleave style patterns, which means incrementing
33258 every other permutation operand. */
33259 for (i = 0; i < nelt; i += 2)
33261 perm2[i] = d->perm[i] & mask;
33262 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
33264 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
33267 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
33270 for (i = 0; i < nelt; i += 4)
33272 perm2[i + 0] = d->perm[i + 0] & mask;
33273 perm2[i + 1] = d->perm[i + 1] & mask;
33274 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
33275 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
33278 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
33283 /* Finally, try the fully general two operand permute. */
33284 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
33287 /* Recognize interleave style patterns with reversed operands. */
33288 if (d->op0 != d->op1)
33290 for (i = 0; i < nelt; ++i)
33292 unsigned e = d->perm[i];
33300 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
33304 /* Try the SSE4.1 blend variable merge instructions. */
33305 if (expand_vec_perm_blend (d))
33308 /* Try one of the AVX vpermil variable permutations. */
33309 if (expand_vec_perm_vpermil (d))
33312 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
33313 if (expand_vec_perm_pshufb (d))
33319 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33320 in terms of a pair of pshuflw + pshufhw instructions. */
33323 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
33325 unsigned char perm2[MAX_VECT_LEN];
33329 if (d->vmode != V8HImode || d->op0 != d->op1)
33332 /* The two permutations only operate in 64-bit lanes. */
33333 for (i = 0; i < 4; ++i)
33334 if (d->perm[i] >= 4)
33336 for (i = 4; i < 8; ++i)
33337 if (d->perm[i] < 4)
33343 /* Emit the pshuflw. */
33344 memcpy (perm2, d->perm, 4);
33345 for (i = 4; i < 8; ++i)
33347 ok = expand_vselect (d->target, d->op0, perm2, 8);
33350 /* Emit the pshufhw. */
33351 memcpy (perm2 + 4, d->perm + 4, 4);
33352 for (i = 0; i < 4; ++i)
33354 ok = expand_vselect (d->target, d->target, perm2, 8);
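/* Illustrative sketch, not part of GCC: one permutation this pshuflw +
   pshufhw pair handles.  Each half of the example (our own, hypothetical)
   indexes only within its own 64-bit lane, as the checks above require.  */
#if 0
static const unsigned char example_perm[8] = { 2, 0, 3, 1, 6, 4, 7, 5 };
/* pshuflw reorders elements 0-3; pshufhw reorders elements 4-7.  */
#endif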
33360 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33361 the permutation using the SSSE3 palignr instruction. This succeeds
33362 when all of the elements in PERM fit within one vector and we merely
33363 need to shift them down so that a single vector permutation has a
33364 chance to succeed. */
33367 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
33369 unsigned i, nelt = d->nelt;
33374 /* Even with AVX, palignr only operates on 128-bit vectors. */
33375 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
33378 min = nelt, max = 0;
33379 for (i = 0; i < nelt; ++i)
33381 unsigned e = d->perm[i];
33387 if (min == 0 || max - min >= nelt)
33390 /* Given that we have SSSE3, we know we'll be able to implement the
33391 single operand permutation after the palignr with pshufb. */
33395 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
33396 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
33397 gen_lowpart (TImode, d->op1),
33398 gen_lowpart (TImode, d->op0), shift));
33400 d->op0 = d->op1 = d->target;
33403 for (i = 0; i < nelt; ++i)
33405 unsigned e = d->perm[i] - min;
33411 /* Test for the degenerate case where the alignment by itself
33412 produces the desired permutation. */
33416 ok = expand_vec_perm_1 (d);
33422 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33423 a two vector permutation into a single vector permutation by using
33424 an interleave operation to merge the vectors. */
33427 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
33429 struct expand_vec_perm_d dremap, dfinal;
33430 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
33431 unsigned contents, h1, h2, h3, h4;
33432 unsigned char remap[2 * MAX_VECT_LEN];
33436 if (d->op0 == d->op1)
33439 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
33440 lanes. We can use similar techniques with the vperm2f128 instruction,
33441 but it requires slightly different logic. */
33442 if (GET_MODE_SIZE (d->vmode) != 16)
33445 /* Examine where the elements come from. */
33447 for (i = 0; i < nelt; ++i)
33448 contents |= 1u << d->perm[i];
33450 /* Split the two input vectors into 4 halves. */
33451 h1 = (1u << nelt2) - 1;
33456 memset (remap, 0xff, sizeof (remap));
33459 /* If all the elements come from the low halves, use interleave low;
33460 similarly for interleave high. If the elements come from mismatched
33461 halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
33462 if ((contents & (h1 | h3)) == contents)
33464 for (i = 0; i < nelt2; ++i)
33467 remap[i + nelt] = i * 2 + 1;
33468 dremap.perm[i * 2] = i;
33469 dremap.perm[i * 2 + 1] = i + nelt;
33472 else if ((contents & (h2 | h4)) == contents)
33474 for (i = 0; i < nelt2; ++i)
33476 remap[i + nelt2] = i * 2;
33477 remap[i + nelt + nelt2] = i * 2 + 1;
33478 dremap.perm[i * 2] = i + nelt2;
33479 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
33482 else if ((contents & (h1 | h4)) == contents)
33484 for (i = 0; i < nelt2; ++i)
33487 remap[i + nelt + nelt2] = i + nelt2;
33488 dremap.perm[i] = i;
33489 dremap.perm[i + nelt2] = i + nelt + nelt2;
33493 dremap.vmode = V2DImode;
33495 dremap.perm[0] = 0;
33496 dremap.perm[1] = 3;
33499 else if ((contents & (h2 | h3)) == contents)
33501 for (i = 0; i < nelt2; ++i)
33503 remap[i + nelt2] = i;
33504 remap[i + nelt] = i + nelt2;
33505 dremap.perm[i] = i + nelt2;
33506 dremap.perm[i + nelt2] = i + nelt;
33510 dremap.vmode = V2DImode;
33512 dremap.perm[0] = 1;
33513 dremap.perm[1] = 2;
33519 /* Use the remapping array set up above to move the elements from their
33520 swizzled locations into their final destinations. */
33522 for (i = 0; i < nelt; ++i)
33524 unsigned e = remap[d->perm[i]];
33525 gcc_assert (e < nelt);
33526 dfinal.perm[i] = e;
33528 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
33529 dfinal.op1 = dfinal.op0;
33530 dremap.target = dfinal.op0;
33532 /* Test if the final remap can be done with a single insn. For V4SFmode or
33533 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
33535 ok = expand_vec_perm_1 (&dfinal);
33536 seq = get_insns ();
33542 if (dremap.vmode != dfinal.vmode)
33544 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
33545 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
33546 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
33549 ok = expand_vec_perm_1 (&dremap);
33556 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
33557 permutation with two pshufb insns and an ior. We should have already
33558 failed all two-instruction sequences. */
33561 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
33563 rtx rperm[2][16], vperm, l, h, op, m128;
33564 unsigned int i, nelt, eltsz;
33566 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
33568 gcc_assert (d->op0 != d->op1);
33571 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33573 /* Generate two permutation masks. If the required element is within
33574 the given vector it is shuffled into the proper lane. If the required
33575 element is in the other vector, force a zero into the lane by setting
33576 bit 7 in the permutation mask. */
33577 m128 = GEN_INT (-128);
33578 for (i = 0; i < nelt; ++i)
33580 unsigned j, e = d->perm[i];
33581 unsigned which = (e >= nelt);
33585 for (j = 0; j < eltsz; ++j)
33587 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
33588 rperm[1-which][i*eltsz + j] = m128;
33592 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
33593 vperm = force_reg (V16QImode, vperm);
33595 l = gen_reg_rtx (V16QImode);
33596 op = gen_lowpart (V16QImode, d->op0);
33597 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
33599 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
33600 vperm = force_reg (V16QImode, vperm);
33602 h = gen_reg_rtx (V16QImode);
33603 op = gen_lowpart (V16QImode, d->op1);
33604 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
33606 op = gen_lowpart (V16QImode, d->target);
33607 emit_insn (gen_iorv16qi3 (op, l, h));
33612 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
33613 and extract-odd permutations. */
33616 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
33623 t1 = gen_reg_rtx (V4DFmode);
33624 t2 = gen_reg_rtx (V4DFmode);
33626 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
33627 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
33628 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
33630 /* Now an unpck[lh]pd will produce the result required. */
33632 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
33634 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
33640 int mask = odd ? 0xdd : 0x88;
33642 t1 = gen_reg_rtx (V8SFmode);
33643 t2 = gen_reg_rtx (V8SFmode);
33644 t3 = gen_reg_rtx (V8SFmode);
33646 /* Shuffle within the 128-bit lanes to produce:
33647 { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */
33648 emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
33651 /* Shuffle the lanes around to produce:
33652 { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }. */
33653 emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
33656 /* Shuffle within the 128-bit lanes to produce:
33657 { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }. */
33658 emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));
33660 /* Shuffle within the 128-bit lanes to produce:
33661 { 8 a c e c e 8 a } | { 9 b d f d f 9 b }. */
33662 emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));
33664 /* Shuffle the lanes around to produce:
33665 { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */
33666 emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
33675 /* These are always directly implementable by expand_vec_perm_1. */
33676 gcc_unreachable ();
33680 return expand_vec_perm_pshufb2 (d);
33683 /* We need 2*log2(N)-1 operations to achieve odd/even
33684 with interleave. */
33685 t1 = gen_reg_rtx (V8HImode);
33686 t2 = gen_reg_rtx (V8HImode);
33687 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
33688 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
33689 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
33690 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
33692 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
33694 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
33701 return expand_vec_perm_pshufb2 (d);
33704 t1 = gen_reg_rtx (V16QImode);
33705 t2 = gen_reg_rtx (V16QImode);
33706 t3 = gen_reg_rtx (V16QImode);
33707 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
33708 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
33709 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
33710 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
33711 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
33712 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
33714 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
33716 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
33722 gcc_unreachable ();
33728 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33729 extract-even and extract-odd permutations. */
33732 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
33734 unsigned i, odd, nelt = d->nelt;
33737 if (odd != 0 && odd != 1)
33740 for (i = 1; i < nelt; ++i)
33741 if (d->perm[i] != 2 * i + odd)
33744 return expand_vec_perm_even_odd_1 (d, odd);
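/* Illustrative sketch, not part of GCC: the permutations this routine
   recognizes, shown for nelt == 4 (two 4-element operands concatenated),
   i.e. d->perm[i] == 2 * i + odd for all i.  The arrays are our own.  */
#if 0
static const unsigned char even_perm[4] = { 0, 2, 4, 6 };  /* odd == 0 */
static const unsigned char odd_perm[4]  = { 1, 3, 5, 7 };  /* odd == 1 */
#endif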
33747 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
33748 permutations. We assume that expand_vec_perm_1 has already failed. */
33751 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
33753 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
33754 enum machine_mode vmode = d->vmode;
33755 unsigned char perm2[4];
33763 /* These are special-cased in sse.md so that we can optionally
33764 use the vbroadcast instruction. They expand to two insns
33765 if the input happens to be in a register. */
33766 gcc_unreachable ();
33772 /* These are always implementable using standard shuffle patterns. */
33773 gcc_unreachable ();
33777 /* These can be implemented via interleave. We save one insn by
33778 stopping once we have promoted to V4SImode and then using pshufd. */
33781 optab otab = vec_interleave_low_optab;
33785 otab = vec_interleave_high_optab;
33790 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
33791 vmode = get_mode_wider_vector (vmode);
33792 op0 = gen_lowpart (vmode, op0);
33794 while (vmode != V4SImode);
33796 memset (perm2, elt, 4);
33797 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
33802 gcc_unreachable ();
33806 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33807 broadcast permutations. */
33810 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
33812 unsigned i, elt, nelt = d->nelt;
33814 if (d->op0 != d->op1)
33818 for (i = 1; i < nelt; ++i)
33819 if (d->perm[i] != elt)
33822 return expand_vec_perm_broadcast_1 (d);
33825 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
33826 With all of the interface bits taken care of, perform the expansion
33827 in D and return true on success. */
33830 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
33832 /* Try a single instruction expansion. */
33833 if (expand_vec_perm_1 (d))
33836 /* Try sequences of two instructions. */
33838 if (expand_vec_perm_pshuflw_pshufhw (d))
33841 if (expand_vec_perm_palignr (d))
33844 if (expand_vec_perm_interleave2 (d))
33847 if (expand_vec_perm_broadcast (d))
33850 /* Try sequences of three instructions. */
33852 if (expand_vec_perm_pshufb2 (d))
33855 /* ??? Look for narrow permutations whose element orderings would
33856 allow the promotion to a wider mode. */
33858 /* ??? Look for sequences of interleave or a wider permute that place
33859 the data into the correct lanes for a half-vector shuffle like
33860 pshuf[lh]w or vpermilps. */
33862 /* ??? Look for sequences of interleave that produce the desired results.
33863 The combinatorics of punpck[lh] get pretty ugly... */
33865 if (expand_vec_perm_even_odd (d))
33871 /* Extract the values from the vector CST into the permutation array in D.
33872 Return 0 on error, 1 if all values from the permutation come from the
33873 first vector, 2 if all values from the second vector, and 3 otherwise. */
33876 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
33878 tree list = TREE_VECTOR_CST_ELTS (cst);
33879 unsigned i, nelt = d->nelt;
33882 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
33884 unsigned HOST_WIDE_INT e;
33886 if (!host_integerp (TREE_VALUE (list), 1))
33888 e = tree_low_cst (TREE_VALUE (list), 1);
33892 ret |= (e < nelt ? 1 : 2);
33895 gcc_assert (list == NULL);
33897 /* If all elements come from the second vector, fold them onto the first. */
33899 for (i = 0; i < nelt; ++i)
33900 d->perm[i] -= nelt;
33906 ix86_expand_vec_perm_builtin (tree exp)
33908 struct expand_vec_perm_d d;
33909 tree arg0, arg1, arg2;
33911 arg0 = CALL_EXPR_ARG (exp, 0);
33912 arg1 = CALL_EXPR_ARG (exp, 1);
33913 arg2 = CALL_EXPR_ARG (exp, 2);
33915 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
33916 d.nelt = GET_MODE_NUNITS (d.vmode);
33917 d.testing_p = false;
33918 gcc_assert (VECTOR_MODE_P (d.vmode));
33920 if (TREE_CODE (arg2) != VECTOR_CST)
33922 error_at (EXPR_LOCATION (exp),
33923 "vector permutation requires vector constant");
33927 switch (extract_vec_perm_cst (&d, arg2))
33933 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
33937 if (!operand_equal_p (arg0, arg1, 0))
33939 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33940 d.op0 = force_reg (d.vmode, d.op0);
33941 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33942 d.op1 = force_reg (d.vmode, d.op1);
33946 /* The elements of PERM do not suggest that only the first operand
33947 is used, but both operands are identical. Allow easier matching
33948 of the permutation by folding the permutation into the single input vector. */
33951 unsigned i, nelt = d.nelt;
33952 for (i = 0; i < nelt; ++i)
33953 if (d.perm[i] >= nelt)
33959 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33960 d.op0 = force_reg (d.vmode, d.op0);
33965 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33966 d.op0 = force_reg (d.vmode, d.op0);
33971 d.target = gen_reg_rtx (d.vmode);
33972 if (ix86_expand_vec_perm_builtin_1 (&d))
33975 /* For compiler generated permutations, we should never get here, because
33976 the compiler should also be checking the ok hook. But since this is a
33977 builtin the user has access to, don't abort. */
33981 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
33984 sorry ("vector permutation (%d %d %d %d)",
33985 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
33988 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
33989 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
33990 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
33993 sorry ("vector permutation "
33994 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
33995 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
33996 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
33997 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
33998 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
34001 gcc_unreachable ();
34004 return CONST0_RTX (d.vmode);
34007 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
34010 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
34012 struct expand_vec_perm_d d;
34016 d.vmode = TYPE_MODE (vec_type);
34017 d.nelt = GET_MODE_NUNITS (d.vmode);
34018 d.testing_p = true;
34020 /* Given sufficient ISA support we can just return true here
34021 for selected vector modes. */
34022 if (GET_MODE_SIZE (d.vmode) == 16)
34024 /* All implementable with a single vpperm insn. */
34027 /* All implementable with 2 pshufb + 1 ior. */
34030 /* All implementable with shufpd or unpck[lh]pd. */
34035 vec_mask = extract_vec_perm_cst (&d, mask);
34037 /* This hook cannot be called in response to something that the
34038 user does (unlike the builtin expander), so we should never see
34039 an error generated from the extract. */
34040 gcc_assert (vec_mask > 0 && vec_mask <= 3);
34041 one_vec = (vec_mask != 3);
34043 /* Implementable with shufps or pshufd. */
34044 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
34047 /* Otherwise we have to go through the motions and see if we can
34048 figure out how to generate the requested permutation. */
34049 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
34050 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
34052 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
34055 ret = ix86_expand_vec_perm_builtin_1 (&d);
34062 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
34064 struct expand_vec_perm_d d;
34070 d.vmode = GET_MODE (targ);
34071 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
34072 d.testing_p = false;
34074 for (i = 0; i < nelt; ++i)
34075 d.perm[i] = i * 2 + odd;
34077 /* We'll either be able to implement the permutation directly... */
34078 if (expand_vec_perm_1 (&d))
34081 /* ... or we use the special-case patterns. */
34082 expand_vec_perm_even_odd_1 (&d, odd);
34085 /* This function returns the va_list type node specific to the
34086 calling ABI of FNDECL. */
34089 ix86_fn_abi_va_list (tree fndecl)
34092 return va_list_type_node;
34093 gcc_assert (fndecl != NULL_TREE);
34095 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
34096 return ms_va_list_type_node;
34098 return sysv_va_list_type_node;
34101 /* Returns the canonical va_list type specified by TYPE. If there
34102 is no valid TYPE provided, it returns NULL_TREE. */
34105 ix86_canonical_va_list_type (tree type)
34109 /* Resolve references and pointers to va_list type. */
34110 if (TREE_CODE (type) == MEM_REF)
34111 type = TREE_TYPE (type);
34112 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
34113 type = TREE_TYPE (type);
34114 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
34115 type = TREE_TYPE (type);
34117 if (TARGET_64BIT && va_list_type_node != NULL_TREE)
34119 wtype = va_list_type_node;
34120 gcc_assert (wtype != NULL_TREE);
34122 if (TREE_CODE (wtype) == ARRAY_TYPE)
34124 /* If va_list is an array type, the argument may have decayed
34125 to a pointer type, e.g. by being passed to another function.
34126 In that case, unwrap both types so that we can compare the
34127 underlying records. */
34128 if (TREE_CODE (htype) == ARRAY_TYPE
34129 || POINTER_TYPE_P (htype))
34131 wtype = TREE_TYPE (wtype);
34132 htype = TREE_TYPE (htype);
34135 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34136 return va_list_type_node;
34137 wtype = sysv_va_list_type_node;
34138 gcc_assert (wtype != NULL_TREE);
34140 if (TREE_CODE (wtype) == ARRAY_TYPE)
34142 /* If va_list is an array type, the argument may have decayed
34143 to a pointer type, e.g. by being passed to another function.
34144 In that case, unwrap both types so that we can compare the
34145 underlying records. */
34146 if (TREE_CODE (htype) == ARRAY_TYPE
34147 || POINTER_TYPE_P (htype))
34149 wtype = TREE_TYPE (wtype);
34150 htype = TREE_TYPE (htype);
34153 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34154 return sysv_va_list_type_node;
34155 wtype = ms_va_list_type_node;
34156 gcc_assert (wtype != NULL_TREE);
34158 if (TREE_CODE (wtype) == ARRAY_TYPE)
34160 /* If va_list is an array type, the argument may have decayed
34161 to a pointer type, e.g. by being passed to another function.
34162 In that case, unwrap both types so that we can compare the
34163 underlying records. */
34164 if (TREE_CODE (htype) == ARRAY_TYPE
34165 || POINTER_TYPE_P (htype))
34167 wtype = TREE_TYPE (wtype);
34168 htype = TREE_TYPE (htype);
34171 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34172 return ms_va_list_type_node;
34175 return std_canonical_va_list_type (type);
34178 /* Iterate through the target-specific builtin types for va_list.
34179 IDX denotes the iterator, *PTREE is set to the result type of
34180 the va_list builtin, and *PNAME to its internal type.
34181 Returns zero if there is no element for this index, otherwise
34182 IDX should be increased upon the next call.
34183 Note, do not iterate a base builtin's name like __builtin_va_list.
34184 Used from c_common_nodes_and_builtins. */
34187 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
34197 *ptree = ms_va_list_type_node;
34198 *pname = "__builtin_ms_va_list";
34202 *ptree = sysv_va_list_type_node;
34203 *pname = "__builtin_sysv_va_list";
34211 #undef TARGET_SCHED_DISPATCH
34212 #define TARGET_SCHED_DISPATCH has_dispatch
34213 #undef TARGET_SCHED_DISPATCH_DO
34214 #define TARGET_SCHED_DISPATCH_DO do_dispatch
34216 /* The size of the dispatch window is the total number of bytes of
34217 object code allowed in a window. */
34218 #define DISPATCH_WINDOW_SIZE 16
34220 /* Number of dispatch windows considered for scheduling. */
34221 #define MAX_DISPATCH_WINDOWS 3
34223 /* Maximum number of instructions in a window. */
34226 /* Maximum number of immediate operands in a window. */
34229 /* Maximum number of immediate bits allowed in a window. */
34230 #define MAX_IMM_SIZE 128
34232 /* Maximum number of 32 bit immediates allowed in a window. */
34233 #define MAX_IMM_32 4
34235 /* Maximum number of 64 bit immediates allowed in a window. */
34236 #define MAX_IMM_64 2
34238 /* Maximum total of loads or prefetches allowed in a window. */
34241 /* Maximum total of stores allowed in a window. */
34242 #define MAX_STORE 1
34248 /* Dispatch groups. Instructions that affect the mix in a dispatch window. */
34249 enum dispatch_group {
34264 /* Number of allowable groups in a dispatch window. It is an array
34265 indexed by the dispatch_group enum. 100 is used as a big number
34266 because the number of these kinds of operations does not have any
34267 effect in a dispatch window, but we need them for other reasons in
the table. */
34269 static unsigned int num_allowable_groups[disp_last] = {
34270 0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
34273 char group_name[disp_last + 1][16] = {
34274 "disp_no_group", "disp_load", "disp_store", "disp_load_store",
34275 "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
34276 "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
34279 /* Instruction path. */
34282 path_single, /* Single micro op. */
34283 path_double, /* Double micro op. */
34284 path_multi, /* Instructions with more than 2 micro ops. */
34288 /* sched_insn_info defines a window to the instructions scheduled in
34289 the basic block. It contains a pointer to the insn_info table and
34290 the instruction scheduled.
34292 Windows are allocated for each basic block and are linked together. */
34294 typedef struct sched_insn_info_s {
34296 enum dispatch_group group;
34297 enum insn_path path;
34302 /* Linked list of dispatch windows. This is a two-way list of
34303 dispatch windows of a basic block. It contains information about
34304 the number of uops in the window and the total number of
34305 instructions and of bytes in the object code for this dispatch window. */
34307 typedef struct dispatch_windows_s {
34308 int num_insn; /* Number of insns in the window. */
34309 int num_uops; /* Number of uops in the window. */
34310 int window_size; /* Number of bytes in the window. */
34311 int window_num; /* Window number, 0 or 1. */
34312 int num_imm; /* Number of immediates in an insn. */
34313 int num_imm_32; /* Number of 32 bit immediates in an insn. */
34314 int num_imm_64; /* Number of 64 bit immediates in an insn. */
34315 int imm_size; /* Total immediates in the window. */
34316 int num_loads; /* Total memory loads in the window. */
34317 int num_stores; /* Total memory stores in the window. */
34318 int violation; /* Violation exists in window. */
34319 sched_insn_info *window; /* Pointer to the window. */
34320 struct dispatch_windows_s *next;
34321 struct dispatch_windows_s *prev;
34322 } dispatch_windows;
34324 /* Immediate values used in an insn. */
34325 typedef struct imm_info_s
34332 static dispatch_windows *dispatch_window_list;
34333 static dispatch_windows *dispatch_window_list1;
34335 /* Get dispatch group of insn. */
34337 static enum dispatch_group
34338 get_mem_group (rtx insn)
34340 enum attr_memory memory;
34342 if (INSN_CODE (insn) < 0)
34343 return disp_no_group;
34344 memory = get_attr_memory (insn);
34345 if (memory == MEMORY_STORE)
34348 if (memory == MEMORY_LOAD)
34351 if (memory == MEMORY_BOTH)
34352 return disp_load_store;
34354 return disp_no_group;
34357 /* Return true if insn is a compare instruction. */
34362 enum attr_type type;
34364 type = get_attr_type (insn);
34365 return (type == TYPE_TEST
34366 || type == TYPE_ICMP
34367 || type == TYPE_FCMP
34368 || GET_CODE (PATTERN (insn)) == COMPARE);
34371 /* Return true if a dispatch violation was encountered. */
34374 dispatch_violation (void)
34376 if (dispatch_window_list->next)
34377 return dispatch_window_list->next->violation;
34378 return dispatch_window_list->violation;
34381 /* Return true if insn is a branch instruction. */
34384 is_branch (rtx insn)
34386 return (CALL_P (insn) || JUMP_P (insn));
34389 /* Return true if insn is a prefetch instruction. */
34392 is_prefetch (rtx insn)
34394 return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
34397 /* This function initializes a dispatch window and the list container holding a
34398 pointer to the window. */
34401 init_window (int window_num)
34404 dispatch_windows *new_list;
34406 if (window_num == 0)
34407 new_list = dispatch_window_list;
34409 new_list = dispatch_window_list1;
34411 new_list->num_insn = 0;
34412 new_list->num_uops = 0;
34413 new_list->window_size = 0;
34414 new_list->next = NULL;
34415 new_list->prev = NULL;
34416 new_list->window_num = window_num;
34417 new_list->num_imm = 0;
34418 new_list->num_imm_32 = 0;
34419 new_list->num_imm_64 = 0;
34420 new_list->imm_size = 0;
34421 new_list->num_loads = 0;
34422 new_list->num_stores = 0;
34423 new_list->violation = false;
34425 for (i = 0; i < MAX_INSN; i++)
34427 new_list->window[i].insn = NULL;
34428 new_list->window[i].group = disp_no_group;
34429 new_list->window[i].path = no_path;
34430 new_list->window[i].byte_len = 0;
34431 new_list->window[i].imm_bytes = 0;
34436 /* This function allocates and initializes a dispatch window and the
34437 list container holding a pointer to the window. */
34439 static dispatch_windows *
34440 allocate_window (void)
34442 dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
34443 new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);
34448 /* This routine initializes the dispatch scheduling information. It
34449 initiates building dispatch scheduler tables and constructs the
34450 first dispatch window. */
34453 init_dispatch_sched (void)
34455 /* Allocate a dispatch list and a window. */
34456 dispatch_window_list = allocate_window ();
34457 dispatch_window_list1 = allocate_window ();
34462 /* This function returns true if a branch is detected. End of a basic block
34463 does not have to be a branch, but here we assume only branches end a window. */
34467 is_end_basic_block (enum dispatch_group group)
34469 return group == disp_branch;
34472 /* This function is called when the end of a window's processing is reached. */
34475 process_end_window (void)
34477 gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
34478 if (dispatch_window_list->next)
34480 gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
34481 gcc_assert (dispatch_window_list->window_size
34482 + dispatch_window_list1->window_size <= 48);
34488 /* Allocates a new dispatch window and adds it to WINDOW_LIST.
34489 WINDOW_NUM is either 0 or 1. A maximum of two windows are generated
34490 for 48 bytes of instructions. Note that these windows are not dispatch
34491 windows of size DISPATCH_WINDOW_SIZE. */
34493 static dispatch_windows *
34494 allocate_next_window (int window_num)
34496 if (window_num == 0)
34498 if (dispatch_window_list->next)
34501 return dispatch_window_list;
34504 dispatch_window_list->next = dispatch_window_list1;
34505 dispatch_window_list1->prev = dispatch_window_list;
34507 return dispatch_window_list1;
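/* Usage sketch: window 0 is always (re)initialized first; asking for
   window 1 chains dispatch_window_list1 after dispatch_window_list, so
   at most two windows are ever live and callers simply alternate
   between them with "window_num = ~window_num & 1" (see
   add_to_dispatch_window below).  */
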
/* Increment the number of immediate operands of an instruction.  */

static int
find_constant_1 (rtx *in_rtx, imm_info *imm_values)
{
  if (*in_rtx == 0)
    return 0;

  switch (GET_CODE (*in_rtx))
    {
    case CONST:
    case SYMBOL_REF:
    case CONST_INT:
      (imm_values->imm)++;
      if (x86_64_immediate_operand (*in_rtx, SImode))
        (imm_values->imm32)++;
      else
        (imm_values->imm64)++;
      break;

    case CONST_DOUBLE:
      (imm_values->imm)++;
      (imm_values->imm64)++;
      break;

    case CODE_LABEL:
      if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
        {
          (imm_values->imm)++;
          (imm_values->imm32)++;
        }
      break;

    default:
      break;
    }

  return 0;
}

/* Compute the number of immediate operands of an instruction.  */

static void
find_constant (rtx in_rtx, imm_info *imm_values)
{
  for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
                (rtx_function) find_constant_1, (void *) imm_values);
}

/* Return the total size of the immediate operands of an instruction
   along with the number of corresponding immediate operands.  It
   initializes its parameters to zero before calling FIND_CONSTANT.
   INSN is the input instruction.  IMM is the total of immediates.
   IMM32 is the number of 32 bit immediates.  IMM64 is the number of 64
   bit immediates.  */

static int
get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
{
  imm_info imm_values = {0, 0, 0};

  find_constant (insn, &imm_values);
  *imm = imm_values.imm;
  *imm32 = imm_values.imm32;
  *imm64 = imm_values.imm64;
  return imm_values.imm32 * 4 + imm_values.imm64 * 8;
}

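/* Worked example: an insn carrying two 32-bit immediates and one
   64-bit immediate yields *IMM == 3, *IMM32 == 2, *IMM64 == 1, and a
   return value of 2*4 + 1*8 = 16 bytes.  */
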
/* This function indicates if an operand of an instruction is an
   immediate.  */

static bool
has_immediate (rtx insn)
{
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (insn)
    return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
                               &num_imm64_operand);
  return false;
}

/* Return single or double path for instructions.  */

static enum insn_path
get_insn_path (rtx insn)
{
  enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);

  if ((int) path == 0)
    return path_single;

  if ((int) path == 1)
    return path_double;

  return path_multi;
}

/* Return insn dispatch group.  */

static enum dispatch_group
get_insn_group (rtx insn)
{
  enum dispatch_group group = get_mem_group (insn);
  if (group)
    return group;

  if (is_branch (insn))
    return disp_branch;

  if (is_cmp (insn))
    return disp_cmp;

  if (has_immediate (insn))
    return disp_imm;

  if (is_prefetch (insn))
    return disp_prefetch;

  return disp_no_group;
}

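/* The tests above are ordered by precedence: a memory group from
   get_mem_group wins over everything else, followed by branch,
   compare, immediate and prefetch; whatever is left is unrestricted
   (disp_no_group).  */
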
/* Count the number of GROUP-restricted instructions in a dispatch
   window WINDOW_LIST.  */

static int
count_num_restricted (rtx insn, dispatch_windows *window_list)
{
  enum dispatch_group group = get_insn_group (insn);
  int imm_size;
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (group == disp_no_group)
    return 0;

  if (group == disp_imm)
    {
      imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
                                     &num_imm64_operand);
      if (window_list->imm_size + imm_size > MAX_IMM_SIZE
          || num_imm_operand + window_list->num_imm > MAX_IMM
          || (num_imm32_operand > 0
              && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
                  || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
          || (num_imm64_operand > 0
              && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
                  || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
          || (window_list->imm_size + imm_size == MAX_IMM_SIZE
              && num_imm64_operand > 0
              && ((window_list->num_imm_64 > 0
                   && window_list->num_insn >= 2)
                  || window_list->num_insn >= 3)))
        return BIG;

      return 1;
    }

  if ((group == disp_load_store
       && (window_list->num_loads >= MAX_LOAD
           || window_list->num_stores >= MAX_STORE))
      || ((group == disp_load
           || group == disp_prefetch)
          && window_list->num_loads >= MAX_LOAD)
      || (group == disp_store
          && window_list->num_stores >= MAX_STORE))
    return BIG;

  return 1;
}

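/* Note the accounting above: a 64-bit immediate also consumes two
   32-bit immediate slots, which is why num_imm_64 is scaled by 2 when
   charged against MAX_IMM_32 (and, symmetrically, incoming 64-bit
   operands are charged twice against the 32-bit budget).  Returning
   BIG, a value assumed to exceed every entry of num_allowable_groups[],
   guarantees the caller's limit check fails for an over-committed
   window.  */
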
/* This function returns true if insn satisfies dispatch rules on the
   last window scheduled.  */

static bool
fits_dispatch_window (rtx insn)
{
  dispatch_windows *window_list = dispatch_window_list;
  dispatch_windows *window_list_next = dispatch_window_list->next;
  unsigned int num_restrict;
  enum dispatch_group group = get_insn_group (insn);
  enum insn_path path = get_insn_path (insn);
  int sum;

  /* Make disp_cmp and disp_jcc get scheduled as late as possible.  These
     instructions should be given the lowest priority in the
     scheduling process in the Haifa scheduler to make sure they will be
     scheduled in the same dispatch window as the reference to them.  */
  if (group == disp_jcc || group == disp_cmp)
    return false;

  /* Check nonrestricted.  */
  if (group == disp_no_group || group == disp_branch)
    return true;

  /* Get last dispatch window.  */
  if (window_list_next)
    window_list = window_list_next;

  if (window_list->window_num == 1)
    {
      sum = window_list->prev->window_size + window_list->window_size;

      if (sum == 32
          || (min_insn_size (insn) + sum) >= 48)
        /* Window 1 is full.  Go for next window.  */
        return true;
    }

  num_restrict = count_num_restricted (insn, window_list);

  if (num_restrict > num_allowable_groups[group])
    return false;

  /* See if it fits in the first window.  */
  if (window_list->window_num == 0)
    {
      /* The first window should have only single and double path
         uops.  */
      if (path == path_double
          && (window_list->num_uops + 2) > MAX_INSN)
        return false;
      else if (path != path_single)
        return false;
    }
  return true;
}

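/* The 32- and 48-byte thresholds above mirror the fullness test in
   add_to_dispatch_window below: once windows 0 and 1 together hold 32
   bytes, or adding this insn would reach 48 bytes, window 1 is
   considered full and the insn is deferred to a fresh window pair.  */
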
/* Add an instruction INSN with NUM_UOPS micro-operations to the
   dispatch window WINDOW_LIST.  */

static void
add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
{
  int byte_len = min_insn_size (insn);
  int num_insn = window_list->num_insn;
  int imm_size;
  sched_insn_info *window = window_list->window;
  enum dispatch_group group = get_insn_group (insn);
  enum insn_path path = get_insn_path (insn);
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (!window_list->violation && group != disp_cmp
      && !fits_dispatch_window (insn))
    window_list->violation = true;

  imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
                                 &num_imm64_operand);

  /* Initialize window with new instruction.  */
  window[num_insn].insn = insn;
  window[num_insn].byte_len = byte_len;
  window[num_insn].group = group;
  window[num_insn].path = path;
  window[num_insn].imm_bytes = imm_size;

  window_list->window_size += byte_len;
  window_list->num_insn = num_insn + 1;
  window_list->num_uops = window_list->num_uops + num_uops;
  window_list->imm_size += imm_size;
  window_list->num_imm += num_imm_operand;
  window_list->num_imm_32 += num_imm32_operand;
  window_list->num_imm_64 += num_imm64_operand;

  if (group == disp_store)
    window_list->num_stores += 1;
  else if (group == disp_load
           || group == disp_prefetch)
    window_list->num_loads += 1;
  else if (group == disp_load_store)
    {
      window_list->num_stores += 1;
      window_list->num_loads += 1;
    }
}

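/* Note the load/store bookkeeping above: a prefetch is charged against
   the load budget together with plain loads, while a load-store insn
   is charged against both the load and the store budget.  */
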
/* Adds a scheduled instruction, INSN, to the current dispatch window.
   If the total bytes of instructions or the number of instructions in
   the window exceed the allowed maximum, it allocates a new window.  */

static void
add_to_dispatch_window (rtx insn)
{
  int byte_len;
  dispatch_windows *window_list;
  dispatch_windows *next_list;
  dispatch_windows *window0_list;
  enum insn_path path;
  enum dispatch_group insn_group;
  bool insn_fits;
  int num_insn;
  int num_uops;
  int window_num;
  int insn_num_uops;
  int sum;

  if (INSN_CODE (insn) < 0)
    return;

  byte_len = min_insn_size (insn);
  window_list = dispatch_window_list;
  next_list = window_list->next;
  path = get_insn_path (insn);
  insn_group = get_insn_group (insn);

  /* Get the last dispatch window.  */
  if (next_list)
    window_list = dispatch_window_list->next;

  if (path == path_single)
    insn_num_uops = 1;
  else if (path == path_double)
    insn_num_uops = 2;
  else
    insn_num_uops = (int) path;

  /* If current window is full, get a new window.
     Window number zero is full, if MAX_INSN uops are scheduled in it.
     Window number one is full, if window zero's bytes plus window
     one's bytes is 32, or if the bytes of the new instruction added
     to the total makes it greater than 48, or it has already MAX_INSN
     instructions in it.  */
  num_insn = window_list->num_insn;
  num_uops = window_list->num_uops;
  window_num = window_list->window_num;
  insn_fits = fits_dispatch_window (insn);

  if (num_insn >= MAX_INSN
      || num_uops + insn_num_uops > MAX_INSN
      || !(insn_fits))
    {
      window_num = ~window_num & 1;
      window_list = allocate_next_window (window_num);
    }

  if (window_num == 0)
    {
      add_insn_window (insn, window_list, insn_num_uops);
      if (window_list->num_insn >= MAX_INSN
          && insn_group == disp_branch)
        {
          process_end_window ();
          return;
        }
    }
  else if (window_num == 1)
    {
      window0_list = window_list->prev;
      sum = window0_list->window_size + window_list->window_size;
      if (sum == 32
          || (byte_len + sum) >= 48)
        {
          process_end_window ();
          window_list = dispatch_window_list;
        }

      add_insn_window (insn, window_list, insn_num_uops);
    }
  else
    gcc_unreachable ();

  if (is_end_basic_block (insn_group))
    {
      /* End of basic block is reached; do end-basic-block process.  */
      process_end_window ();
      return;
    }
}

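/* Worked example, assuming MAX_INSN is 4 (the value used for BDVER1
   earlier in this file): four single-path insns fill window 0; the
   fifth flips window_num to 1 via "~window_num & 1" and lands in
   window 1.  As soon as the two windows hold 32 bytes together, or the
   next insn would push the pair past 48 bytes, process_end_window
   resets both windows and accounting restarts at window 0.  */
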
/* Print the dispatch window, WINDOW_NUM, to FILE.  */

DEBUG_FUNCTION static void
debug_dispatch_window_file (FILE *file, int window_num)
{
  dispatch_windows *list;
  int i;

  if (window_num == 0)
    list = dispatch_window_list;
  else
    list = dispatch_window_list1;

  fprintf (file, "Window #%d:\n", list->window_num);
  fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
           list->num_insn, list->num_uops, list->window_size);
  fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
           list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);

  fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
           list->num_stores);
  fprintf (file, " insn info:\n");

  for (i = 0; i < MAX_INSN; i++)
    {
      if (!list->window[i].insn)
        break;
      fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
               i, group_name[list->window[i].group],
               i, (void *) list->window[i].insn,
               i, list->window[i].path,
               i, list->window[i].byte_len,
               i, list->window[i].imm_bytes);
    }
}

/* Print to stdout a dispatch window.  */

DEBUG_FUNCTION void
debug_dispatch_window (int window_num)
{
  debug_dispatch_window_file (stdout, window_num);
}

/* Print INSN dispatch information to FILE.  */

DEBUG_FUNCTION static void
debug_insn_dispatch_info_file (FILE *file, rtx insn)
{
  int byte_len;
  enum insn_path path;
  enum dispatch_group group;
  int imm_size;
  int num_imm_operand;
  int num_imm32_operand;
  int num_imm64_operand;

  if (INSN_CODE (insn) < 0)
    return;

  byte_len = min_insn_size (insn);
  path = get_insn_path (insn);
  group = get_insn_group (insn);
  imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
                                 &num_imm64_operand);

  fprintf (file, " insn info:\n");
  fprintf (file, " group = %s, path = %d, byte_len = %d\n",
           group_name[group], path, byte_len);
  fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
           num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
}

/* Print to stdout the status of the ready list with respect to
   dispatch windows.  */

DEBUG_FUNCTION void
debug_ready_dispatch (void)
{
  int i;
  int no_ready = number_in_ready ();

  fprintf (stdout, "Number of ready: %d\n", no_ready);

  for (i = 0; i < no_ready; i++)
    debug_insn_dispatch_info_file (stdout, get_ready_element (i));
}

/* This routine is the driver of the dispatch scheduler.  */

static void
do_dispatch (rtx insn, int mode)
{
  if (mode == DISPATCH_INIT)
    init_dispatch_sched ();
  else if (mode == ADD_TO_DISPATCH_WINDOW)
    add_to_dispatch_window (insn);
}

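/* do_dispatch above and has_dispatch below are the entry points used
   by the scheduler's dispatch hooks: the scheduler first queries
   IS_DISPATCH_ON, then drives insns through DISPATCH_INIT and
   ADD_TO_DISPATCH_WINDOW, and consults DISPATCH_VIOLATION /
   FITS_DISPATCH_WINDOW as needed.  */
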
/* Return TRUE if Dispatch Scheduling is supported.  */

static bool
has_dispatch (rtx insn, int action)
{
  if (ix86_tune == PROCESSOR_BDVER1 && flag_dispatch_scheduler)
    switch (action)
      {
      default:
        return false;

      case IS_DISPATCH_ON:
        return true;

      case IS_CMP:
        return is_cmp (insn);

      case DISPATCH_VIOLATION:
        return dispatch_violation ();

      case FITS_DISPATCH_WINDOW:
        return fits_dispatch_window (insn);
      }

  return false;
}

/* ??? No autovectorization into MMX or 3DNOW until we can reliably
   place emms and femms instructions.  */

static enum machine_mode
ix86_preferred_simd_mode (enum machine_mode mode)
{
  /* Disable double precision vectorizer if needed.  */
  if (mode == DFmode && !TARGET_VECTORIZE_DOUBLE)
    return word_mode;

  if (!TARGET_AVX && !TARGET_SSE)
    return word_mode;

  switch (mode)
    {
    case SFmode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V8SFmode : V4SFmode;
    case DFmode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V4DFmode : V2DFmode;
    case DImode:
      return V2DImode;
    case SImode:
      return V4SImode;
    case HImode:
      return V8HImode;
    case QImode:
      return V16QImode;

    default:;
    }

  return word_mode;
}

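/* For example, with -mavx (and without -mprefer-avx128) an SFmode loop
   is vectorized in V8SFmode, while plain SSE yields V4SFmode.
   Returning word_mode means no SIMD mode is preferred, which
   effectively disables vectorization for that element mode.  */
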
/* If AVX is enabled then try vectorizing with both 256bit and 128bit
   vectors.  */

static unsigned int
ix86_autovectorize_vector_sizes (void)
{
  return (TARGET_AVX && !TARGET_PREFER_AVX128) ? 32 | 16 : 0;
}

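/* The return value is a bit mask of vector sizes in bytes: 32 | 16
   makes the vectorizer try 256-bit vectors first and fall back to
   128-bit ones, while 0 means only the size of the preferred SIMD mode
   is tried.  */
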
/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
# undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_PROFILE_BEFORE_PROLOGUE
#define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND ix86_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE ix86_option_override
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE ix86_option_optimization_table
#undef TARGET_OPTION_INIT_STRUCT
#define TARGET_OPTION_INIT_STRUCT ix86_option_init_struct

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_ENUM_VA_LIST_P
#define TARGET_ENUM_VA_LIST_P ix86_enum_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG ix86_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS ix86_return_pops_args

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_SUPPORTS_SPLIT_STACK
#define TARGET_SUPPORTS_SPLIT_STACK ix86_supports_split_stack

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
  ix86_vectorize_builtin_vec_perm
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
  ix86_vectorize_builtin_vec_perm_ok
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  ix86_preferred_simd_mode
#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
  ix86_autovectorize_vector_sizes

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage

#if TARGET_MACHO
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS darwin_rename_builtins
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"